sqlfluff-2.3.5/ (git source archive; commit 472f9b89984da6ee31069c023c2433ec0c5d0080)

===== sqlfluff-2.3.5/.deepsource.toml =====

version = 1

test_patterns = [
  'test/**',
]

exclude_patterns = [
  'docs/**',
  'util.py',  # not part of the core sqlfluff code
]

[[ analyzers ]]
name = 'python'
enabled = true

===== sqlfluff-2.3.5/.dockerignore =====

# Ignore IDE files
.vscode
.idea
/.sqlfluff
**/.DS_Store

# Ignore Python cache and prebuilt things
.cache
__pycache__
*.egg-info
*.pyc
build
_build
dist
.pytest_cache

# Ignore the Environment
env
.tox
venv
.venv
.python-version

# Ignore coverage reports
.coverage
.coverage.*
coverage.xml
htmlcov
*.cover

# Ignore test reports
.test-reports
test-reports

# Ignore root testing sql & python files
/test*.sql
/test*.py
/.hypothesis/

# Ignore dbt outputs from testing
/target

# Ignore conda environment.yml contributors might be using and direnv config
environment.yml
.envrc

**/*FIXED.sql

# Others
pip-log.txt
pip-delete-this-directory.txt
*.log
.git
.mypy_cache
.pytest_cache

===== sqlfluff-2.3.5/.editorconfig =====

# editorconfig.org
root = true

[*]
indent_style = space
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{html,md,js,css}]
indent_size = 2

[*.py]
indent_size = 4

# Don't correct indentation for sql and yaml files, as we sometimes want it wrong for tests
[*.{yml,yaml,sql}]
indent_style = unset

# Some specific tests with trailing newlines
# If adding any exceptions here, make sure to add them to .pre-commit-config.yaml as well
[test/fixtures/linter/sqlfluffignore/*/*.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[test/fixtures/config/inheritance_b/{,nested/}example.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[trailing_newlines.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/{,dbt_utils_0.8.0/}last_day.sql]
indent_style = unset
trim_trailing_whitespace = unset
insert_final_newline = unset

[test/fixtures/linter/indentation_errors.sql,test/fixtures/templater/jinja_d_roundtrip/test.sql]
trim_trailing_whitespace = false

[*.rst]
indent_size = 3

===== sqlfluff-2.3.5/.gitattributes =====

# We'll let Git's auto-detection algorithm infer if a file is text. If it is,
# enforce LF line endings regardless of OS or git configurations.
* text=auto eol=lf

# Allow Batch files to be CRLF:
*.bat text eol=crlf

# Isolate binary files in case the auto-detection algorithm fails and
# marks them as text files (which could brick them).
*.{png,jpg,jpeg,gif,webp,woff,woff2} binary

# Linguist was excluding our test suite from the repo language statistics
# and as a result the repo indicated that no SQL is present in the repo.
# Information on overrides can be found here:
# https://github.com/github/linguist/blob/master/docs/overrides.md
test/** linguist-vendored=false
*.sql linguist-language=SQL linguist-detectable

===== sqlfluff-2.3.5/.github/FUNDING.yml =====

# sqlfluff is free to use to improve sql in whatever context you
# wish to use it.

# The BEST way to support sqlfluff if you can is to contribute TIME.
# sqlfluff is a community project which needs the community to contribute
# to if it's going to achieve its goals.
# See CONTRIBUTING.md for more details.

# If you'd like to contribute something, but your circumstances don't
# allow you to commit time, then financial support is always welcome.
# Anything you contribute will go toward supporting the project, either
# as a donation toward infrastructure and hosting costs, or to enable
# maintainers to spend more time on the project.

# For more details on how this money is used, see the GitHub sponsor
# page for sqlfluff at https://github.com/sponsors/sqlfluff.

github: sqlfluff

===== sqlfluff-2.3.5/.github/ISSUE_TEMPLATE/bug-report.yml =====

name: Bug report
description: Report a bug to help improve SQLFluff
labels: [ "bug" ]
body:
  - type: markdown
    attributes:
      value: |
        Use this bug report template to report issues with SQLFluff
        functionality, including missing syntax support for any of our
        currently supported dialects.
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: What Happened
      description: Describe what happened.
      placeholder: >
        Please provide the context in which the problem occurred and explain
        what happened.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Expected Behaviour
      description: What is your expected behaviour?
      placeholder: Please explain what behaviour you expected.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Observed Behaviour
      description: What is your observed behaviour?
      placeholder: >
        Please explain what you observed, and why you think the behaviour is
        erroneous. It is extremely helpful if you include SQL output and
        logging output with exact error messages, stack traces, etc.
    validations:
      required: true
  - type: textarea
    attributes:
      label: How to reproduce
      description: >
        What should we do to reproduce the problem?
      placeholder: >
        Please make sure you provide a reproducible step-by-step case of how
        to reproduce the problem, including the exact command(s) you ran as
        well as the `.sql` file, as minimally and precisely as possible. Keep
        in mind we do not have access to your deployment. If the issue
        requires more than two files (i.e. `.sql` file and `.sqlfluff` files)
        to reproduce, please consider providing a GitHub repo instead.
        Unfortunately non-reproducible issues will have to be closed.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Dialect
      description: What SQLFluff dialect did you use?
    validations:
      required: true
  - type: textarea
    attributes:
      label: Version
      description: Which SQLFluff version did you use?
      placeholder: >
        * Include the output of `sqlfluff --version` along with your Python
          version.
        * If you are using dbt, then please additionally include the
          sqlfluff-templater-dbt and dbt package versions.
    validations:
      required: true
  - type: textarea
    attributes:
      label: Configuration
      description: Include your SQLFluff configuration (e.g. `.sqlfluff`, `.sqlfluffignore`) here
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We
        require that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

===== sqlfluff-2.3.5/.github/ISSUE_TEMPLATE/config.yml =====

blank_issues_enabled: false

===== sqlfluff-2.3.5/.github/ISSUE_TEMPLATE/documentation.yml =====

name: Documentation Issue
description: Report an issue or a suggestion with our documentation to help improve SQLFluff
labels: [ "documentation" ]
body:
  - type: markdown
    attributes:
      value: |
        Use this documentation template to report issues with SQLFluff
        documentation, on [our website](https://docs.sqlfluff.com/en/stable/),
        GitHub, or within our command line.
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: Links or command line
      description: What do you want to happen?
      placeholder: >
        * If you found it on our website, the link may start with
          `https://docs.sqlfluff.com/en/stable/`.
        * If you found it in CLI components, your command may start with
          `sqlfluff `.
  - type: textarea
    attributes:
      label: Issue/Suggested Improvement
      description: The issue you found or the improvement you suggest.
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We
        require that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

===== sqlfluff-2.3.5/.github/ISSUE_TEMPLATE/enhancement.yml =====

name: Enhancement
description: Suggest an enhancement to help improve SQLFluff
labels: [ "enhancement" ]
body:
  - type: markdown
    attributes:
      value: >
        Use this enhancement template to suggest new features or
        functionality for SQLFluff. Please note that missing syntax support
        for any of our currently supported dialects should instead be filed
        as a [Bug Report](https://github.com/sqlfluff/sqlfluff/issues/new?assignees=&labels=bug&template=bug-report.yml).
  - type: checkboxes
    attributes:
      label: Search before asking
      description: >
        Please make sure to search in the
        [issues](https://github.com/sqlfluff/sqlfluff/issues) first to see
        whether the same issue was reported already.
      options:
        - label: >
            I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues)
            and found no similar issues.
          required: true
  - type: textarea
    attributes:
      label: Description
      description: A short description of the feature
    validations:
      required: true
  - type: textarea
    attributes:
      label: Use case
      description: What do you want to happen?
      placeholder: >
        Rather than telling us how you might implement this feature, try to
        take a step back and describe what you are trying to achieve.
  - type: textarea
    attributes:
      label: Dialect
      description: If the enhancement relates to a particular dialect, which one?
    validations:
      required: true
  - type: checkboxes
    attributes:
      label: Are you willing to work on and submit a PR to address the issue?
      description: >
        This is absolutely not required, but we are happy to guide you in the
        contribution process, especially if you already have a good
        understanding of how to implement the fix. SQLFluff is a totally
        community-driven project and we love to bring new contributors in.
      options:
        - label: Yes I am willing to submit a PR!
  - type: checkboxes
    attributes:
      label: Code of Conduct
      description: |
        The Code of Conduct helps create a safe space for everyone. We
        require that everyone agrees to it.
      options:
        - label: >
            I agree to follow this project's
            [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
          required: true
  - type: markdown
    attributes:
      value: "Thanks for completing our form!"

===== sqlfluff-2.3.5/.github/PULL_REQUEST_TEMPLATE.md =====

### Brief summary of the change made

### Are there any other side effects of this change that we should be aware of?

### Pull Request checklist

- [ ] Please confirm you have completed any of the necessary steps below.

- Included test cases to demonstrate any code changes, which may be one or more of the following:
  - `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
  - `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `tox -e generate-fixture-yml`).
  - Full autofix test cases in `test/fixtures/linter/autofix`.
  - Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.

===== sqlfluff-2.3.5/.github/labeler.yml =====

ansi:
  - "/(ansi)/i"
athena:
  - "/(athena)/i"
bigquery:
  - "/(bigquery)/i"
clickhouse:
  - "/(clickhouse)/i"
databricks:
  - "/(databricks)/i"
db2:
  - "/(db2)/i"
duckdb:
  - "/(duckdb)/i"
exasol:
  - "/(exasol)/i"
greenplum:
  - "/(greenplum)/i"
hive:
  - "/(hive)/i"
mysql:
  - "/(mysql)/i"
oracle:
  - "/(oracle)/i"
postgres:
  - "/(postgres)/i"
redshift:
  - "/(redshift)/i"
snowflake:
  - "/(snowflake)/i"
soql:
  - "/(soql)/i"
sparksql:
  - "/(sparksql)/i"
sqlite:
  - "/(sqlite)/i"
t-sql:
  - "/(t-sql|tsql)/i"
teradata:
  - "/(teradata)/i"
trino:
  - "/(trino)/i"

===== sqlfluff-2.3.5/.github/release-drafter.yml =====

template: |
  ## What’s Changed

  $CHANGES
exclude-labels:
  - 'skip-changelog'
categories:
  - title: '🚀 Enhancements'
    label: 'enhancement'
  - title: '🐛 Bug Fixes'
    label: 'bug'

===== sqlfluff-2.3.5/.github/workflows/add-issue-labels.yaml =====

name: "Add Issue Labels"

on:
  issues:
    types: [opened]

jobs:
  triage:
    runs-on: ubuntu-latest
    steps:
      # Update .github/labeler.yml for new dialects
      - uses: github/issue-labeler@v3.2
        with:
          configuration-path: .github/labeler.yml
          include-title: 1
          include-body: 0
          not-before: 2023-07-06T02:54:32Z
          enable-versioned-regex: 0
          repo-token: ${{ github.token }}

===== sqlfluff-2.3.5/.github/workflows/add-to-release-notes.yml =====

#
# This updates the current draft release notes when a PR is merged
#
name: Add to Release Notes

on:
  push:
    branches:
      - main

jobs:
  release-notes:
    runs-on: ubuntu-20.04
    if: github.repository == 'sqlfluff/sqlfluff'
    steps:
      - name: Update release notes
        uses: release-drafter/release-drafter@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

===== sqlfluff-2.3.5/.github/workflows/ci-pr-comments.yml =====

# This Workflow runs in a more secure context and comments
# on pull requests.
# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
name: Comment on the pull request

# Run on completion of the CI job.
# This workflow has access to write comments on PRs even when
# that PR is triggered by a forked repo.
on:
  workflow_run:
    workflows:
      - CI
    types:
      - completed

jobs:
  comment-on-pr:
    runs-on: ubuntu-latest
    if: >
      github.event.workflow_run.event == 'pull_request'
    steps:
      - name: 'Download txt artifact'
        uses: actions/github-script@v6
        with:
          script: |
            const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{ github.event.workflow_run.id }},
            });
            const matchArtifact = artifacts.data.artifacts.filter((artifact) => {
              return artifact.name == "txt-report"
            })[0];
            const download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: matchArtifact.id,
              archive_format: 'zip',
            });
            var fs = require('fs');
            fs.writeFileSync('${{ github.workspace }}/cov-report.zip', Buffer.from(download.data));
      - name: Unzip Downloaded Artifact
        run: unzip cov-report.zip
      - name: Update PR comment with coverage report.
        uses: actions/github-script@v6
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            // First list the existing comments
            const trigger_str = 'Coverage Results';
            console.log("Getting existing comments...");
            const { promises: fs } = require('fs');
            const issue_number = await fs.readFile('pr-number.txt', 'utf8');
            console.log("Issue number: " + issue_number);
            const comments = await github.paginate(
              github.rest.issues.listComments,
              {
                owner: 'sqlfluff',
                repo: 'sqlfluff',
                issue_number: Number(issue_number)
              }
            );
            let comment_id = null;
            console.log("Got %d comments", comments.length);
            comments.forEach(comment => {
              if (comment.body.indexOf(trigger_str) >= 0) {
                console.log("Found target comment ID: %d", comment.id);
                comment_id = comment.id;
              } else {
                console.log("Comment ID %d not valid with body:\n%s.", comment.id, comment.body);
              }
            });
            const previous_outcome = await fs.readFile('outcome.txt', 'utf8');
            console.log("Previous coverage step outcome: %s", previous_outcome);
            if (previous_outcome == "success\n") {
              status_emoji = "✅";
            } else {
              status_emoji = "⚠️";
            }
            const content = await fs.readFile('coverage-report.txt', 'utf8');
            body = "# " + trigger_str + " " + status_emoji + "\n```\n" + content + "\n```\n";
            if (comment_id > 0) {
              console.log("Updating comment id: %d", comment_id);
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: comment_id,
                body: body
              });
            } else {
              console.log("No existing comment matched, creating a new one...");
              await github.rest.issues.createComment({
                issue_number: Number(issue_number),
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: body
              });
            }

===== sqlfluff-2.3.5/.github/workflows/ci-test-dbt.yml =====

#############################
## GitHub Actions CI Tests ##
#############################
#
# This is a reusable workflow to make CI tests more modular.
# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
#
# Called by ci-tests.yml
# This one does the dbt tests
#
name: Modular SQLFluff dbt test workflow

on:
  workflow_call:
    inputs:
      python-version:
        required: true
        type: string
      dbt-version:
        required: true
        type: string
      coverage:
        required: false
        type: boolean
        default: false
    secrets:
      gh_token:
        required: true

jobs:
  modular-python-test:
    name: py${{ inputs.python-version }}-${{ inputs.dbt-version }}
    runs-on: ubuntu-latest
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: password
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ inputs.python-version }}
          cache: 'pip'
          cache-dependency-path: |
            setup.cfg
            requirements.txt
            requirements_dev.txt
      - name: Install dependencies
        run: pip install tox
      - name: Run the tests (with coverage)
        if: ${{ inputs.coverage }}
        run: tox -e ${{ inputs.dbt-version }} -- --cov=sqlfluff_templater_dbt plugins/sqlfluff-templater-dbt
      - name: Run the tests (without coverage)
        if: ${{ !inputs.coverage }}
        run: tox -e ${{ inputs.dbt-version }} -- plugins/sqlfluff-templater-dbt
      - name: Coveralls Parallel (coveralls)
        uses: coverallsapp/github-action@master
        if: ${{ inputs.coverage }}
        with:
          path-to-lcov: coverage.lcov
          github-token: ${{ secrets.gh_token }}
          flag-name: run-${{ inputs.dbt-version }}
          parallel: true
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v3
        if: ${{ inputs.coverage }}
        with:
          name: coverage-data
          path: ".coverage.*"
          if-no-files-found: ignore

===== sqlfluff-2.3.5/.github/workflows/ci-test-python.yml =====

#############################
## GitHub Actions CI Tests ##
#############################
#
# This is a reusable workflow to make CI tests more modular.
# See: https://docs.github.com/en/actions/using-workflows/reusing-workflows
#
# Called by ci-tests.yml
# This one does the python tests
#
name: Modular SQLFluff python test workflow

on:
  workflow_call:
    inputs:
      python-version:
        required: true
        type: string
      marks:
        required: false
        type: string
        default: "not integration"
      coverage:
        required: false
        type: boolean
        default: false
    secrets:
      gh_token:
        required: true

jobs:
  modular-python-test:
    runs-on: ubuntu-latest
    name: py${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ inputs.python-version }}
          cache: 'pip'
          cache-dependency-path: |
            setup.cfg
            requirements.txt
            requirements_dev.txt
      - name: Install dependencies
        run: pip install tox
      - name: Parse Python Version
        id: py_version
        run: |
          PYVERSION=$(echo "${{ inputs.python-version }}" | sed -e 's/\.//g')
          echo "PYVERSION=$PYVERSION" >> $GITHUB_OUTPUT
      # Run test process (with or without coverage).
      # Arguments after the "--" are passed through to pytest:
      #   --cov=...       The library to include in coverage reporting.
      #   -n 2            Runs with two parallel processes.
      #   test            The path to detect tests within.
      #   -m ...          The pytest marks to filter tests.
      #   --durations=16  Displays the 16 slowest runs to help with performance debugging.
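      # For example, on Python 3.12 with the default marks and coverage
      # disabled, the variables below resolve to:
      #   tox -e py312 -- -n 2 test -m "not integration" --durations=16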
      - name: Run the tests (with coverage)
        # NOTE: We have a separate job for coverage reporting because
        # it impacts performance and slows the test suite significantly.
        if: ${{ inputs.coverage }}
        run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- --cov=sqlfluff -n 2 test -m "${{ inputs.marks }}" --durations=16
      - name: Run the tests (without coverage)
        if: ${{ !inputs.coverage }}
        run: tox -e py${{ steps.py_version.outputs.PYVERSION }} -- -n 2 test -m "${{ inputs.marks }}" --durations=16
      - name: Rename coverage files with suffix
        # NOTE: We do this because we're using the same tox environment for multiple
        # test jobs and we need to make sure that their coverage files don't collide.
        id: cov_suffix
        if: ${{ inputs.coverage }}
        run: |
          COVSUFFIX=$(echo "${{ inputs.marks }}" | sed -e 's/ /-/g')
          echo "COVSUFFIX=$COVSUFFIX" >> $GITHUB_OUTPUT
          for file in .coverage.*; do mv "$file" "$file.$COVSUFFIX"; done;
      - name: Coveralls Parallel (coveralls)
        uses: coverallsapp/github-action@master
        if: ${{ inputs.coverage }}
        with:
          path-to-lcov: coverage.lcov
          github-token: ${{ secrets.gh_token }}
          flag-name: run-${{ inputs.python-version }}-${{ steps.cov_suffix.outputs.COVSUFFIX }}
          parallel: true
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v3
        if: ${{ inputs.coverage }}
        with:
          name: coverage-data
          path: ".coverage.*"
          if-no-files-found: ignore

===== sqlfluff-2.3.5/.github/workflows/ci-tests.yml =====

#############################
## GitHub Actions CI Tests ##
#############################
#
# This can be kicked off manually in the Actions tab of GitHub
# It will also run nightly at 2am
# It will run on any pull request, except non-code changes
# (images, markdown files, etc.)
#
name: CI

on:
  workflow_dispatch:
  schedule:
    # 2am each night
    - cron: '00 2 * * *'
  # Don't use pull_request_target here. See:
  # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
  pull_request:
  push:
    branches:
      - main
  merge_group:
    # Merge Queue checks requested. This feature is still in beta
    # from Github and so may need updating later.
    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#merge_group
    types: [checks_requested]

jobs:
  linting:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        jobs: [ 'linting', 'doclinting', 'docbuild', 'yamllint', 'mypy', 'doctests' ]
    name: ${{ matrix.jobs }} tests
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: pip install tox
      - name: Run the tests
        run: tox -e ${{ matrix.jobs }}

  # Test with coverage tracking on most recent python (py12).
  python-version-tests:
    name: Python Tests
    strategy:
      matrix:
        python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11", "3.12" ]
        include:
          # Default to test without coverage tracking on older python versions.
          # This saves time, as testing without coverage tracking is faster.
          - coverage: false
          # Override coverage to be true for most recent python version.
          - python-version: "3.12"
            coverage: true
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-python.yml
    with:
      python-version: ${{ matrix.python-version }}
      coverage: ${{ matrix.coverage }}
    secrets:
      gh_token: ${{ secrets.github_token }}

  dbt-tests:
    name: dbt Plugin Tests
    strategy:
      matrix:
        dbt-version: [ dbt110, dbt120, dbt130, dbt140, dbt150, dbt160 ]
        include:
          # Default to python 3.11 for dbt tests. dbt doesn't support py 3.12 yet.
          - python-version: "3.11"
          # For the two oldest dbt versions, override to python 3.9.
          - dbt-version: dbt110
            python-version: "3.9"
          - dbt-version: dbt120
            python-version: "3.9"
          # For dbt 1.3 override to python 3.10
          - dbt-version: dbt130
            python-version: "3.10"
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-dbt.yml
    with:
      python-version: ${{ matrix.python-version }}
      dbt-version: ${{ matrix.dbt-version }}
      coverage: true
    secrets:
      gh_token: ${{ secrets.github_token }}

  dialect-tests:
    name: Dialect ${{ matrix.marks }}
    strategy:
      matrix:
        include:
          # This runs the bulk of the dialect _parsing_ tests.
          #
          # It's run as a separate job as it takes longer than the CI jobs, and this
          # allows it to be rerun separately if GitHub Actions or Coverage is
          # experiencing issues.
          - marks: "parse_suite"
            # We test coverage here for some parsing routines.
            coverage: true
          # This lints all our dialect fixtures to check rules can handle a variety
          # of SQL and don't error out badly.
          #
          # It's run as a separate job as it takes longer than the CI jobs, and this
          # allows it to be rerun separately if GitHub Actions or Coverage is
          # experiencing issues.
          - marks: "fix_suite"
            coverage: false
          # This lints all our rules fixtures to check rules.
          #
          # It's run as a separate job as it takes longer than the CI jobs, and this
          # allows it to be rerun separately if GitHub Actions or Coverage is
          # experiencing issues.
          - marks: "rules_suite"
            coverage: true
    permissions:
      contents: read
      pull-requests: write
    uses: ./.github/workflows/ci-test-python.yml
    with:
      python-version: "3.12"
      marks: ${{ matrix.marks }}
      coverage: ${{ matrix.coverage }}
    secrets:
      gh_token: ${{ secrets.github_token }}

  ymlchecks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: |
          pip install -r requirements.txt -r requirements_dev.txt
          pip install -e .
      - name: Generate the YAML files
        run: |
          python test/generate_parse_fixture_yml.py
      - name: Test the generated YAML files
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            git diff
            echo "Generated YAML files do not match branch."
            echo "Please run the following command to generate these:"
            echo "  python test/generate_parse_fixture_yml.py"
            exit 1
          fi

  examples:
    runs-on: ubuntu-latest
    name: example tests
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'
      - name: Install dependencies
        run: |
          pip install -e .
          pip install tqdm
      - name: Test the example files
        run: |
          for file in examples/*
          do
            echo "Running $file"
            python "$file"
          done

  python-windows-tests:
    runs-on: windows-latest
    name: Python 3.12 Windows tests
    steps:
      - name: Set git to use LF
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"
      - name: List Env
        shell: bash
        run: |
          env | sort
      - name: Install dependencies
        shell: bash
        run: pip install tox
      - name: Run the tests
        shell: bash
        # Set the python temp dir in the working dir, as on the GitHub Actions
        # Windows machine the system temp dir (which tox uses) is often on the
        # C drive while the working dir is on the D drive, which causes problems.
        run: |
          mkdir temp_pytest
          python -m tox -e winpy -- --cov=sqlfluff -n 2 test -m "not integration"
      - name: Upload coverage data (github)
        uses: actions/upload-artifact@v3
        with:
          name: coverage-data
          path: ".coverage.*"
          if-no-files-found: ignore

  python-windows-dbt-tests:
    runs-on: windows-latest
    name: dbt Plugin Python 3.11 Windows tests
    steps:
      - name: Start PostgreSQL on Windows
        run: |
          $pgService = Get-Service -Name postgresql*
          Set-Service -InputObject $pgService -Status running -StartupType automatic
          Start-Process -FilePath "$env:PGBIN\pg_isready" -Wait -PassThru
      - name: Set postgres user password
        run: |
          & $env:PGBIN\psql --command="ALTER USER postgres PASSWORD 'password';" --command="\du"
      - name: Set git to use LF
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          # dbt doesn't support py 3.12 yet.
          python-version: "3.11"
      - name: Install dependencies
        shell: bash
        run: pip install tox
      - name: Run the tests
        shell: bash
        # Do not explicitly set a temp dir for dbt, as it causes problems.
        # None of these tests need the temp dir set.
        run: |
          python -m tox -e dbt150-winpy -- plugins/sqlfluff-templater-dbt

  pip-test-pull-request:
    # Test that using pip install works, as we've missed
    # some dependencies in the past - see #1842
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request'
    name: pip install tests
    steps:
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.12"
      - uses: actions/checkout@v3
      - name: Install dependencies
        run: |
          pip install .
      - name: Run the version test
        run: |
          sqlfluff --version
      - name: Run a simple select parse test via stdin
        run: |
          echo "select 1" | sqlfluff parse --dialect=ansi -
      - name: Run a simple select lint test via stdin
        run: |
          echo "select 1" | sqlfluff lint --dialect=ansi -
      - name: Run a simple select parse test via file
        run: |
          sqlfluff parse --dialect=ansi <(echo "select 1")
      - name: Run a simple select lint test via file
        run: |
          sqlfluff lint --dialect=ansi <(echo "select 1")

  coveralls_finish:
    name: Finalise coveralls.
    needs: [python-version-tests, dbt-tests, python-windows-tests, dialect-tests]
    runs-on: ubuntu-latest
    steps:
      - name: Coveralls Finished
        uses: coverallsapp/github-action@master
        with:
          github-token: ${{ secrets.github_token }}
          parallel-finished: true

  coverage_check:
    name: Combine & check 100% coverage.
    runs-on: ubuntu-latest
    needs: [python-version-tests, dbt-tests, python-windows-tests, dialect-tests]
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.12"
      - run: python -m pip install --upgrade coverage[toml]
      - name: Download coverage data.
        uses: actions/download-artifact@v3
        with:
          name: coverage-data
      - name: Combine coverage & fail if it's <100%.
        id: report_coverage
        # NOTE: Setting the pipefail option here means that even when
        # piping the output to `tee`, we still get the exit code of the
        # `coverage report` command.
        run: |
          set -o pipefail
          python -m coverage combine
          python -m coverage html --skip-covered --skip-empty
          python -m coverage report --fail-under=100 --skip-covered --skip-empty -m | tee coverage-report.txt
      - name: Upload HTML report if check failed.
        uses: actions/upload-artifact@v3
        with:
          name: html-report
          path: htmlcov
        if: failure() && github.event_name == 'pull_request'
      - name: Stash PR Number.
        if: always() && github.event_name == 'pull_request'
        # NOTE: We do this so we know what PR to comment on when we pick up the report.
        run: |
          echo ${{ github.event.number }} > ./pr-number.txt
          echo ${{ steps.report_coverage.outcome }} > ./outcome.txt
      - name: Upload TXT report always (to add as comment to PR).
        # NOTE: We don't actually comment on the PR from here, we'll do that
        # by triggering a separate, more secure workflow.
        # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
        uses: actions/upload-artifact@v3
        with:
          name: txt-report
          path: |
            coverage-report.txt
            pr-number.txt
            outcome.txt
        if: always() && github.event_name == 'pull_request'

===== sqlfluff-2.3.5/.github/workflows/create-release-pull-request.yaml =====

name: Create release pull request

on:
  workflow_dispatch:
    inputs:
      newVersionNumber:
        description: 'New version number'
        required: true

jobs:
  run:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Abort if branch already exists
        run: |
          _check_branch=$(git ls-remote --heads origin prep-${{ github.event.inputs.newVersionNumber }})
          if [[ -z ${_check_branch} ]]; then
            echo "Release branch doesn't exist yet, continuing"
          else
            echo "Release branch already exists, aborting. Run the Python release script locally."
            exit 1
          fi
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          pip install requests click pyyaml ghapi
      - name: Prepare release
        run: |
          python util.py release ${{ github.event.inputs.newVersionNumber }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ secrets.GITHUB_REPOSITORY_OWNER }}
      - name: Create pull request
        uses: peter-evans/create-pull-request@v4
        with:
          delete-branch: true
          branch: prep-${{ github.event.inputs.newVersionNumber }}
          commit-message: "Bump to version ${{ github.event.inputs.newVersionNumber }}"
          title: Prep version ${{ github.event.inputs.newVersionNumber }}
          body: |
            Prepare version ${{ github.event.inputs.newVersionNumber }}

            Please add all merged changes from [Release Draft][1] to the CHANGELOG.md file

            - Auto-generated by [create-pull-request][2] GitHub Action

            [1]: https://github.com/sqlfluff/sqlfluff/releases
            [2]: https://github.com/peter-evans/create-pull-request
          labels: release

===== sqlfluff-2.3.5/.github/workflows/publish-dbt-templater-release-to-pypi.yaml =====

name: Publish dbt templater PyPI Version

on:
  release:
    types:
      - published
  workflow_dispatch:

jobs:
  run:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.7"
      - name: Install Dependencies
        run: |
          pip install --upgrade pip tox
      - name: Build Distribution (dbt plugin)
        # tox commands run relative to the repo root.
        run: tox -e build-dist -- plugins/sqlfluff-templater-dbt
      - name: Copy builds to main dist folder
        # We move them here so that the github action can still access them
        run: cp -r plugins/sqlfluff-templater-dbt/dist/. dist/
      - name: Publish Python distribution to PyPI
        uses: pypa/gh-action-pypi-publish@master
        with:
          user: __token__
          password: ${{ secrets.PYPI_DBT_TEMPLATER_TOKEN }}
          skip_existing: true

===== sqlfluff-2.3.5/.github/workflows/publish-sqlfluff-docker-image-to-dockerhub.yaml =====

# Create and push Docker image of latest release to DockerHub.
name: Publish SQLFluff DockerHub Version

on:
  release:
    types:
      - published
  workflow_dispatch:

# Create tag for integration test.
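# (With the DOCKERHUB_USERNAME secret filled in, this resolves to a tag of
# the form "<dockerhub-username>/sqlfluff:test".)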
env:
  TEST_TAG: ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:test

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      # Get the version of latest release in
      # order to tag published Docker image.
      - name: Get latest release name
        id: latest_release
        uses: pozetroninc/github-action-get-latest-release@master
        with:
          repository: ${{ github.repository }}
      # Setup QEMU and Buildx to allow for multi-platform builds.
      - name: Set up QEMU
        id: docker_qemu
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        id: docker_buildx
        uses: docker/setup-buildx-action@v1
      # Authenticate with DockerHub.
      - name: Login to DockerHub
        id: docker_login
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      # Build amd64 image to use in the integration test.
      - name: Build and export to Docker
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          load: true
          tags: ${{ env.TEST_TAG }}
          cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
          cache-to: type=inline
      # Integration test to validate newly created image is working.
      - name: Test Docker image
        id: docker_test
        run: |
          echo "SELECT 1" > test.sql
          docker run --rm -i -v $PWD:/sql ${{ env.TEST_TAG }} lint --dialect ansi /sql/test.sql
      # Build arm64 image (amd64 is cached from docker_build step) and export to DockerHub.
      # N.B. We tag this image as both latest and with its version number.
      - name: Build and push
        id: docker_build_push
        uses: docker/build-push-action@v2
        with:
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
            ${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:${{ steps.latest_release.outputs.release }}
          cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/sqlfluff:latest
          cache-to: type=inline

===== sqlfluff-2.3.5/.github/workflows/publish-sqlfluff-release-to-pypi.yaml =====

name: Publish SQLFluff PyPI Version

on:
  release:
    types:
      - published
  workflow_dispatch:

jobs:
  run:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: "3.7"
      - name: Install Dependencies
        run: |
          pip install --upgrade pip tox
      - name: Build Distribution (Core)
        run: tox -e build-dist
      - name: Publish Python distribution to PyPI
        uses: pypa/gh-action-pypi-publish@master
        with:
          user: __token__
          password: ${{ secrets.PYPI_TOKEN }}
          skip_existing: true

===== sqlfluff-2.3.5/.gitignore =====

# Ignore IDE files
.vscode
.idea
/.sqlfluff
**/.DS_Store

# Ignore Python cache and prebuilt things
.cache
__pycache__
*.egg-info
*.pyc
build
_build
dist
.pytest_cache

# Ignore the Environment
env
.tox
venv
.venv
.python-version

# Ignore coverage reports
.coverage
.coverage.*
coverage.xml
htmlcov

# Ignore test reports
.test-reports
test-reports

# Ignore root testing sql & python files
/test*.sql
/test*.py
/test*.txt
/.hypothesis/

# Ignore dbt outputs from testing
/target

# Ignore any timing outputs
/*.csv

# Ignore conda environment.yml contributors might be using and direnv config
environment.yml
.envrc

**/*FIXED.sql
*.prof

# Ignore temp packages.yml generated during testing.
plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml

# VSCode
.vscode
*.code-workspace

===== sqlfluff-2.3.5/.pre-commit-config.yaml =====

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      # If adding any exceptions here, make sure to add them to .editorconfig as well
      - id: end-of-file-fixer
        exclude: |
          (?x)^(
            test/fixtures/linter/sqlfluffignore/|
            test/fixtures/config/inheritance_b/example.sql|
            test/fixtures/config/inheritance_b/nested/example.sql|
            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/trailing_newlines.sql|
            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/trailing_newlines.sql
          )$
      - id: trailing-whitespace
        exclude: |
          (?x)^(
            test/fixtures/linter/indentation_errors.sql|
            test/fixtures/templater/jinja_d_roundtrip/test.sql|
            test/fixtures/config/inheritance_b/example.sql|
            test/fixtures/config/inheritance_b/nested/example.sql|
            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sq|
            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/last_day.sql|
            plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0/last_day.sql|
            test/fixtures/linter/sqlfluffignore/
          )$
  - repo: https://github.com/psf/black
    rev: 22.3.0
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.4.1
    hooks:
      - id: mypy
        args: [--ignore-missing-imports]
        additional_dependencies:
          [
            types-toml,
            types-pkg_resources,
            types-chardet,
            types-appdirs,
            types-colorama,
            types-pyyaml,
            types-regex,
          ]
        files: ^src/sqlfluff/.*
  - repo: https://github.com/pycqa/flake8
    rev: 4.0.1
    hooks:
      - id: flake8
        additional_dependencies: [flake8-black>=0.2.4, flake8-docstrings]
  - repo: https://github.com/pycqa/doc8
    rev: 0.10.1
    hooks:
      - id: doc8
        args: [--file-encoding, utf8]
        files: docs/source/.*\.rst$
  - repo: https://github.com/adrienverge/yamllint.git
    rev: v1.26.3
    hooks:
      - id: yamllint
        args: [-c=.yamllint]
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # Ruff version.
    rev: "v0.0.243"
    hooks:
      - id: ruff

===== sqlfluff-2.3.5/.pre-commit-hooks.yaml =====

- id: sqlfluff-lint
  name: sqlfluff-lint
  # Set `--processes 0` to use maximum parallelism
  entry: sqlfluff lint --processes 0
  language: python
  description: "Lints sql files with `SQLFluff`"
  types: [sql]
  require_serial: true
  additional_dependencies: []
- id: sqlfluff-fix
  name: sqlfluff-fix
  # Set a couple of default flags:
  #   - `--force` to disable confirmation
  #   - `--show-lint-violations` shows issues to not require running `sqlfluff lint`
  #   - `--processes 0` to use maximum parallelism
  # By default, this hook applies all rules.
  entry: sqlfluff fix --force --show-lint-violations --processes 0
  language: python
  description: "Fixes sql lint errors with `SQLFluff`"
  types: [sql]
  require_serial: true
  additional_dependencies: []
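
# An illustrative consumer configuration for these hooks, in a downstream
# repo's .pre-commit-config.yaml (the `rev` shown is only an example):
#
#   repos:
#     - repo: https://github.com/sqlfluff/sqlfluff
#       rev: 2.3.5
#       hooks:
#         - id: sqlfluff-lint
#         - id: sqlfluff-fix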

===== sqlfluff-2.3.5/.readthedocs.yml =====

# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Build documentation in the docs/source directory with Sphinx
sphinx:
  configuration: docs/source/conf.py

# Don't build any additional formats
formats: []

# Optionally set the version of Python and requirements required to build your docs.
# In our case we need both the docs requirements and the package itself.
python:
  install:
    - requirements: requirements.txt
    - requirements: docs/requirements.txt
    - method: setuptools
      path: .

build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  jobs:
    # Before building, generate the rule docs
    pre_build:
      - python docs/generate-rule-docs.py

===== sqlfluff-2.3.5/.ruff.toml =====

extend-select = ["I", "D"]

# D105: Missing docstring in magic method
# D107: Missing docstring in __init__
# D418: Function/ Method decorated with @overload shouldn’t contain a docstring
ignore = ["D107", "D105", "D418"]

[isort]
# Mark sqlfluff, test and its plugins as known first party
known-first-party = [
  "sqlfluff",
  "sqlfluff_plugin_example",
  "sqlfluff_templater_dbt",
  "test",
]

[pydocstyle]
convention = "google"

===== sqlfluff-2.3.5/.yamllint =====

---
extends: default

ignore: |
  .tox/
  .venv/
  dbt_modules/
  dbt_packages/

rules:
  brackets: disable
  document-start: disable
  indentation:
    indent-sequences: whatever
  line-length: disable
  truthy:
    check-keys: false  # .github workflow uses "on:" (but not as a truthy value)

===== sqlfluff-2.3.5/CHANGELOG.md =====

# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2.3.5] - 2023-10-27

## Highlights

This is a fairly minor release, primarily bugfixes and dialect improvements.
For python API users, there's the addition of a public method on the
`FluffConfig` object allowing the construction of a config object from
multiple strings to mimic the effect of nested config files in the CLI
(a short sketch follows at the end of this section).

This release also includes a selection of internal refactoring and
reorganisation to support future development work.

This also sees the first contributions by
[@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000) &
[@kang8](https://github.com/kang8), particularly notable in that both were
contributions to SQLFluff documentation! 🎉🎉🏆🎉🎉
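
As a minimal sketch of the new API (the method name follows the linked
`#5349` PR below; treat the exact signature as indicative rather than
definitive):

```python
from sqlfluff.core import FluffConfig

# Combine multiple raw config strings, with later strings overriding
# earlier ones - mimicking nested .sqlfluff files on the command line.
config = FluffConfig.from_strings(
    "[sqlfluff]\ndialect = ansi\n",
    "[sqlfluff]\nmax_line_length = 100\n",
)
assert config.get("dialect") == "ansi"
assert config.get("max_line_length") == 100
```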

## What’s Changed

* One (very) small typing improvements [#5355](https://github.com/sqlfluff/sqlfluff/pull/5355) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Unpick dependencies between modules in `sqlfluff.core` [#5348](https://github.com/sqlfluff/sqlfluff/pull/5348) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve SparkSQL re-parsing issue + test validation in test suite. [#5351](https://github.com/sqlfluff/sqlfluff/pull/5351) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Support ALTER MASKING POLICY [#5350](https://github.com/sqlfluff/sqlfluff/pull/5350) [@jmks](https://github.com/jmks)
* Add a public API for nesting config strings. [#5349](https://github.com/sqlfluff/sqlfluff/pull/5349) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update handling of dbt compilation errors [#5345](https://github.com/sqlfluff/sqlfluff/pull/5345) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake - Extend Column Default Constraint [#5343](https://github.com/sqlfluff/sqlfluff/pull/5343) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix the dbt anchor link in the realworld documentation [#5341](https://github.com/sqlfluff/sqlfluff/pull/5341) [@kang8](https://github.com/kang8)
* Update README.md [#5340](https://github.com/sqlfluff/sqlfluff/pull/5340) [@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000)
* Logic to render variants of Jinja templates for more coverage. [#5339](https://github.com/sqlfluff/sqlfluff/pull/5339) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Templater slicing refactoring of `RawFileSlice` [#5338](https://github.com/sqlfluff/sqlfluff/pull/5338) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: Support multiple statements in the `BEGIN..EXCEPTION..END` [#5322](https://github.com/sqlfluff/sqlfluff/pull/5322) [@abdel](https://github.com/abdel)
* Remove codecov traces [#5337](https://github.com/sqlfluff/sqlfluff/pull/5337) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@ShubhamJagtap2000](https://github.com/ShubhamJagtap2000) made their first contribution in [#5340](https://github.com/sqlfluff/sqlfluff/pull/5340)
* [@kang8](https://github.com/kang8) made their first contribution in [#5341](https://github.com/sqlfluff/sqlfluff/pull/5341)

## [2.3.4] - 2023-10-17

## Highlights

This is a fairly small bugfix release, mostly to resolve a bug introduced in
2.3.3 with commas and LT09. This also includes a couple of additional small
performance improvements and some dialect improvements for Oracle, BigQuery
and MySQL.

Thanks in particular to [@bonnal-enzo](https://github.com/bonnal-enzo) who
made their first contribution as part of this release 🎉🎉🏆🎉🎉.

## What’s Changed

* Commas fix in LT09 [#5335](https://github.com/sqlfluff/sqlfluff/pull/5335) [@alanmcruickshank](https://github.com/alanmcruickshank)
* UUID Comparisons [#5332](https://github.com/sqlfluff/sqlfluff/pull/5332) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Two depth map performance improvements [#5333](https://github.com/sqlfluff/sqlfluff/pull/5333) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stash parent idx with parent reference [#5331](https://github.com/sqlfluff/sqlfluff/pull/5331) [@alanmcruickshank](https://github.com/alanmcruickshank)
* `Set` to `FrozenSet` in segment class_types [#5334](https://github.com/sqlfluff/sqlfluff/pull/5334) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for ANY_VALUE( _ HAVING MIN/MAX _ ) to BigQuery dialect [#5321](https://github.com/sqlfluff/sqlfluff/pull/5321) [@bonnal-enzo](https://github.com/bonnal-enzo)
* Fix parsing error when using quoted slash in Oracle [#5323](https://github.com/sqlfluff/sqlfluff/pull/5323) [@joaostorrer](https://github.com/joaostorrer)
* Add support for functions and procedures calls via database link in Oracle [#5326](https://github.com/sqlfluff/sqlfluff/pull/5326) [@joaostorrer](https://github.com/joaostorrer)
* Fix parsing error with table name '_' in MySQL [#5324](https://github.com/sqlfluff/sqlfluff/pull/5324) [@joaostorrer](https://github.com/joaostorrer)

## New Contributors

* [@bonnal-enzo](https://github.com/bonnal-enzo) made their first contribution in [#5321](https://github.com/sqlfluff/sqlfluff/pull/5321)

## [2.3.3] - 2023-10-13

## Highlights

There's a *lot* in this release. Most of it is under the covers and so
shouldn't cause any breaking changes for most users (see the short sketch at
the end of this section). If your use case depends on some of the internals
of SQLFluff, you may find some breaking changes. The bigger changes are:

- Python 3.12 support is now official (although older releases may also work
  as only a few changes were required for full 3.12 support).
- We've done a significant re-write of the parsing engine to remove some
  unnecessary segment manipulation and get us closer to "single pass"
  parsing. This changes the internal API being used on any `.match()`
  methods, and also removes the `parse_grammar` attribute on any dialect
  segments. We are not aware of any 3rd party libraries which rely on these
  APIs however, and so have not triggered a more major release. These changes
  lead to significant performance improvements during parsing.
- Standardisation of terminators in the parser, and the introduction of the
  `ParseMode` option, has enabled the removal of the `StartsWith`,
  `GreedyUntil` and `EphemeralSegment` parser classes.
- Several validation checks have been revised in this release, which should
  both improve performance (by reducing duplication) and be more effective in
  preventing the application of any fixes which would result in unparsable
  files.

Alongside the big things, this also includes a host of bugfixes, dialect
improvements and CI/testing improvements.

This release also sees a bumper crop of new contributors, thanks to
[@dehume](https://github.com/dehume), [@andychannery](https://github.com/andychannery),
[@Kylea650](https://github.com/Kylea650), [@robin-alphasophia](https://github.com/robin-alphasophia),
[@jtbg](https://github.com/jtbg), [@r-petit](https://github.com/r-petit),
[@bpfaust](https://github.com/bpfaust) & [@freewaydev](https://github.com/freewaydev)
who all made their first contributions in this release! 🎉🎉🎉
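
For most users the practical upshot is that the public API is unchanged by
the rewrite; a minimal sketch using the simple API (the printed codes are
indicative only):

```python
import sqlfluff

# The public lint/parse entrypoints behave as before; only the internal
# `.match()` signatures and dialect `parse_grammar` attributes changed.
violations = sqlfluff.lint("SELECT 1 from tbl\n", dialect="ansi")
print([violation["code"] for violation in violations])
# e.g. ['CP01'] - flagging the inconsistently capitalised "from" keyword
```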

## What’s Changed

* Oracle space between alias and column reference [#5313](https://github.com/sqlfluff/sqlfluff/pull/5313) [@joaostorrer](https://github.com/joaostorrer)
* Don't apply LT05 on templated rebreak locations #5096 [#5318](https://github.com/sqlfluff/sqlfluff/pull/5318) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Disable JJ01 unless jinja active [#5319](https://github.com/sqlfluff/sqlfluff/pull/5319) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Cache the `BaseSegment` hash in reflow [#5320](https://github.com/sqlfluff/sqlfluff/pull/5320) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Better error reporting for invalid macros [#5317](https://github.com/sqlfluff/sqlfluff/pull/5317) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for begin atomic functions in Postgres [#5316](https://github.com/sqlfluff/sqlfluff/pull/5316) [@joaostorrer](https://github.com/joaostorrer)
* Fix parsing when statement uses plus_sign_join and function in Oracle [#5315](https://github.com/sqlfluff/sqlfluff/pull/5315) [@joaostorrer](https://github.com/joaostorrer)
* Update rule docs with correct config [#5314](https://github.com/sqlfluff/sqlfluff/pull/5314) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #5258. More robust algorithm for multiline fix. [#5309](https://github.com/sqlfluff/sqlfluff/pull/5309) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: Add support for `BEGIN..EXCEPTION...END` block [#5307](https://github.com/sqlfluff/sqlfluff/pull/5307) [@abdel](https://github.com/abdel)
* Refine placement of metas around templated blocks [#5294](https://github.com/sqlfluff/sqlfluff/pull/5294) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Extend ruff checking to docstring rules [#5302](https://github.com/sqlfluff/sqlfluff/pull/5302) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for strange TSQL bugs [#5306](https://github.com/sqlfluff/sqlfluff/pull/5306) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Staging PR for #5282 [#5305](https://github.com/sqlfluff/sqlfluff/pull/5305) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve some strange whitespace indentation behaviour [#5292](https://github.com/sqlfluff/sqlfluff/pull/5292) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Simplify `_process_lint_result` [#5304](https://github.com/sqlfluff/sqlfluff/pull/5304) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Performance improvement on segment comparison [#5303](https://github.com/sqlfluff/sqlfluff/pull/5303) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor LT09 [#5299](https://github.com/sqlfluff/sqlfluff/pull/5299) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Change drop function to allow DropBehaviourGrammar with space after function name [#5295](https://github.com/sqlfluff/sqlfluff/pull/5295) [@joaostorrer](https://github.com/joaostorrer)
* Resolve click import options on autocomplete [#5293](https://github.com/sqlfluff/sqlfluff/pull/5293) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Updated docstrings with missing args/returns/etc info, added missing docstrings, minor formatting fixes.
  [#5278](https://github.com/sqlfluff/sqlfluff/pull/5278) [@freewaydev](https://github.com/freewaydev)
* Use ruff rule I replace isort [#5289](https://github.com/sqlfluff/sqlfluff/pull/5289) [@zhongjiajie](https://github.com/zhongjiajie)
* Snowflake: Parse ALTER DATABASE statement [#5284](https://github.com/sqlfluff/sqlfluff/pull/5284) [@jmks](https://github.com/jmks)
* Snowflake: Parse ALTER ACCOUNT statements [#5283](https://github.com/sqlfluff/sqlfluff/pull/5283) [@jmks](https://github.com/jmks)
* Snowflake: create AlterProcedureStatementSegment [#5291](https://github.com/sqlfluff/sqlfluff/pull/5291) [@moreaupascal56](https://github.com/moreaupascal56)
* Rewrite of matching interface [#5230](https://github.com/sqlfluff/sqlfluff/pull/5230) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Follow noqa in block comments [#5133](https://github.com/sqlfluff/sqlfluff/pull/5133) [@daviewales](https://github.com/daviewales)
* Fix insert on conflict with function in Postgres [#5286](https://github.com/sqlfluff/sqlfluff/pull/5286) [@joaostorrer](https://github.com/joaostorrer)
* Add support for Pivot and Unpivot clauses in Oracle [#5285](https://github.com/sqlfluff/sqlfluff/pull/5285) [@joaostorrer](https://github.com/joaostorrer)
* Adding "create table as" for greenplum dialect [#5173](https://github.com/sqlfluff/sqlfluff/pull/5173) [@bpfaust](https://github.com/bpfaust)
* Update CI to python 3.12 [#5267](https://github.com/sqlfluff/sqlfluff/pull/5267) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Add CreateResourceMonitorStatementSegment & AlterResourceMonitorStatementSegment [#5272](https://github.com/sqlfluff/sqlfluff/pull/5272) [@moreaupascal56](https://github.com/moreaupascal56)
* [TSQL] Add create fulltext index statement segment class [#5274](https://github.com/sqlfluff/sqlfluff/pull/5274) [@r-petit](https://github.com/r-petit)
* Snowflake: Add CreateSequenceStatementSegment & AlterSequenceStatementSegment [#5270](https://github.com/sqlfluff/sqlfluff/pull/5270) [@moreaupascal56](https://github.com/moreaupascal56)
* Add CommaSegment to AlterWarehouseStatementSegment SET clause [#5268](https://github.com/sqlfluff/sqlfluff/pull/5268) [@moreaupascal56](https://github.com/moreaupascal56)
* Snowflake: Parse EXECUTE IMMEDIATE clause [#5275](https://github.com/sqlfluff/sqlfluff/pull/5275) [@jmks](https://github.com/jmks)
* TSQL: Add missing `HISTORY_RETENTION_PERIOD` sequence to the table option segment [#5273](https://github.com/sqlfluff/sqlfluff/pull/5273) [@r-petit](https://github.com/r-petit)
* Snowflake: Fix ScalingPolicy and WarehouseType Refs in WarehouseObjectProperties and use ObjectReferenceSegment in AlterWarehouseStatementSegment [#5264](https://github.com/sqlfluff/sqlfluff/pull/5264) [@moreaupascal56](https://github.com/moreaupascal56)
* Finish the removal of `GreedyUntil` [#5263](https://github.com/sqlfluff/sqlfluff/pull/5263) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for date operations with intervals in Oracle [#5262](https://github.com/sqlfluff/sqlfluff/pull/5262) [@joaostorrer](https://github.com/joaostorrer)
* Change RawSegment `type` to `instance_types` [#5253](https://github.com/sqlfluff/sqlfluff/pull/5253) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Revise MatchableType -> Matchable [#5252](https://github.com/sqlfluff/sqlfluff/pull/5252) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Unnest fixing and re-address validation triggers [#5249](https://github.com/sqlfluff/sqlfluff/pull/5249)
* Resolves #5174: Snowflake alter table constraint [#5247](https://github.com/sqlfluff/sqlfluff/pull/5247) [@andychannery](https://github.com/andychannery)
* Bring together the generic segments [#5243](https://github.com/sqlfluff/sqlfluff/pull/5243) [@alanmcruickshank](https://github.com/alanmcruickshank)
* minor: update docs with correct link to airflow ds_filter [#5244](https://github.com/sqlfluff/sqlfluff/pull/5244) [@jtbg](https://github.com/jtbg)
* #5245 - Snowflake dialect: Adds support for variable definitions in scripting blocks [#5246](https://github.com/sqlfluff/sqlfluff/pull/5246) [@robin-alphasophia](https://github.com/robin-alphasophia)
* Introduce "word" segment [#5234](https://github.com/sqlfluff/sqlfluff/pull/5234) [@alanmcruickshank](https://github.com/alanmcruickshank)
* #5239 Added (basic) support for properly linted Snowflake scripting [#5242](https://github.com/sqlfluff/sqlfluff/pull/5242) [@robin-alphasophia](https://github.com/robin-alphasophia)
* Allow Snowflake pipe integration to be quoted or unquoted [#5241](https://github.com/sqlfluff/sqlfluff/pull/5241) [@Kylea650](https://github.com/Kylea650)
* Fix LT01 alignment regression #4023 [#5238](https://github.com/sqlfluff/sqlfluff/pull/5238) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for Oracle non-ANSI joins [#5231](https://github.com/sqlfluff/sqlfluff/pull/5231) [@joaostorrer](https://github.com/joaostorrer)
* add azure_storage_queue and quoted providers [#5236](https://github.com/sqlfluff/sqlfluff/pull/5236) [@Kylea650](https://github.com/Kylea650)
* Set type automatically within the lexer. [#5232](https://github.com/sqlfluff/sqlfluff/pull/5232) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #5225: Snowflake unparsable select replace [#5227](https://github.com/sqlfluff/sqlfluff/pull/5227) [@andychannery](https://github.com/andychannery)
* Spark Accessor Grammars [#5226](https://github.com/sqlfluff/sqlfluff/pull/5226) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Test Script Timing [#5228](https://github.com/sqlfluff/sqlfluff/pull/5228) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Unify lexer names and types for brackets [#5229](https://github.com/sqlfluff/sqlfluff/pull/5229) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #3176: Snowflake unparsable array casting [#5224](https://github.com/sqlfluff/sqlfluff/pull/5224) [@andychannery](https://github.com/andychannery)
* BigQuery system time syntax [#5220](https://github.com/sqlfluff/sqlfluff/pull/5220) [@greg-finley](https://github.com/greg-finley)
* Parser test nits [#5217](https://github.com/sqlfluff/sqlfluff/pull/5217) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove `parse_grammar` [#5189](https://github.com/sqlfluff/sqlfluff/pull/5189) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Revise segment whitespace validation [#5194](https://github.com/sqlfluff/sqlfluff/pull/5194) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for bpchar datatype in postgres [#5215](https://github.com/sqlfluff/sqlfluff/pull/5215) [@joaostorrer](https://github.com/joaostorrer)
* Resolve #5203: `BaseSegment.copy()` isolation [#5206](https://github.com/sqlfluff/sqlfluff/pull/5206) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Materialize Syntax [#5210](https://github.com/sqlfluff/sqlfluff/pull/5210) [@dehume](https://github.com/dehume)
* Validate fix parsing based on match_grammar [#5196](https://github.com/sqlfluff/sqlfluff/pull/5196) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Position assertions in BaseSegment [#5209](https://github.com/sqlfluff/sqlfluff/pull/5209) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL bracketed column constraint [#5208](https://github.com/sqlfluff/sqlfluff/pull/5208) [@greg-finley](https://github.com/greg-finley)
* Dialect spacing & quoting issues [#5205](https://github.com/sqlfluff/sqlfluff/pull/5205) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update comment workflow again [#5201](https://github.com/sqlfluff/sqlfluff/pull/5201) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stash PR Number and hydrate later [#5200](https://github.com/sqlfluff/sqlfluff/pull/5200) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix API issues in github comment action [#5199](https://github.com/sqlfluff/sqlfluff/pull/5199) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Consistency check in root parse [#5191](https://github.com/sqlfluff/sqlfluff/pull/5191) [@alanmcruickshank](https://github.com/alanmcruickshank)
* PR Comment action [#5192](https://github.com/sqlfluff/sqlfluff/pull/5192) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Cache python dependencies in GHA [#5193](https://github.com/sqlfluff/sqlfluff/pull/5193) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for create server, create user mapping and import foreign schema in postgres [#5185](https://github.com/sqlfluff/sqlfluff/pull/5185) [@joaostorrer](https://github.com/joaostorrer)
* Terminators on `Anything()` + Strip _most_ of the other `parse_grammar` [#5186](https://github.com/sqlfluff/sqlfluff/pull/5186) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Parse modes for `AnyNumberOf` [#5187](https://github.com/sqlfluff/sqlfluff/pull/5187) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Introduce `parse_mode` and remove `StartsWith` & `EphemeralSegment`. [#5167](https://github.com/sqlfluff/sqlfluff/pull/5167) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@dehume](https://github.com/dehume) made their first contribution in [#5210](https://github.com/sqlfluff/sqlfluff/pull/5210)
* [@andychannery](https://github.com/andychannery) made their first contribution in [#5224](https://github.com/sqlfluff/sqlfluff/pull/5224)
* [@Kylea650](https://github.com/Kylea650) made their first contribution in [#5236](https://github.com/sqlfluff/sqlfluff/pull/5236)
* [@robin-alphasophia](https://github.com/robin-alphasophia) made their first contribution in [#5242](https://github.com/sqlfluff/sqlfluff/pull/5242)
* [@jtbg](https://github.com/jtbg) made their first contribution in [#5244](https://github.com/sqlfluff/sqlfluff/pull/5244)
* [@r-petit](https://github.com/r-petit) made their first contribution in [#5273](https://github.com/sqlfluff/sqlfluff/pull/5273)
* [@bpfaust](https://github.com/bpfaust) made their first contribution in [#5173](https://github.com/sqlfluff/sqlfluff/pull/5173)
* [@freewaydev](https://github.com/freewaydev) made their first contribution in [#5278](https://github.com/sqlfluff/sqlfluff/pull/5278)
* [@abdel](https://github.com/abdel) made their first contribution in [#5307](https://github.com/sqlfluff/sqlfluff/pull/5307)

## [2.3.2] - 2023-09-10

## Highlights

Much of this release is internal optimisation and refactoring. We're in the process of upgrading some quite old code in the parser, most of which should not be visible to end users (apart from perhaps some performance improvements!). This release also allows missing template variables in the placeholder templater to be automatically filled with the name of the variable rather than raising an error (see: [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101), and the sketch below). Beyond that this includes some dialect improvements for DuckDB, SparkSQL, Snowflake, Redshift & Postgres.

Thanks particularly to [@shyaginuma](https://github.com/shyaginuma), [@Fullcure3](https://github.com/Fullcure3), [@adilkhanekt](https://github.com/adilkhanekt) & [@pilou-komoot](https://github.com/pilou-komoot) who made their first contributions as part of this release. 🎉🎉🎉
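To make the placeholder change concrete, here is a minimal sketch using the Python API. The query, the `region` variable and the colon parameter style are all invented for this example:

```python
# A minimal sketch, assuming a project configured for the "placeholder"
# templater with colon-style parameters. The query and the "region"
# variable are illustrative only.
from sqlfluff.core import FluffConfig, Linter

config = FluffConfig.from_string(
    "[sqlfluff]\n"
    "dialect = ansi\n"
    "templater = placeholder\n"
    "\n"
    "[sqlfluff:templater:placeholder]\n"
    "param_style = colon\n"
)
linter = Linter(config=config)

# No value is supplied for ":region", so as of this release it should
# render as the literal variable name instead of raising an error.
result = linter.lint_string("SELECT * FROM sales WHERE region = :region\n")
print(result.violations)
```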
## What’s Changed

* Allow not specifying parameters names when using placeholder templater [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101) [@shyaginuma](https://github.com/shyaginuma)
* Update coverage job to run in the right conditions [#5183](https://github.com/sqlfluff/sqlfluff/pull/5183) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Duckdb: UNION BY NAME [#5176](https://github.com/sqlfluff/sqlfluff/pull/5176) [@greg-finley](https://github.com/greg-finley)
* Output coverage report direct to PR [#5180](https://github.com/sqlfluff/sqlfluff/pull/5180) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Upgrades to the parse fixture generation script [#5182](https://github.com/sqlfluff/sqlfluff/pull/5182) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor of Sequence match [#5177](https://github.com/sqlfluff/sqlfluff/pull/5177) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Simplify Greedy Match [#5178](https://github.com/sqlfluff/sqlfluff/pull/5178) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Quality of life improvements on parse fixture script [#5179](https://github.com/sqlfluff/sqlfluff/pull/5179) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Lift and shift matching algorithms [#5170](https://github.com/sqlfluff/sqlfluff/pull/5170) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Capitalise boolean values in example configs, for consistency [#5175](https://github.com/sqlfluff/sqlfluff/pull/5175) [@pilou-komoot](https://github.com/pilou-komoot)
* Pull terminator setting up into the base grammar [#5172](https://github.com/sqlfluff/sqlfluff/pull/5172) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Flip the if in sequence and un-nest [#5171](https://github.com/sqlfluff/sqlfluff/pull/5171) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Support CACHE TABLE without query [#5165](https://github.com/sqlfluff/sqlfluff/pull/5165) [@reata](https://github.com/reata)
* Remove configurable `enforce_whitespace_preceding_terminator` [#5162](https://github.com/sqlfluff/sqlfluff/pull/5162) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adding optional sequence block for columns parsing in Snowflake external tables [#5157](https://github.com/sqlfluff/sqlfluff/pull/5157) [@adilkhanekt](https://github.com/adilkhanekt)
* SparkSQL: Support ALTER TABLE SET LOCATION without partition spec [#5168](https://github.com/sqlfluff/sqlfluff/pull/5168) [@reata](https://github.com/reata)
* Tighten terminators on `Delimited` [#5161](https://github.com/sqlfluff/sqlfluff/pull/5161) [@alanmcruickshank](https://github.com/alanmcruickshank)
* `terminator` > `terminators` on StartsWith [#5152](https://github.com/sqlfluff/sqlfluff/pull/5152) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Redshift: Support SELECT INTO [#5159](https://github.com/sqlfluff/sqlfluff/pull/5159) [@reata](https://github.com/reata)
* Duckdb: Integer division [#5154](https://github.com/sqlfluff/sqlfluff/pull/5154) [@greg-finley](https://github.com/greg-finley)
* `terminator` > `terminators` on Delimited grammar [#5150](https://github.com/sqlfluff/sqlfluff/pull/5150) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tests for unparsable sections [#5149](https://github.com/sqlfluff/sqlfluff/pull/5149) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Un-nest the delimited match method [#5147](https://github.com/sqlfluff/sqlfluff/pull/5147) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Grammar .copy() assert no unexpected kwargs [#5148](https://github.com/sqlfluff/sqlfluff/pull/5148) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: CLUSTER [#5146](https://github.com/sqlfluff/sqlfluff/pull/5146) [@greg-finley](https://github.com/greg-finley)
* Postgres alter policy [#5138](https://github.com/sqlfluff/sqlfluff/pull/5138) [@Fullcure3](https://github.com/Fullcure3)

## New Contributors

* [@Fullcure3](https://github.com/Fullcure3) made their first contribution in [#5138](https://github.com/sqlfluff/sqlfluff/pull/5138)
* [@adilkhanekt](https://github.com/adilkhanekt) made their first contribution in [#5157](https://github.com/sqlfluff/sqlfluff/pull/5157)
* [@pilou-komoot](https://github.com/pilou-komoot) made their first contribution in [#5175](https://github.com/sqlfluff/sqlfluff/pull/5175)
* [@shyaginuma](https://github.com/shyaginuma) made their first contribution in [#5101](https://github.com/sqlfluff/sqlfluff/pull/5101)

## [2.3.1] - 2023-08-29

## Highlights

This release is primarily a performance release, with most major changes aimed at the linting and fixing phases of operation. Most of the longest-duration rules (excepting the layout rules) should see noticeable speed improvements. Alongside those changes, there is a selection of bugfixes and dialect improvements for Oracle, PostgreSQL, Snowflake & TSQL.

## What’s Changed

* Postgres: Update returning with alias [#5137](https://github.com/sqlfluff/sqlfluff/pull/5137) [@greg-finley](https://github.com/greg-finley)
* Reduce copying on _position_segments (improves `fix`) [#5119](https://github.com/sqlfluff/sqlfluff/pull/5119) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Import rationalisation [#5135](https://github.com/sqlfluff/sqlfluff/pull/5135) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 3 [#5115](https://github.com/sqlfluff/sqlfluff/pull/5115) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for comparison operators with space in Oracle [#5132](https://github.com/sqlfluff/sqlfluff/pull/5132) [@joaostorrer](https://github.com/joaostorrer)
* Snowflake support for bracketed query after `EXCEPT` [#5126](https://github.com/sqlfluff/sqlfluff/pull/5126) [@ulixius9](https://github.com/ulixius9)
* Treatment of null literals. #5099 [#5125](https://github.com/sqlfluff/sqlfluff/pull/5125) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow double-quoted parameters in create procedure [#5131](https://github.com/sqlfluff/sqlfluff/pull/5131) [@greg-finley](https://github.com/greg-finley)
* Fix coverage & mypy [#5134](https://github.com/sqlfluff/sqlfluff/pull/5134) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Ensure Unparsable can be given position. [#5117](https://github.com/sqlfluff/sqlfluff/pull/5117) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reduce copying in LintFix instantiation [#5118](https://github.com/sqlfluff/sqlfluff/pull/5118) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Optimise crawl behaviour of JJ01 [#5116](https://github.com/sqlfluff/sqlfluff/pull/5116) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Simplify rules with improvement to SegmentSeeker [#5113](https://github.com/sqlfluff/sqlfluff/pull/5113) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor AM07 [#5112](https://github.com/sqlfluff/sqlfluff/pull/5112) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 2 [#5110](https://github.com/sqlfluff/sqlfluff/pull/5110) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to Hierarchical Queries in Oracle [#5108](https://github.com/sqlfluff/sqlfluff/pull/5108) [@joaostorrer](https://github.com/joaostorrer)
* ✅ Strict MyPy for sqlfluff.core.parser [#5107](https://github.com/sqlfluff/sqlfluff/pull/5107) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Free up pydocstyle again [#5109](https://github.com/sqlfluff/sqlfluff/pull/5109) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Allow CREATE TABLE INHERITS with no new columns [#5100](https://github.com/sqlfluff/sqlfluff/pull/5100) [@greg-finley](https://github.com/greg-finley)
* Strict mypy in parser.segments [#5094](https://github.com/sqlfluff/sqlfluff/pull/5094) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Select Crawler Refactor: Part 1 [#5104](https://github.com/sqlfluff/sqlfluff/pull/5104) [@alanmcruickshank](https://github.com/alanmcruickshank)
* RF01 & recursive_crawl improvements [#5102](https://github.com/sqlfluff/sqlfluff/pull/5102) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix new more restrictive tox [#5103](https://github.com/sqlfluff/sqlfluff/pull/5103) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make Day as Non Reserved Keyword [#5062](https://github.com/sqlfluff/sqlfluff/pull/5062) [@ulixius9](https://github.com/ulixius9)

## [2.3.0] - 2023-08-14

## Highlights

This release brings one new dialect, two new rules and some changes to the CLI:

- We now support the [trino](https://trino.io/) dialect. This is a first version of support, so do post any issues on GitHub in the usual way. This was also the first contribution to the project from [@efung](https://github.com/efung) 🏆.
- `ST09` / `structure.join_condition_order`: which checks whether tables referenced in `JOIN` clauses are referenced in the order of their definition. By default this means that in the `ON` clause, the column referencing the table in the `FROM` clause should come before the column referencing the table in the `JOIN` clause (e.g. `... FROM a JOIN b on a.c = b.c`). This rule was also the first contribution to the project from [@thibonacci](https://github.com/thibonacci) 🏆. See the sketch after this list for an example of what both new rules flag.
- `AL08` / `aliasing.unique.column`: which checks that column aliases and names are not repeated within the same `SELECT` clause. This is normally an error as it implies the same column has been imported twice, or that two expressions have been given the same alias.
- The `--profiler` option on `sqlfluff parse` has been removed. It was only present on the `parse` command and not `lint` or `fix`, and it is just as simple to invoke the python `cProfiler` directly.
- The `--recurse` cli option and `sqlfluff.recurse` configuration option have both been removed. They both existed purely for debugging the parser, and were never used in a production setting. The improvement in other debugging messages when unparsable sections are found means that this option is no longer necessary.

Alongside these more significant changes, this release also includes:

- Performance optimisations for `AL04`, `AL05`, `AM04`, `RF01` & `ST05`, which cumulatively may save up to 30% of the total time spent in the linting phase for some projects.
- Dialect improvements for Oracle & TSQL.
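As a rough sketch of what the two new rules pick up, using the Python API (the queries, tables and aliases below are invented for illustration):

```python
# A minimal sketch of the two new rules; the queries are illustrative.
import sqlfluff

# ST09: in the ON clause, "b.c" (the JOIN-ed table) is referenced before
# "a.c" (the FROM table), so the default "earlier" ordering flags it.
st09_sql = "SELECT *\nFROM a\nJOIN b ON b.c = a.c\n"
print(sqlfluff.lint(st09_sql, dialect="ansi", rules=["ST09"]))

# AL08: the alias "total" is used for two different expressions in the
# same SELECT clause, so the second occurrence is flagged.
al08_sql = "SELECT\n    sum(x) AS total,\n    sum(y) AS total\nFROM t\n"
print(sqlfluff.lint(al08_sql, dialect="ansi", rules=["AL08"]))
```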
## What’s Changed

* Remove IdentitySet [#5093](https://github.com/sqlfluff/sqlfluff/pull/5093) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stricter typing in smaller sqlfluff.core.parser [#5088](https://github.com/sqlfluff/sqlfluff/pull/5088) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Preliminary support of Trino dialect [#4913](https://github.com/sqlfluff/sqlfluff/pull/4913) [@efung](https://github.com/efung)
* Rename ST09 [#5091](https://github.com/sqlfluff/sqlfluff/pull/5091) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Fix Clustered Index asc/desc [#5090](https://github.com/sqlfluff/sqlfluff/pull/5090) [@greg-finley](https://github.com/greg-finley)
* Parent references and more efficient path_to [#5076](https://github.com/sqlfluff/sqlfluff/pull/5076) [@alanmcruickshank](https://github.com/alanmcruickshank)
* New Rule: AL08 - column aliases must be unique [#5079](https://github.com/sqlfluff/sqlfluff/pull/5079) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for fetch first row(s) only in Oracle [#5089](https://github.com/sqlfluff/sqlfluff/pull/5089) [@joaostorrer](https://github.com/joaostorrer)
* Fix bug around quoted identifiers for ST09 [#5087](https://github.com/sqlfluff/sqlfluff/pull/5087) [@thibonacci](https://github.com/thibonacci)
* Add strict typing to the templating tracer [#5085](https://github.com/sqlfluff/sqlfluff/pull/5085) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove recurse config [#5065](https://github.com/sqlfluff/sqlfluff/pull/5065) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ✅ Strictly type dialect [#5067](https://github.com/sqlfluff/sqlfluff/pull/5067) [@pwildenhain](https://github.com/pwildenhain)
* Add new rule ST09: Joins should list the table referenced earlier (default)/later first [#4974](https://github.com/sqlfluff/sqlfluff/pull/4974) [@thibonacci](https://github.com/thibonacci)
* Remove the internal cProfiler option [#5081](https://github.com/sqlfluff/sqlfluff/pull/5081) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Optimisation on select analysis [#5082](https://github.com/sqlfluff/sqlfluff/pull/5082) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@thibonacci](https://github.com/thibonacci) made their first contribution in [#4974](https://github.com/sqlfluff/sqlfluff/pull/4974)
* [@efung](https://github.com/efung) made their first contribution in [#4913](https://github.com/sqlfluff/sqlfluff/pull/4913)

## [2.2.1] - 2023-08-09

## Highlights

This is primarily a bugfix release for 2.2.0, which introduced a bug in the `exit_code` returned by linting commands which ignored errors while setting `processes > 1`. In addition to that, this release introduces bugfixes for:

- Errors raised by two specific `dbt` exceptions.
- Issues with unwanted logging output when using `-f yaml` or `-f json` alongside the `dbt` templater.

This also introduces dialect improvements for Oracle and for `LIMIT` clauses.

Thanks also to [@adityapat3l](https://github.com/adityapat3l) who made their first contribution as part of this release! 🎉🎉🎉

## What’s Changed

* Split apart the grammar tests [#5078](https://github.com/sqlfluff/sqlfluff/pull/5078) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve pickling of errors #5066 [#5074](https://github.com/sqlfluff/sqlfluff/pull/5074) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Better context based tracking [#5064](https://github.com/sqlfluff/sqlfluff/pull/5064) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fixing limit handling for bracketed arithmetic operations [#5068](https://github.com/sqlfluff/sqlfluff/pull/5068) [@adityapat3l](https://github.com/adityapat3l)
* Never run in multiprocessing mode with only 1 file. [#5071](https://github.com/sqlfluff/sqlfluff/pull/5071) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add dbt 1.6 tests [#5073](https://github.com/sqlfluff/sqlfluff/pull/5073) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle two kinds of dbt errors more gracefully [#5072](https://github.com/sqlfluff/sqlfluff/pull/5072) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Try to silence dbt logging #5054 [#5070](https://github.com/sqlfluff/sqlfluff/pull/5070) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Move `_prune_options` within `_longest_trimmed_match`. [#5063](https://github.com/sqlfluff/sqlfluff/pull/5063) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix issue 4998 - Add backup and auto refresh grammar to redshift materialized view [#5060](https://github.com/sqlfluff/sqlfluff/pull/5060) [@adityapat3l](https://github.com/adityapat3l)
* Add mypy strict typing for sqlfluff.core.rules [#5048](https://github.com/sqlfluff/sqlfluff/pull/5048) [@pwildenhain](https://github.com/pwildenhain)
* :arrow_up: Bump mypy version in pre-commit [#5055](https://github.com/sqlfluff/sqlfluff/pull/5055) [@pwildenhain](https://github.com/pwildenhain)
* Add SQL Plus bind variable support (Oracle) [#5053](https://github.com/sqlfluff/sqlfluff/pull/5053) [@joaostorrer](https://github.com/joaostorrer)

## New Contributors

* [@adityapat3l](https://github.com/adityapat3l) made their first contribution in [#5060](https://github.com/sqlfluff/sqlfluff/pull/5060)

## [2.2.0] - 2023-08-04

## Highlights

This release changes some of the interfaces between SQLFluff core and our plugin ecosystem. The only *breaking* change is in the interface between SQLFluff and *templater* plugins (which are not common in the ecosystem, hence why this is only a minor and not a major release). For all plugins, we also recommend a different structure for their imports (especially for rule plugins which are more common in the ecosystem) - for performance and stability reasons. Some users had been experiencing very long import times with previous releases as a result of the layout of plugin imports. Users with affected plugins will begin to see a warning from this release onward, which can be resolved for their plugin by updating to a new version of that plugin which follows the guidelines. For more details (especially if you're a plugin maintainer) see our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html).

Additionally this release includes:

- Some internal performance gains which may cumulatively save roughly 10% of the time spent in the parsing phase of larger files.
- Improvements to the Simple API, including the ability to pass in a `FluffConfig` object directly, and better support for parsing config files directly from strings (see [the included example](examples/05_simple_api_config.py), and the sketch after this list).
- A bugfix for `AM06`.
- A new `--warn-unused-ignores` CLI option (and corresponding config setting) to allow warnings to be shown if any `noqa` comments in SQL files are unused.
- Improvements to Redshift, Oracle, Clickhouse, Materialize & MySQL dialects.
- A selection of internal improvements, documentation and type hints.

Thanks also to [@kaiyannameighu](https://github.com/kaiyannameighu), [@josef-v](https://github.com/josef-v), [@aglebov](https://github.com/aglebov) & [@joaostorrer](https://github.com/joaostorrer) who made their first contributions as part of this release! 🎉🎉🎉
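As a hedged sketch of the Simple API additions (the SQL and config values here are illustrative; the bundled example file linked above is the canonical reference):

```python
# A minimal sketch of passing configuration into the Simple API.
import sqlfluff
from sqlfluff.core import FluffConfig

# Config can now be parsed directly from a string...
config = FluffConfig.from_string(
    "[sqlfluff]\n"
    "dialect = postgres\n"
    # ...including the new setting which asks SQLFluff to flag "noqa"
    # comments that never match a violation (the CLI equivalent is
    # --warn-unused-ignores).
    "warn_unused_ignores = True\n"
)

# ...and the resulting object can be passed straight into the API. The
# inline ignore below never fires, so it should now be reported as unused.
violations = sqlfluff.lint("SELECT 1  -- noqa: AL08\n", config=config)
print(violations)
```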
## What’s Changed

* Mypy: Ephemeral + Tuple Return on .parse() [#5044](https://github.com/sqlfluff/sqlfluff/pull/5044) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to oracle's global and private temporary tables [#5039](https://github.com/sqlfluff/sqlfluff/pull/5039) [@joaostorrer](https://github.com/joaostorrer)
* Redshift-dialect: Support GRANT USAGE ON DATASHARE [#5007](https://github.com/sqlfluff/sqlfluff/pull/5007) [@josef-v](https://github.com/josef-v)
* :white_check_mark: Add strict typing for errors module [#5047](https://github.com/sqlfluff/sqlfluff/pull/5047) [@pwildenhain](https://github.com/pwildenhain)
* Less copying in the ParseContext [#5046](https://github.com/sqlfluff/sqlfluff/pull/5046) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adding support to use `ADD COLUMN IF NOT EXISTS` syntax on `ALTER TABLE` [#5035](https://github.com/sqlfluff/sqlfluff/pull/5035) [@wfelipew](https://github.com/wfelipew)
* Closes #4815 [#5042](https://github.com/sqlfluff/sqlfluff/pull/5042) [@joaostorrer](https://github.com/joaostorrer)
* Fix for multiprocessing warnings. [#5032](https://github.com/sqlfluff/sqlfluff/pull/5032) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Mypy gain: Remove unnecessary tuple construction in MatchResult [#5045](https://github.com/sqlfluff/sqlfluff/pull/5045) [@alanmcruickshank](https://github.com/alanmcruickshank)
* mypy strict in config [#5036](https://github.com/sqlfluff/sqlfluff/pull/5036) [@pwildenhain](https://github.com/pwildenhain)
* strict mypy: match_wrapper & match_logging [#5033](https://github.com/sqlfluff/sqlfluff/pull/5033) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MyPy on errors, helpers, markers & context + remove ParseContext.denylist [#5030](https://github.com/sqlfluff/sqlfluff/pull/5030) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Warn on unused `noqa` directives [#5029](https://github.com/sqlfluff/sqlfluff/pull/5029) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Even more mypy strict [#5023](https://github.com/sqlfluff/sqlfluff/pull/5023) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Handle windows paths better in config files. [#5022](https://github.com/sqlfluff/sqlfluff/pull/5022) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for parsing of Oracle functions with named arguments [#5027](https://github.com/sqlfluff/sqlfluff/pull/5027) [@joaostorrer](https://github.com/joaostorrer)
* DOC: Fix .sqlfluff example in Getting Started [#5026](https://github.com/sqlfluff/sqlfluff/pull/5026) [@aglebov](https://github.com/aglebov)
* Fix: Add exception to the warning & config for the BaseRule. [#5025](https://github.com/sqlfluff/sqlfluff/pull/5025) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Move from `make_template` to `render_func` in jinja and dbt [#4942](https://github.com/sqlfluff/sqlfluff/pull/4942) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Streamline imports to reduce initial load times #4917 [#5020](https://github.com/sqlfluff/sqlfluff/pull/5020) [@alanmcruickshank](https://github.com/alanmcruickshank)
* More mypy strict [#5019](https://github.com/sqlfluff/sqlfluff/pull/5019) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Simple API config and examples [#5018](https://github.com/sqlfluff/sqlfluff/pull/5018) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix some new linting issues [#5021](https://github.com/sqlfluff/sqlfluff/pull/5021) [@alanmcruickshank](https://github.com/alanmcruickshank)
* A step towards mypy strict [#5014](https://github.com/sqlfluff/sqlfluff/pull/5014) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Materialize: Make RETURNING a reserved keyword [#5017](https://github.com/sqlfluff/sqlfluff/pull/5017) [@bobbyiliev](https://github.com/bobbyiliev)
* Config from string and load default_config as resource [#5012](https://github.com/sqlfluff/sqlfluff/pull/5012) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Documentation for the test suite (#2180) [#5011](https://github.com/sqlfluff/sqlfluff/pull/5011) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support to oracle's listagg function [#4999](https://github.com/sqlfluff/sqlfluff/pull/4999) [@joaostorrer](https://github.com/joaostorrer)
* Assorted typehints [#5013](https://github.com/sqlfluff/sqlfluff/pull/5013) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor: Extract noqa methods and tests. [#5010](https://github.com/sqlfluff/sqlfluff/pull/5010) [@alanmcruickshank](https://github.com/alanmcruickshank)
* AM06 to ignore aggregate ORDER BY clauses [#5008](https://github.com/sqlfluff/sqlfluff/pull/5008) [@tunetheweb](https://github.com/tunetheweb)
* Bugfix: Treat Function name properly in grants [#5006](https://github.com/sqlfluff/sqlfluff/pull/5006) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Redshift: Add Qualify Clause [#5002](https://github.com/sqlfluff/sqlfluff/pull/5002) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Clickhouse Dialect - Support Dollar Quoted Literals [#5003](https://github.com/sqlfluff/sqlfluff/pull/5003) [@kaiyannameighu](https://github.com/kaiyannameighu)

## New Contributors

* [@kaiyannameighu](https://github.com/kaiyannameighu) made their first contribution in [#5003](https://github.com/sqlfluff/sqlfluff/pull/5003)
* [@joaostorrer](https://github.com/joaostorrer) made their first contribution in [#4999](https://github.com/sqlfluff/sqlfluff/pull/4999)
* [@aglebov](https://github.com/aglebov) made their first contribution in [#5026](https://github.com/sqlfluff/sqlfluff/pull/5026)
* [@josef-v](https://github.com/josef-v) made their first contribution in [#5007](https://github.com/sqlfluff/sqlfluff/pull/5007)

## [2.1.4] - 2023-07-25

## Highlights

This release brings some meaningful performance improvements to the parsing of complex SQL statements. In files with deeply nested expressions, we have seen up to a 50% reduction in the time spent in the parsing phase. These changes are all internal optimisations and have minimal implications for the parser. In a few isolated cases they did highlight inconsistencies in the parsing of literals, so if your use case relies on the specific structure of literal and expression parsing, you may find some small differences in how some expressions are parsed.

Additionally, this release brings new validation steps to configuration. Layout configuration is now validated on load (so users with invalid layout configurations may see some of these being caught now), and inline configuration statements in files are also now validated, both for their layout rules and for any removed or deprecated settings (see the sketch below).

On top of both, we've seen dialect improvements to Databricks, PostgreSQL, BigQuery, Snowflake & Athena.
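As a small sketch of the kind of inline configuration statement this applies to (the query and the chosen settings are illustrative):

```python
# A minimal sketch of inline configuration directives. These in-file
# statements are now validated on load, so a removed or deprecated setting
# should be reported rather than silently ignored.
import sqlfluff

sql = (
    "-- sqlfluff:dialect:postgres\n"
    "-- sqlfluff:max_line_length:120\n"
    "SELECT my_column FROM my_table\n"
)
print(sqlfluff.lint(sql))
```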
## What’s Changed

* Databricks set time zone [#5000](https://github.com/sqlfluff/sqlfluff/pull/5000) [@greg-finley](https://github.com/greg-finley)
* Terminator inheritance [#4981](https://github.com/sqlfluff/sqlfluff/pull/4981) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reduce copying in the parse phase [#4988](https://github.com/sqlfluff/sqlfluff/pull/4988) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Validate layout configs #4578 [#4997](https://github.com/sqlfluff/sqlfluff/pull/4997) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix handling of keywords for roles in ALTER ROLE statement [#4994](https://github.com/sqlfluff/sqlfluff/pull/4994) [@anzelpwj](https://github.com/anzelpwj)
* BigQuery: fixes parse error on some literals with data type and quoted [#4992](https://github.com/sqlfluff/sqlfluff/pull/4992) [@yoichi](https://github.com/yoichi)
* Correct Snowflake `CROSS JOIN` syntax [#4996](https://github.com/sqlfluff/sqlfluff/pull/4996) [@tunetheweb](https://github.com/tunetheweb)
* Remove broken 'fork me' banner from docs [#4989](https://github.com/sqlfluff/sqlfluff/pull/4989) [@greg-finley](https://github.com/greg-finley)
* feat: support athena optional WITH ORDINALITY post UNNEST function [#4991](https://github.com/sqlfluff/sqlfluff/pull/4991) [@reata](https://github.com/reata)

## [2.1.3] - 2023-07-19

## Highlights

This release is a fairly standard incremental release. Highlights include bugfixes to `RF05` and dialect improvements to Snowflake, Teradata, MySQL, TSQL, SparkSQL & Postgres.

Internally, the last few weeks have brought several improvements to developer tooling. We've also moved over to GitHub sponsorships - so if you previously used the old flattr link, you can find our new profile page at https://github.com/sponsors/sqlfluff.
## What’s Changed

* Add the which dbt flag to DbtConfigArgs with default as "compile" [#4982](https://github.com/sqlfluff/sqlfluff/pull/4982) [@moreaupascal56](https://github.com/moreaupascal56)
* feat: support tsql COPY INTO [#4985](https://github.com/sqlfluff/sqlfluff/pull/4985) [@reata](https://github.com/reata)
* fix: sparksql lateral view parse tree for multiple column alias [#4980](https://github.com/sqlfluff/sqlfluff/pull/4980) [@reata](https://github.com/reata)
* Revert "Ignore click mypy issues" [#4967](https://github.com/sqlfluff/sqlfluff/pull/4967) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Parse column named cross [#4975](https://github.com/sqlfluff/sqlfluff/pull/4975) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Group by all [#4976](https://github.com/sqlfluff/sqlfluff/pull/4976) [@greg-finley](https://github.com/greg-finley)
* Update funding yaml to use github sponsors [#4973](https://github.com/sqlfluff/sqlfluff/pull/4973) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Added DEL keyword [#4962](https://github.com/sqlfluff/sqlfluff/pull/4962) [@dflem97](https://github.com/dflem97)
* Remove mypy ignores [#4972](https://github.com/sqlfluff/sqlfluff/pull/4972) [@greg-finley](https://github.com/greg-finley)
* Allow running one rule test locally [#4963](https://github.com/sqlfluff/sqlfluff/pull/4963) [@greg-finley](https://github.com/greg-finley)
* Postgres support underscore array data type syntax [#4959](https://github.com/sqlfluff/sqlfluff/pull/4959) [@greg-finley](https://github.com/greg-finley)
* Bump issue-labeler [#4958](https://github.com/sqlfluff/sqlfluff/pull/4958) [@greg-finley](https://github.com/greg-finley)
* Standardize test fixture names [#4955](https://github.com/sqlfluff/sqlfluff/pull/4955) [@greg-finley](https://github.com/greg-finley)
* RF05 BigQuery empty identifier bug [#4953](https://github.com/sqlfluff/sqlfluff/pull/4953) [@keitherskine](https://github.com/keitherskine)
* New GitHub issue labeler library [#4952](https://github.com/sqlfluff/sqlfluff/pull/4952) [@greg-finley](https://github.com/greg-finley)
* Ignore click mypy issues [#4954](https://github.com/sqlfluff/sqlfluff/pull/4954) [@greg-finley](https://github.com/greg-finley)
* MySQL: Rename index [#4950](https://github.com/sqlfluff/sqlfluff/pull/4950) [@greg-finley](https://github.com/greg-finley)
* Adding support to ALTER TABLE with RENAME COLUMN on MySQL dialect [#4948](https://github.com/sqlfluff/sqlfluff/pull/4948) [@jrballot](https://github.com/jrballot)

## New Contributors

* [@jrballot](https://github.com/jrballot) made their first contribution in [#4948](https://github.com/sqlfluff/sqlfluff/pull/4948)
* [@keitherskine](https://github.com/keitherskine) made their first contribution in [#4953](https://github.com/sqlfluff/sqlfluff/pull/4953)
* [@reata](https://github.com/reata) made their first contribution in [#4980](https://github.com/sqlfluff/sqlfluff/pull/4980)

## [2.1.2] - 2023-07-03

## Highlights

This release resolves compatibility issues with a set of `dbt-core` versions:

- `dbt-core` 1.5.2 onwards is now properly supported.
- Support for `dbt-core` 1.1 to 1.4 has now been re-enabled, after support had to be abandoned a few releases ago.

NOTE: We cannot guarantee that SQLFluff will always continue to remain compatible with all dbt versions, particularly as the folks at dbt-labs have often backported breaking changes to their internal APIs to previous versions of `dbt-core`.
This release does at least bring more extensive internal testing to catch when this does occur, to allow our community to react. This release also resolves a potential security issue when using external libraries (and the `library_path` config setting), and contains various dialect improvements.

## What’s Changed

* docs(templater): Add documentation for `SQLFLUFF_JINJA_FILTERS` [#4932](https://github.com/sqlfluff/sqlfluff/pull/4932) [@dmohns](https://github.com/dmohns)
* Re-enable dbt 1.1 & 1.2 [#4944](https://github.com/sqlfluff/sqlfluff/pull/4944) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Re-enable dbt 1.4 & 1.3 [#4941](https://github.com/sqlfluff/sqlfluff/pull/4941) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix compatibility with dbt 1.5.2+ [#4939](https://github.com/sqlfluff/sqlfluff/pull/4939) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Security option for library path [#4925](https://github.com/sqlfluff/sqlfluff/pull/4925) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove extra code escapes from release notes docs [#4921](https://github.com/sqlfluff/sqlfluff/pull/4921) [@tunetheweb](https://github.com/tunetheweb)
* Postgres frame_clause quoted interval [#4915](https://github.com/sqlfluff/sqlfluff/pull/4915) [@greg-finley](https://github.com/greg-finley)
* Snowflake: CREATE TAG [#4914](https://github.com/sqlfluff/sqlfluff/pull/4914) [@greg-finley](https://github.com/greg-finley)
* TSQL: support for `DROP EXTERNAL TABLE` [#4919](https://github.com/sqlfluff/sqlfluff/pull/4919) [@keen85](https://github.com/keen85)
* fix(dialect-clickhouse): Support create database [#4620](https://github.com/sqlfluff/sqlfluff/pull/4620) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Snowflake: Actualize the CreateProcedureStatementSegment and CreateFunctionStatementSegment [#4908](https://github.com/sqlfluff/sqlfluff/pull/4908) [@moreaupascal56](https://github.com/moreaupascal56)
* Oracle: Add support for `$` and `#` in identifier [#4903](https://github.com/sqlfluff/sqlfluff/pull/4903) [@ulixius9](https://github.com/ulixius9)
* docs(templater): Refactor templater configuration docs [#4835](https://github.com/sqlfluff/sqlfluff/pull/4835) [@dmohns](https://github.com/dmohns)
* Handle brackets in from clause with joins [#4890](https://github.com/sqlfluff/sqlfluff/pull/4890) [@ulixius9](https://github.com/ulixius9)
* Postgres: Add support for dollar literal & mark collation as non-reserved [#4883](https://github.com/sqlfluff/sqlfluff/pull/4883) [@ulixius9](https://github.com/ulixius9)
* MySQL: ON UPDATE NOW [#4898](https://github.com/sqlfluff/sqlfluff/pull/4898) [@greg-finley](https://github.com/greg-finley)
* Support ROLLUP/CUBE in AM06 [#4892](https://github.com/sqlfluff/sqlfluff/pull/4892) [@tunetheweb](https://github.com/tunetheweb)

## [2.1.1] - 2023-05-25

## Highlights

This release fixes a compatibility issue with the latest version of dbt. It also ships various dialect improvements.
## What’s Changed

* profiles dir env var or default [#4886](https://github.com/sqlfluff/sqlfluff/pull/4886) [@JasonGluck](https://github.com/JasonGluck)
* Bigquery: Allow empty `struct` in `TO_JSON` [#4879](https://github.com/sqlfluff/sqlfluff/pull/4879) [@dimitris-flyr](https://github.com/dimitris-flyr)
* Set type of ARRAY function for BigQuery [#4880](https://github.com/sqlfluff/sqlfluff/pull/4880) [@tunetheweb](https://github.com/tunetheweb)
* Full athena SHOW coverage [#4876](https://github.com/sqlfluff/sqlfluff/pull/4876) [@dogversioning](https://github.com/dogversioning)
* Sparksql add star support in multiparameter functions [#4874](https://github.com/sqlfluff/sqlfluff/pull/4874) [@spex66](https://github.com/spex66)
* Oracle create view with EDITIONING & FORCE [#4872](https://github.com/sqlfluff/sqlfluff/pull/4872) [@ulixius9](https://github.com/ulixius9)
* Fixes pip installation link on Getting Started [#4867](https://github.com/sqlfluff/sqlfluff/pull/4867) [@segoldma](https://github.com/segoldma)
* Athena: add "weird" test cases for `group by` [#4869](https://github.com/sqlfluff/sqlfluff/pull/4869) [@KulykDmytro](https://github.com/KulykDmytro)
* Athena: add support for `CUBE` `ROLLUP` `GROUPING SETS` [#4862](https://github.com/sqlfluff/sqlfluff/pull/4862) [@KulykDmytro](https://github.com/KulykDmytro)
* Add show tables/views to athena [#4854](https://github.com/sqlfluff/sqlfluff/pull/4854) [@dogversioning](https://github.com/dogversioning)
* Adding support for NOCOPY and INSTANT algorithm on CREATE INDEX on MySQL dialect [#4865](https://github.com/sqlfluff/sqlfluff/pull/4865) [@wfelipew](https://github.com/wfelipew)
* Add link to Trino keywords (Athena v3) [#4858](https://github.com/sqlfluff/sqlfluff/pull/4858) [@KulykDmytro](https://github.com/KulykDmytro)
* TSQL: Create Role Authorization [#4852](https://github.com/sqlfluff/sqlfluff/pull/4852) [@greg-finley](https://github.com/greg-finley)
* TSQL: DEADLOCK_PRIORITY [#4853](https://github.com/sqlfluff/sqlfluff/pull/4853) [@greg-finley](https://github.com/greg-finley)
* fix(dialect-clickhouse): Support SYSTEM queries [#4625](https://github.com/sqlfluff/sqlfluff/pull/4625) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Fix #4807: LT02 & LT12 issues with empty files. [#4834](https://github.com/sqlfluff/sqlfluff/pull/4834) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Sqlite: COLLATE column constraint [#4845](https://github.com/sqlfluff/sqlfluff/pull/4845) [@greg-finley](https://github.com/greg-finley)
* Hive: Support REGEXP and IREGEXP [#4846](https://github.com/sqlfluff/sqlfluff/pull/4846) [@greg-finley](https://github.com/greg-finley)

## New Contributors

* [@dogversioning](https://github.com/dogversioning) made their first contribution in [#4854](https://github.com/sqlfluff/sqlfluff/pull/4854)
* [@segoldma](https://github.com/segoldma) made their first contribution in [#4867](https://github.com/sqlfluff/sqlfluff/pull/4867)
* [@spex66](https://github.com/spex66) made their first contribution in [#4874](https://github.com/sqlfluff/sqlfluff/pull/4874)
* [@dimitris-flyr](https://github.com/dimitris-flyr) made their first contribution in [#4879](https://github.com/sqlfluff/sqlfluff/pull/4879)
* [@JasonGluck](https://github.com/JasonGluck) made their first contribution in [#4886](https://github.com/sqlfluff/sqlfluff/pull/4886)

## [2.1.0] - 2023-05-03

## Highlights

This release brings support for dbt 1.5+. Some internals of dbt mean that SQLFluff versions prior to this release may experience errors with dbt versions post 1.5. In addition to that, there are some dialect and templating improvements bundled too:

* Support for custom Jinja filters.
* An additional configurable indent behaviour within `CASE WHEN` clauses (see the sketch after this list).
* Additional support for bracket quoted literals in TSQL and RF06.
* Dialect improvements to Snowflake, Hive, Redshift, Postgres, Clickhouse, Oracle and SQLite.
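As a hedged sketch of the new `CASE WHEN` indent option (the query is illustrative, and for brevity it uses the present-day Simple API rather than the CLI of this release):

```python
# A minimal sketch: "indented_then_contents" lives under
# [sqlfluff:indentation] and toggles whether the contents after THEN
# are treated as taking their own indent. The query is illustrative only.
import sqlfluff
from sqlfluff.core import FluffConfig

config = FluffConfig.from_string(
    "[sqlfluff]\n"
    "dialect = ansi\n"
    "\n"
    "[sqlfluff:indentation]\n"
    "indented_then_contents = False\n"
)
sql = (
    "SELECT\n"
    "    CASE\n"
    "        WHEN price > 100 THEN 'premium'\n"
    "        ELSE 'standard'\n"
    "    END AS price_band\n"
    "FROM products\n"
)
# With the option toggled, `fix` should reflow the THEN contents accordingly.
print(sqlfluff.fix(sql, config=config))
```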
## What’s Changed

* Add support for Jinja filters [#4810](https://github.com/sqlfluff/sqlfluff/pull/4810) [@dmohns](https://github.com/dmohns)
* Postgres: Allow INSERT RETURNING [#4820](https://github.com/sqlfluff/sqlfluff/pull/4820) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support partial index [#4833](https://github.com/sqlfluff/sqlfluff/pull/4833) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Make SQLFluff compatible with DBT 1.5 [#4828](https://github.com/sqlfluff/sqlfluff/pull/4828) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake dialect: Add support for comment clause in the create warehouse statement [#4823](https://github.com/sqlfluff/sqlfluff/pull/4823) [@moreaupascal56](https://github.com/moreaupascal56)
* fix(dialect-clickhouse): Support DROP statements [#4821](https://github.com/sqlfluff/sqlfluff/pull/4821) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Hive: INSERT INTO without TABLE keyword [#4819](https://github.com/sqlfluff/sqlfluff/pull/4819) [@greg-finley](https://github.com/greg-finley)
* Fix: Small typo in error message [#4814](https://github.com/sqlfluff/sqlfluff/pull/4814) [@JavierMonton](https://github.com/JavierMonton)
* Redshift: Support with no schema binding [#4813](https://github.com/sqlfluff/sqlfluff/pull/4813) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Detect tsql square bracket quotes for RF06 #4724 [#4781](https://github.com/sqlfluff/sqlfluff/pull/4781) [@daviewales](https://github.com/daviewales)
* Apply implicit indents to `WHEN` blocks and introduce `indented_then_contents` [#4755](https://github.com/sqlfluff/sqlfluff/pull/4755) [@borchero](https://github.com/borchero)
* Oracle: Update Drop Behaviour [#4803](https://github.com/sqlfluff/sqlfluff/pull/4803) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Oracle: Update bare functions [#4804](https://github.com/sqlfluff/sqlfluff/pull/4804) [@WittierDinosaur](https://github.com/WittierDinosaur)

## New Contributors

* [@daviewales](https://github.com/daviewales) made their first contribution in [#4781](https://github.com/sqlfluff/sqlfluff/pull/4781)

## [2.0.7] - 2023-04-20

## Highlights

This is a bugfix release to resolve two regressions included in 2.0.6 related to implicit indents. This also includes a bugfix for the config file location on macOS, contributed by first-time contributor [@jpuris](https://github.com/jpuris) 🎉.
## What’s Changed

* Fix regression in implicit indents [#4798](https://github.com/sqlfluff/sqlfluff/pull/4798) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix bug with brackets and implicit indents [#4797](https://github.com/sqlfluff/sqlfluff/pull/4797) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix: correct macos/osx config file location [#4795](https://github.com/sqlfluff/sqlfluff/pull/4795) [@jpuris](https://github.com/jpuris)

## New Contributors

* [@jpuris](https://github.com/jpuris) made their first contribution in [#4795](https://github.com/sqlfluff/sqlfluff/pull/4795)

## [2.0.6] - 2023-04-19

## Highlights

* Introduction of a `--quiet` option for the CLI for situations where less output is useful.
* When the `--force` option is used for `sqlfluff fix`, each file is fixed during the linting process rather than at the end.
* Bugfixes to comment and templated section indentation.
* Performance improvements to parsing.
* Bugfix to macros triggering LT01.
* Renaming `layout.end-of-file` to `layout.end_of_file` in line with other rules.
* Dialect improvements to SparkSQL, BigQuery, Hive & Snowflake.

## What’s Changed

* Snowflake: Support Temporary View [#4789](https://github.com/sqlfluff/sqlfluff/pull/4789) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Introduce `SAFE` prefix segment [#4773](https://github.com/sqlfluff/sqlfluff/pull/4773) [@dmohns](https://github.com/dmohns)
* Fix #4660: Better handling of empty files. [#4780](https://github.com/sqlfluff/sqlfluff/pull/4780) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #3538: (Fix files as we go) [#4777](https://github.com/sqlfluff/sqlfluff/pull/4777) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #2855: (Tech debt: check consistency in TemplatedFile init) [#4776](https://github.com/sqlfluff/sqlfluff/pull/4776) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add a --quiet option for fix [#4764](https://github.com/sqlfluff/sqlfluff/pull/4764) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4603 indent after Jinja 'do' directive [#4778](https://github.com/sqlfluff/sqlfluff/pull/4778) [@fredriv](https://github.com/fredriv)
* Snowflake Execute Task with Schema [#4771](https://github.com/sqlfluff/sqlfluff/pull/4771) [@Thashin](https://github.com/Thashin)
* SQLite: Support CreateTrigger [#4767](https://github.com/sqlfluff/sqlfluff/pull/4767) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix #2865 (AL05 exception for Redshift Semi-structured) [#4775](https://github.com/sqlfluff/sqlfluff/pull/4775) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4540: Untaken indents evaluation order. [#4768](https://github.com/sqlfluff/sqlfluff/pull/4768) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use the new CollationReferenceSegment everywhere [#4770](https://github.com/sqlfluff/sqlfluff/pull/4770) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SQLite: Fix multiple parse issues in Expression_A_Grammar [#4769](https://github.com/sqlfluff/sqlfluff/pull/4769) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SQLite: Remove refs to RESPECT and QUALIFY [#4765](https://github.com/sqlfluff/sqlfluff/pull/4765) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite: Support STRICT [#4766](https://github.com/sqlfluff/sqlfluff/pull/4766) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Support hive set syntax [#4763](https://github.com/sqlfluff/sqlfluff/pull/4763) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4582: Comments after end of line [#4760](https://github.com/sqlfluff/sqlfluff/pull/4760) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow comment match with preceding line [#4758](https://github.com/sqlfluff/sqlfluff/pull/4758) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove the majority of greedy matchers [#4761](https://github.com/sqlfluff/sqlfluff/pull/4761) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix #4745: (max() error in reindent) [#4752](https://github.com/sqlfluff/sqlfluff/pull/4752) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix issue with macros triggering LT01 [#4757](https://github.com/sqlfluff/sqlfluff/pull/4757) [@alanmcruickshank](https://github.com/alanmcruickshank)
* end-of-file > end_of_file [#4753](https://github.com/sqlfluff/sqlfluff/pull/4753) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [2.0.5] - 2023-04-14

## Highlights

This is a relatively swift bugfix release to refine some of the changes made to window function indentation in `2.0.4`. In addition, there are two dialect refinements also made since that release.

## What’s Changed

* Refactor PG segments to reuse new common segments [#4726](https://github.com/sqlfluff/sqlfluff/pull/4726) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Recognize quoted data types [#4747](https://github.com/sqlfluff/sqlfluff/pull/4747) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)

## [2.0.4] - 2023-04-14

## Highlights

This is primarily a _bugfix_ and _dialect_ release:

* Several bugfixes related to templating and indentation, in particular some improvements to the indentation of aliases and window functions.
* Performance improvements to the parser.
* The `--persist-timing` option is now also available on `sqlfluff fix`.
* A refresh to getting started and rule documentation.
* Dialect improvements to PostgreSQL, Athena, SparkSQL, MySQL & Snowflake.

Thanks also to [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) and [@Thashin](https://github.com/Thashin) who made their first contributions in this release. In particular, [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) made **twenty one** contributions in their first month! 🎉🎉🎉

## What’s Changed

* SparkSQL: Improvements to lateral view, hints, sort by [#4731](https://github.com/sqlfluff/sqlfluff/pull/4731) [@bmorck](https://github.com/bmorck)
* Add ExpressionSegment to CREATE TABLE ... DEFAULT / Fix multiple parse issues in Expression_A_Grammar [#4717](https://github.com/sqlfluff/sqlfluff/pull/4717) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for the PG VACUUM statement [#4742](https://github.com/sqlfluff/sqlfluff/pull/4742) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Simplify and fix PG array accessor segment & support expressions [#4748](https://github.com/sqlfluff/sqlfluff/pull/4748) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SparkSQL: Allow for any ordering of create table clauses [#4721](https://github.com/sqlfluff/sqlfluff/pull/4721) [@bmorck](https://github.com/bmorck)
* Suggested started config file [#4702](https://github.com/sqlfluff/sqlfluff/pull/4702) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Indents on window functions [#4560](https://github.com/sqlfluff/sqlfluff/pull/4560) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Fix Group By Clause [#4732](https://github.com/sqlfluff/sqlfluff/pull/4732) [@bmorck](https://github.com/bmorck)
* Improve support for EXCLUDE table constraints in PG [#4725](https://github.com/sqlfluff/sqlfluff/pull/4725) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for dropping multiple indexes in PG [#4737](https://github.com/sqlfluff/sqlfluff/pull/4737) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Recognize "on" value and integers for PG SET statement [#4740](https://github.com/sqlfluff/sqlfluff/pull/4740) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Improve interval expressions on MySQL [#4746](https://github.com/sqlfluff/sqlfluff/pull/4746) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Keep out zero length keywords [#4723](https://github.com/sqlfluff/sqlfluff/pull/4723) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add PG support for CREATE SCHEMA AUTHORIZATION [#4735](https://github.com/sqlfluff/sqlfluff/pull/4735) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for dropping multiple views with PostgreSQL [#4736](https://github.com/sqlfluff/sqlfluff/pull/4736) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add CHAR VARYING data type for PG [#4738](https://github.com/sqlfluff/sqlfluff/pull/4738) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* fix(athena): map type matching failed, array type only contains a datatype [#4739](https://github.com/sqlfluff/sqlfluff/pull/4739) [@timcosta](https://github.com/timcosta)
* Allow DML queries to be selectable in CTEs on PG [#4741](https://github.com/sqlfluff/sqlfluff/pull/4741) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add the CREATE/DROP CAST statements to ANSI and PG [#4744](https://github.com/sqlfluff/sqlfluff/pull/4744) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add support for PG SET ROLE / RESET ROLE [#4734](https://github.com/sqlfluff/sqlfluff/pull/4734) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Support Spark Iceberg DDL [#4690](https://github.com/sqlfluff/sqlfluff/pull/4690) [@bmorck](https://github.com/bmorck)
* Fix #4680 [#4707](https://github.com/sqlfluff/sqlfluff/pull/4707) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Indent Aliases [#4706](https://github.com/sqlfluff/sqlfluff/pull/4706) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SparkSQL: Improve window frame bounds [#4722](https://github.com/sqlfluff/sqlfluff/pull/4722) [@bmorck](https://github.com/bmorck)
* Add support for PG CREATE/ALTER/DROP PUBLICATION stmts [#4716](https://github.com/sqlfluff/sqlfluff/pull/4716) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SparkSQL: Create external table support [#4692](https://github.com/sqlfluff/sqlfluff/pull/4692) [@bmorck](https://github.com/bmorck)
* SparkSQL: Fix file literal lexing [#4718](https://github.com/sqlfluff/sqlfluff/pull/4718) [@bmorck](https://github.com/bmorck)
* Add PG DROP/REASSIGN OWNED statements [#4720](https://github.com/sqlfluff/sqlfluff/pull/4720) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* SparkSQL: Add distinct to comparison operator [#4719](https://github.com/sqlfluff/sqlfluff/pull/4719) [@bmorck](https://github.com/bmorck)
* Rethink Rule Docs [#4695](https://github.com/sqlfluff/sqlfluff/pull/4695) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Performance: Reduce calls to _prune_options [#4705](https://github.com/sqlfluff/sqlfluff/pull/4705) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Add ReferencedVariableNameSegment to sample function [#4712](https://github.com/sqlfluff/sqlfluff/pull/4712) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Mark AM02 as fix compatible [#4714](https://github.com/sqlfluff/sqlfluff/pull/4714) [@yoichi](https://github.com/yoichi)
* Fix LT01 spacing check in templated areas [#4698](https://github.com/sqlfluff/sqlfluff/pull/4698) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Don't do newline conversion on write [#4703](https://github.com/sqlfluff/sqlfluff/pull/4703) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: CREATE/ALTER VIEW may take UNION [#4713](https://github.com/sqlfluff/sqlfluff/pull/4713) [@yoichi](https://github.com/yoichi)
* Preserve zero-length template segments [#4708](https://github.com/sqlfluff/sqlfluff/pull/4708) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* CV06: don't flag files that don't have code [#4709](https://github.com/sqlfluff/sqlfluff/pull/4709) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Add a no-output option [#4704](https://github.com/sqlfluff/sqlfluff/pull/4704) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Jinja templater: treat "import" and "from" as templated [#4696](https://github.com/sqlfluff/sqlfluff/pull/4696) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Capitalization rules ignore templated code only if configured to [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697) [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack)
* Update getting started docs [#4700](https://github.com/sqlfluff/sqlfluff/pull/4700) [@alanmcruickshank](https://github.com/alanmcruickshank)
[#4701](https://github.com/sqlfluff/sqlfluff/pull/4701) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake Select System Functions [#4687](https://github.com/sqlfluff/sqlfluff/pull/4687) [@Thashin](https://github.com/Thashin) * SparkSQL: Add using and options clause to create view statement [#4691](https://github.com/sqlfluff/sqlfluff/pull/4691) [@bmorck](https://github.com/bmorck) * MySQL: Add RETURN Statement [#4693](https://github.com/sqlfluff/sqlfluff/pull/4693) [@yoichi](https://github.com/yoichi) * Safety valve for fixes in CV03 [#4685](https://github.com/sqlfluff/sqlfluff/pull/4685) [@alanmcruickshank](https://github.com/alanmcruickshank) * Allow persist timing on `fix` too. [#4679](https://github.com/sqlfluff/sqlfluff/pull/4679) [@alanmcruickshank](https://github.com/alanmcruickshank) * fix{dialect-snowflake}:Alter Table Column Set/Unset Tag [#4682](https://github.com/sqlfluff/sqlfluff/pull/4682) [@Thashin](https://github.com/Thashin) * fix{dialect-snowflake}:Execute Task [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683) [@Thashin](https://github.com/Thashin) * Make version number an argument not an option in release script. [#4677](https://github.com/sqlfluff/sqlfluff/pull/4677) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@Thashin](https://github.com/Thashin) made their first contribution in [#4683](https://github.com/sqlfluff/sqlfluff/pull/4683) * [@james-johnston-thumbtack](https://github.com/james-johnston-thumbtack) made their first contribution in [#4697](https://github.com/sqlfluff/sqlfluff/pull/4697) ## [2.0.3] - 2023-04-05 ## Highlights This is primarily a _bugfix_ and _dialect_ release: * Several bugfixes related to templating and indentation. * Configurable indentation before `THEN` in `CASE` statements (see [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598)). * Performance improvements to `TypedParser`, `LT03` & `LT04`. * Rule timings now appear in the `--persist-timing` option for deeper performance understanding. * The introduction of a Greenplum dialect. * Dialect improvements to TSQL, Athena, Snowflake, MySQL, SparkSQL BigQuery, Databricks, Clickhouse & Postgres. We also saw a _huge number of first time contributors_ with **9** contributing in this release 🎉🏆🎉. 
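To illustrate the configurable `THEN` indentation highlighted above, here is a minimal `pyproject.toml` sketch. The `indented_then` key is the setting introduced in [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598); treat the exact key name and its default as assumptions to verify against the layout docs for your version:

```toml
# Hypothetical sketch: turn off the indent otherwise expected
# before THEN in CASE expressions (the 2.x default indents it).
[tool.sqlfluff.indentation]
indented_then = false
```

The same setting can equally live in an ini-style `.sqlfluff` file under the `[sqlfluff:indentation]` section.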
## What’s Changed
* Better error message for missing keywords [#4676](https://github.com/sqlfluff/sqlfluff/pull/4676) [@tunetheweb](https://github.com/tunetheweb)
* Add performance shortcuts to LT03 & LT04 [#4672](https://github.com/sqlfluff/sqlfluff/pull/4672) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Clickhouse: Add support for [LEFT] ARRAY JOIN [#4618](https://github.com/sqlfluff/sqlfluff/pull/4618) [@simpl1g](https://github.com/simpl1g)
* Postgres - allow untyped OVERLAPS clauses [#4674](https://github.com/sqlfluff/sqlfluff/pull/4674) [@tunetheweb](https://github.com/tunetheweb)
* Mark `is_alias_required` as a private class so it doesn't appear in docs [#4673](https://github.com/sqlfluff/sqlfluff/pull/4673) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug in templated with clauses LT07 [#4671](https://github.com/sqlfluff/sqlfluff/pull/4671) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: `OPENJSON()` [#4652](https://github.com/sqlfluff/sqlfluff/pull/4652) [@keen85](https://github.com/keen85)
* fix(RF06/L059): allows configuring prefer_quoted_keywords to deconflict with L029 [#4396](https://github.com/sqlfluff/sqlfluff/pull/4396) [@timcosta](https://github.com/timcosta)
* TSQL: `Create External Table` [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642) [@aly76](https://github.com/aly76)
* Consistent indentation in `MERGE` `INSERT` clause [#4666](https://github.com/sqlfluff/sqlfluff/pull/4666) [@dmohns](https://github.com/dmohns)
* BigQuery: Fix null assignment in options segment [#4669](https://github.com/sqlfluff/sqlfluff/pull/4669) [@greg-finley](https://github.com/greg-finley)
* BigQuery: Delete table reference [#4668](https://github.com/sqlfluff/sqlfluff/pull/4668) [@greg-finley](https://github.com/greg-finley)
* TSQL: `CREATE EXTERNAL FILE FORMAT` [#4647](https://github.com/sqlfluff/sqlfluff/pull/4647) [@keen85](https://github.com/keen85)
* Remove TIME as reserved keyword in SparkSQL [#4662](https://github.com/sqlfluff/sqlfluff/pull/4662) [@bmorck](https://github.com/bmorck)
* Start of the Greenplum dialect implementation [#4661](https://github.com/sqlfluff/sqlfluff/pull/4661) [@JackWolverson](https://github.com/JackWolverson)
* Enable configuring whether to require indent before THEN [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598) [@fredriv](https://github.com/fredriv)
* Sequence Meta Handling [#4622](https://github.com/sqlfluff/sqlfluff/pull/4622) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add support for non-quoted file paths in SparkSQL [#4650](https://github.com/sqlfluff/sqlfluff/pull/4650) [@bmorck](https://github.com/bmorck)
* Remove three RegexParsers [#4658](https://github.com/sqlfluff/sqlfluff/pull/4658) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make parse test readout more helpful [#4657](https://github.com/sqlfluff/sqlfluff/pull/4657) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: support for `sqlcmd` commands `:r` and `:setvar` [#4653](https://github.com/sqlfluff/sqlfluff/pull/4653) [@keen85](https://github.com/keen85)
* Update README with Databricks note [#4632](https://github.com/sqlfluff/sqlfluff/pull/4632) [@liamperritt](https://github.com/liamperritt)
* Athena: Fix parsing error with aliases starting with underscore [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636) [@maiarareinaldo](https://github.com/maiarareinaldo)
* Snowflake: Stop ever-increasing indent in CREATE USER [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638) [@roman-ef](https://github.com/roman-ef)
* TSQL: `PERIOD FOR SYSTEM_TIME` (temporal tables) [#4654](https://github.com/sqlfluff/sqlfluff/pull/4654) [@keen85](https://github.com/keen85)
* MySQL: SelectStatementSegment in CREATE/ALTER VIEW may be bracketed [#4655](https://github.com/sqlfluff/sqlfluff/pull/4655) [@yoichi](https://github.com/yoichi)
* TSQL: `CREATE EXTERNAL DATA SOURCE` [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634) [@keen85](https://github.com/keen85)
* Safety valve on source fixes [#4640](https://github.com/sqlfluff/sqlfluff/pull/4640) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add SparkSQL support for LONG primitive type [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639) [@bmorck](https://github.com/bmorck)
* Fix PIVOT clauses for BigQuery and SparkSQL [#4630](https://github.com/sqlfluff/sqlfluff/pull/4630) [@tunetheweb](https://github.com/tunetheweb)
* Correct BigQuery WINDOW parsing [#4629](https://github.com/sqlfluff/sqlfluff/pull/4629) [@tunetheweb](https://github.com/tunetheweb)
* Add Databricks dialect support for Unity Catalog [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568) [@liamperritt](https://github.com/liamperritt)
* .simple() matching for TypedMatcher [#4612](https://github.com/sqlfluff/sqlfluff/pull/4612) [@alanmcruickshank](https://github.com/alanmcruickshank)
* --bench output with rule timings [#4601](https://github.com/sqlfluff/sqlfluff/pull/4601) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Unnamed constraints [#4616](https://github.com/sqlfluff/sqlfluff/pull/4616) [@greg-finley](https://github.com/greg-finley)
* TSQL: Create database scoped credential [#4615](https://github.com/sqlfluff/sqlfluff/pull/4615) [@greg-finley](https://github.com/greg-finley)
* fix(dialect-clickhouse): Add materialized view statement [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605) [@germainlefebvre4](https://github.com/germainlefebvre4)
* Nicer formatted dbt errors [#4606](https://github.com/sqlfluff/sqlfluff/pull/4606) [@alanmcruickshank](https://github.com/alanmcruickshank)
* add parse lambda function Clickhouse [#4611](https://github.com/sqlfluff/sqlfluff/pull/4611) [@konnectr](https://github.com/konnectr)
* Support `WITH ORDINALITY` clauses in Postgres [#4599](https://github.com/sqlfluff/sqlfluff/pull/4599) [@tunetheweb](https://github.com/tunetheweb)

## New Contributors
* [@germainlefebvre4](https://github.com/germainlefebvre4) made their first contribution in [#4605](https://github.com/sqlfluff/sqlfluff/pull/4605)
* [@liamperritt](https://github.com/liamperritt) made their first contribution in [#4568](https://github.com/sqlfluff/sqlfluff/pull/4568)
* [@bmorck](https://github.com/bmorck) made their first contribution in [#4639](https://github.com/sqlfluff/sqlfluff/pull/4639)
* [@keen85](https://github.com/keen85) made their first contribution in [#4634](https://github.com/sqlfluff/sqlfluff/pull/4634)
* [@roman-ef](https://github.com/roman-ef) made their first contribution in [#4638](https://github.com/sqlfluff/sqlfluff/pull/4638)
* [@maiarareinaldo](https://github.com/maiarareinaldo) made their first contribution in [#4636](https://github.com/sqlfluff/sqlfluff/pull/4636)
* [@fredriv](https://github.com/fredriv) made their first contribution in [#4598](https://github.com/sqlfluff/sqlfluff/pull/4598)
* [@aly76](https://github.com/aly76) made their first contribution in [#4642](https://github.com/sqlfluff/sqlfluff/pull/4642)
* [@simpl1g](https://github.com/simpl1g) made their first contribution in [#4618](https://github.com/sqlfluff/sqlfluff/pull/4618)

## [2.0.2] - 2023-03-23

## Highlights

This is primarily a _bugfix_ release. Most notably this solves some of the issues introduced in 2.0.1 around spacing within datatypes. Expressions like `1.0::double precision` should now be spaced correctly.

Beyond that, this contains a selection of smaller bugfixes and dialect improvements. Even for a relatively small release we saw three new contributors (thanks [@aurany](https://github.com/aurany), [@JackWolverson](https://github.com/JackWolverson) & [@mikaeltw](https://github.com/mikaeltw) 🎉).

The one new _feature_ (as such) is the ability to configure `LT05` (aka `layout.long_lines`) to optionally move trailing comments _after_ the line they are found on, rather than the default behaviour of moving them up and _before_. Users can enable this with the `trailing_comments` configuration setting in the `indentation` section (see the sketch below).

This release _also_ contains some performance optimisations in the parser, especially on queries with heavily nested expressions. There will be more to come in this space, but we hope this leads to a better experience for many users. 🚀
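As a sketch of the new `trailing_comments` option in `pyproject.toml` form (the section and key names come from the notes above; the `"after"` value is an assumption to verify against the layout docs):

```toml
# Hypothetical sketch: ask LT05 (layout.long_lines) to move a trailing
# comment on an over-long line to the line *after* it, rather than the
# default behaviour of moving it up and before.
[tool.sqlfluff.indentation]
trailing_comments = "after"
```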
## What’s Changed
* Parse Caching [#4576](https://github.com/sqlfluff/sqlfluff/pull/4576) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Data type spacing [#4592](https://github.com/sqlfluff/sqlfluff/pull/4592) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: allow quoted literal in alias name [#4591](https://github.com/sqlfluff/sqlfluff/pull/4591) [@yoichi](https://github.com/yoichi)
* Make implicit indents visible in the parse tree [#4584](https://github.com/sqlfluff/sqlfluff/pull/4584) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4559: TSQL implicit indents on WHERE [#4583](https://github.com/sqlfluff/sqlfluff/pull/4583) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Added keywords to DB2 dialect from IBM docs [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575) [@aurany](https://github.com/aurany)
* Remove matches_target_tuples (#3873) [#4561](https://github.com/sqlfluff/sqlfluff/pull/4561) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use terminators in BaseExpression [#4577](https://github.com/sqlfluff/sqlfluff/pull/4577) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Address #1630: Optionally move comments after long line [#4558](https://github.com/sqlfluff/sqlfluff/pull/4558) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Added schema to set statement [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580) [@JackWolverson](https://github.com/JackWolverson)
* Refactor lint_line_length and fix comma bug [#4564](https://github.com/sqlfluff/sqlfluff/pull/4564) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix untaken indent bug [#4562](https://github.com/sqlfluff/sqlfluff/pull/4562) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SQLite: Fix SELECT LIMIT [#4566](https://github.com/sqlfluff/sqlfluff/pull/4566) [@greg-finley](https://github.com/greg-finley)
* Fix #4453: Snowflake semi-structured casts in CV11 [#4571](https://github.com/sqlfluff/sqlfluff/pull/4571) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Name of LT07 [#4557](https://github.com/sqlfluff/sqlfluff/pull/4557) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Patch fetch and over [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555) [@mikaeltw](https://github.com/mikaeltw)

## New Contributors
* [@mikaeltw](https://github.com/mikaeltw) made their first contribution in [#4555](https://github.com/sqlfluff/sqlfluff/pull/4555)
* [@JackWolverson](https://github.com/JackWolverson) made their first contribution in [#4580](https://github.com/sqlfluff/sqlfluff/pull/4580)
* [@aurany](https://github.com/aurany) made their first contribution in [#4575](https://github.com/sqlfluff/sqlfluff/pull/4575)

## [2.0.1] - 2023-03-17

## Highlights

This is mostly a bugfix release addressing some of the issues from the recent 2.0 release. Notable fixes are:
- Spacing (as applied by `LT01`) for datatypes, hyphenated identifiers and casting operators.
- Several bugs in the indentation routines (`LT02`), in particular with implicit indents.
- Fixing a conflict between `LT09` and `LT02`, by limiting `LT09` to only bring targets onto a single line if there is just one select target **and** it contains no newlines.
- Supporting arrays, and the new rules configuration, more effectively in `pyproject.toml`.
- Configuring dialects on a file by file basis using inline comments now works.

This release also brings one small new feature in allowing additional flags to be passed to SQLFluff when called as a `pre-commit` hook.

Thanks especially to [@JavierMonton](https://github.com/JavierMonton) and [@LauraRichter](https://github.com/LauraRichter) who made their first contributions to the project as part of this release! 🎉🏆

## What’s Changed
* Add support for arrays in TOML configuration [#4387](https://github.com/sqlfluff/sqlfluff/pull/4387) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Rethink test segregation in CI [#4547](https://github.com/sqlfluff/sqlfluff/pull/4547) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4515 and add more test cases [#4525](https://github.com/sqlfluff/sqlfluff/pull/4525) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add additional flags to `sqlfluff` invocations in pre-commit hooks [#4546](https://github.com/sqlfluff/sqlfluff/pull/4546) [@borchero](https://github.com/borchero)
* Resolve #4484 (issues with indented_joins indents) [#4544](https://github.com/sqlfluff/sqlfluff/pull/4544) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Per file dialect selection fix [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518) [@LauraRichter](https://github.com/LauraRichter)
* MySQL: Add CREATE INDEX [#4538](https://github.com/sqlfluff/sqlfluff/pull/4538) [@yoichi](https://github.com/yoichi)
* Resolve implicit indent issues when catching negative indents [#4543](https://github.com/sqlfluff/sqlfluff/pull/4543) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Github Action Deprecations [#4545](https://github.com/sqlfluff/sqlfluff/pull/4545) [@alanmcruickshank](https://github.com/alanmcruickshank)
* LT09 and multiline select targets [#4529](https://github.com/sqlfluff/sqlfluff/pull/4529) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Remove Codecov from CI [#4535](https://github.com/sqlfluff/sqlfluff/pull/4535) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bigquery hyphenated identifiers [#4530](https://github.com/sqlfluff/sqlfluff/pull/4530) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Attempt in-house coverage [#4532](https://github.com/sqlfluff/sqlfluff/pull/4532) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres datatype spacing issues [#4528](https://github.com/sqlfluff/sqlfluff/pull/4528) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support new rules config in toml files. [#4526](https://github.com/sqlfluff/sqlfluff/pull/4526) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #1146 (log propagation) [#4513](https://github.com/sqlfluff/sqlfluff/pull/4513) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Optional quotes for `create user` statement [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514) [@JavierMonton](https://github.com/JavierMonton)

## New Contributors
* [@JavierMonton](https://github.com/JavierMonton) made their first contribution in [#4514](https://github.com/sqlfluff/sqlfluff/pull/4514)
* [@LauraRichter](https://github.com/LauraRichter) made their first contribution in [#4518](https://github.com/sqlfluff/sqlfluff/pull/4518)

## [2.0.0] - 2023-03-13

## Highlights

Upgrading to 2.0 brings several important **breaking changes**:
* All bundled rules have been recoded, both from generic `L00X` formats into groups within similar codes (e.g. an *aliasing* group with codes of the format `AL0X`), but also given *names* to allow much clearer referencing (e.g. `aliasing.column`).
* [Configuring rules](https://docs.sqlfluff.com/en/latest/configuration.html#rule-configuration) now uses the rule *name* rather than the rule *code* to specify the section (see the config sketch below). Any unrecognised references in config files (whether they are references which *do* match existing rules by code or alias, or whether they match no rules at all) will raise warnings at runtime.
* A complete re-write of layout and whitespace handling rules (see [layout](https://docs.sqlfluff.com/en/latest/layout.html)), and with that a change in how layout is configured (see [configuring layout](https://docs.sqlfluff.com/en/latest/layout.html#configuring-layout)) and the combination of some rules that were previously separate. One example of this is that the legacy rules `L001`, `L005`, `L006`, `L008`, `L023`, `L024`, `L039`, `L048` & `L071` have been combined simply into [LT01](https://docs.sqlfluff.com/en/latest/rules.html#sqlfluff.rules.sphinx.Rule_LT01).
* Dropping support for dbt versions before `1.1`.

To help users upgrade to 2.0, we've put together a recommended process as part of our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html#upgrading-from-1-x-to-2-0).

Beyond the breaking changes, this release brings *a load* of additional changes:
* Introduces the `sqlfluff format` CLI command (a la `sqlfmt` or `black`) to auto-format sql files using a known set of _fairly safe_ rules.
* Databricks as a distinct new dialect (rather than as previously an alias for `sparksql`).
* Performance improvements in our parsing engine.
* Dialect improvements to _almost all of them_.

As a new major release, especially with significant rewrites of large portions of the codebase, we recommend using [compatible release](https://peps.python.org/pep-0440/#compatible-release) specifiers in your dependencies (i.e. `sqlfluff~=2.0.0`) so that you can automatically take advantage of any bugfix releases in the coming weeks. The alpha releases of 2.0.0 have been tested on a range of large projects, but we know that the range of use cases _"in the wild"_ is very diverse. If you do experience issues, please post them [on GitHub](https://github.com/sqlfluff/sqlfluff/issues/new/choose) in the usual manner.
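To make the name-based configuration concrete, here is a minimal `pyproject.toml` sketch combining the TOML array support from [#4387](https://github.com/sqlfluff/sqlfluff/pull/4387) with 2.0-style rule naming. The section layout follows the documented `[tool.sqlfluff.*]` pattern, but the specific rules and values are illustrative assumptions only:

```toml
# Hypothetical SQLFluff 2.x configuration sketch.
[tool.sqlfluff.core]
dialect = "ansi"
# List-style values can now be given as TOML arrays (#4387).
exclude_rules = ["AL07", "ST06"]

# Rules are now configured by *name* rather than legacy L0XX code.
[tool.sqlfluff.rules.capitalisation.keywords]
capitalisation_policy = "upper"
```

An unrecognised section name here would raise a warning at runtime rather than being silently ignored.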
Finally thanks to everyone who has worked on this release, especially [@konnectr](https://github.com/konnectr), [@ValentinCrr](https://github.com/ValentinCrr), [@FabianScheidt](https://github.com/FabianScheidt), [@dflem97](https://github.com/dflem97), [@timcosta](https://github.com/timcosta), [@AidanHarveyNelson](https://github.com/AidanHarveyNelson), [@joar](https://github.com/joar), [@jmpfar](https://github.com/jmpfar), [@jared-rimmer](https://github.com/jared-rimmer), [@vesatoivonen](https://github.com/vesatoivonen), [@briankravec](https://github.com/briankravec), [@saintamh](https://github.com/saintamh), [@tdurieux](https://github.com/tdurieux), [@baa-ableton](https://github.com/baa-ableton), & [@WillAyd](https://github.com/WillAyd) who made their first contributions during the development of 2.0.0. Thanks for your contributions, and especially your patience in the slightly slower release of your efforts into the wild. 🙏🎉

## What’s Changed
* Revise templating and lexing of calls. [#4506](https://github.com/sqlfluff/sqlfluff/pull/4506) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Struct Access Spacing [#4512](https://github.com/sqlfluff/sqlfluff/pull/4512) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Array and Struct Spacing [#4511](https://github.com/sqlfluff/sqlfluff/pull/4511) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add a deprecation warning for removed config option. [#4509](https://github.com/sqlfluff/sqlfluff/pull/4509) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bigquery spacing [#4508](https://github.com/sqlfluff/sqlfluff/pull/4508) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4433 (more untaken positive indents) [#4499](https://github.com/sqlfluff/sqlfluff/pull/4499) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix parse error on double parentheses [#4504](https://github.com/sqlfluff/sqlfluff/pull/4504) [@yoichi](https://github.com/yoichi)
* 2.0.0 Migration Guide [#4498](https://github.com/sqlfluff/sqlfluff/pull/4498) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle missing aliases and align constraints better [#4493](https://github.com/sqlfluff/sqlfluff/pull/4493) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Add support For Clause [#4501](https://github.com/sqlfluff/sqlfluff/pull/4501) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Allow Jinja rule to loop safely [#4495](https://github.com/sqlfluff/sqlfluff/pull/4495) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Trigger CI tests for merge groups [#4503](https://github.com/sqlfluff/sqlfluff/pull/4503) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Readme and Contributing [#4502](https://github.com/sqlfluff/sqlfluff/pull/4502) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update layout docs [#4500](https://github.com/sqlfluff/sqlfluff/pull/4500) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bug in operator precedence [#4497](https://github.com/sqlfluff/sqlfluff/pull/4497) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: correct query syntax for single column `UNPIVOT` clauses [#4494](https://github.com/sqlfluff/sqlfluff/pull/4494) [@imrehg](https://github.com/imrehg)
* Fix #4485 [#4491](https://github.com/sqlfluff/sqlfluff/pull/4491) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update reserved keywords in Athena language [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490) [@ValentinCrr](https://github.com/ValentinCrr)
* Clickhouse support all join types [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488) [@konnectr](https://github.com/konnectr)
* Snowflake semi-structured spacing [#4487](https://github.com/sqlfluff/sqlfluff/pull/4487) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Prep version 2.0.0a6 [#4476](https://github.com/sqlfluff/sqlfluff/pull/4476) [@github-actions](https://github.com/github-actions)
* Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97)
* Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta)
* Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: EXEC string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36)
* Teradata: Added SET QUERY_BAND statement [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97)
* Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97)
* Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97)
* Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer)
* Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank)
* References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36)
* Pull dbt CI tests forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97)
* Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank)
* DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur)
* L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt)
* Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Prep version 2.0.0a5 [#4419](https://github.com/sqlfluff/sqlfluff/pull/4419) [@github-actions](https://github.com/github-actions)
* Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle untaken positive indents with taken negative pair. [#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh)
* Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec)
* BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi)
* Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Rule names, aliases and more complicated selection. [#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur)
* T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero)
* L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen)
* TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer)
* Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer)
* Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak)
* Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer)
* Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar)
* Resolve #4291: Comments forcing unexpected indents. [#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley)
* Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak)
* Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang)
* TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley)
* reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) [@imrehg](https://github.com/imrehg)
* Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar)
* Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Add support for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak)
* Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak)
* Prep version 2.0.0a4 [#4322](https://github.com/sqlfluff/sqlfluff/pull/4322) [@github-actions](https://github.com/github-actions)
* BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley)
* Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley)
* Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur)
* MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi)
* BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi)
* toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh)
* Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan)
* Prep version 2.0.0a3 [#4290](https://github.com/sqlfluff/sqlfluff/pull/4290) [@github-actions](https://github.com/github-actions)
* Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi)
* Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart)
* Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Hive: Add Table constraints DISABLE VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur)
* BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi)
* Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux)
* Prep version 2.0.0a2 [#4247](https://github.com/sqlfluff/sqlfluff/pull/4247) [@github-actions](https://github.com/github-actions)
* Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley)
* postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao)
* Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres)
* Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank)
* diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart)
* Prep version 2.0.0a1 [#4203](https://github.com/sqlfluff/sqlfluff/pull/4203) [@github-actions](https://github.com/github-actions)
* Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd)
* Fix #4215 [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank)
* don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank)
* show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank)
* don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi)
* Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) [@barrywhart](https://github.com/barrywhart)
* Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart)
* In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart)
* Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres)
* Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart)
* Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton)
* Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart)
* Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors
* [@konnectr](https://github.com/konnectr) made their first contribution in [#4488](https://github.com/sqlfluff/sqlfluff/pull/4488)
* [@ValentinCrr](https://github.com/ValentinCrr) made their first contribution in [#4490](https://github.com/sqlfluff/sqlfluff/pull/4490)
* [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426)
* [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440)
* [@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391)
* [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344)
* [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341)
* [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356)
* [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403)
* [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335)
* [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421)
* [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417)
* [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262)
* [@baa-ableton](https://github.com/baa-ableton) made their first contribution in [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182)
* [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198)

## [2.0.0a6] - 2023-03-06

> NOTE: This is effectively a release candidate for testing purposes. There are several new features here, and breaking changes to configuration. We welcome testing feedback from the community, and the intent is that following this release there will be no more major breaking changes before the 2.0.0 release.

## Highlights

This is the sixth alpha release for 2.0.0, and effectively the first release candidate for 2.0.0. All the intended breaking changes for the upcoming release have now been made and only bugfixes and non-breaking feature changes should happen between this release and the full release. It contains:
* A reorganisation of rules. All rules have been recoded, and can now be referred to by their name, code, alias or group. The legacy code for the rule is included as an alias for each rule to support some backward compatibility.
* Configuration files (and inline configuration flags) should now use the **name** of the rule rather than the **code**. Any configuration files which reference using legacy rules (or reference unknown rules) should now display warnings.
* Introduces the `sqlfluff format` CLI command (a la `sqlfmt` or `black`) to auto-format sql files using a known set of _fairly safe_ rules.
* Databricks as a distinct new dialect (rather than as previously an alias for `sparksql`).

There are also numerous dialect improvements to ANSI, Athena, TSQL, Teradata, SQLite & MySQL.

## What’s Changed
* Fix #4367 [#4479](https://github.com/sqlfluff/sqlfluff/pull/4479) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Teradata: Improve COLLECT STATS parsing [#4478](https://github.com/sqlfluff/sqlfluff/pull/4478) [@dflem97](https://github.com/dflem97)
* Add a sqlfluff format CLI command [#4473](https://github.com/sqlfluff/sqlfluff/pull/4473) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode and disable L031 -> AL07 [#4471](https://github.com/sqlfluff/sqlfluff/pull/4471) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Named Config (part 2) [#4470](https://github.com/sqlfluff/sqlfluff/pull/4470) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Rule config lookup improvements & config warnings [#4465](https://github.com/sqlfluff/sqlfluff/pull/4465) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode L050 [#4468](https://github.com/sqlfluff/sqlfluff/pull/4468) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Implicit indent fixes #4467 [#4469](https://github.com/sqlfluff/sqlfluff/pull/4469) [@alanmcruickshank](https://github.com/alanmcruickshank)
* ANSI: Add IfExistsGrammar to DropTrigger [#4466](https://github.com/sqlfluff/sqlfluff/pull/4466) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Rules Reorg Mopup [#4462](https://github.com/sqlfluff/sqlfluff/pull/4462) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Layout Rules Recode (part 2) [#4456](https://github.com/sqlfluff/sqlfluff/pull/4456) [@alanmcruickshank](https://github.com/alanmcruickshank)
* fix(athena): resolve errors parsing around maps, structs, and arrays [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391) [@timcosta](https://github.com/timcosta)
* Layout Rules Recode (part 1) [#4432](https://github.com/sqlfluff/sqlfluff/pull/4432) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: EXEC string literal [#4458](https://github.com/sqlfluff/sqlfluff/pull/4458) [@jpers36](https://github.com/jpers36)
* Teradata: Added SET QUERY_BAND statement [#4459](https://github.com/sqlfluff/sqlfluff/pull/4459) [@dflem97](https://github.com/dflem97)
* Teradata: Added TOP select clause modifier [#4461](https://github.com/sqlfluff/sqlfluff/pull/4461) [@dflem97](https://github.com/dflem97)
* Teradata: Addition of comparison operator extensions [#4451](https://github.com/sqlfluff/sqlfluff/pull/4451) [@dflem97](https://github.com/dflem97)
* Add extensions and plugin section to the README.md [#4454](https://github.com/sqlfluff/sqlfluff/pull/4454) [@jared-rimmer](https://github.com/jared-rimmer)
* Convention rules bundle [#4448](https://github.com/sqlfluff/sqlfluff/pull/4448) [@alanmcruickshank](https://github.com/alanmcruickshank)
* References rule bundle [#4446](https://github.com/sqlfluff/sqlfluff/pull/4446) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Structure and Ambiguous rule bundles [#4444](https://github.com/sqlfluff/sqlfluff/pull/4444) [@alanmcruickshank](https://github.com/alanmcruickshank)
* TSQL: Bare functions [#4439](https://github.com/sqlfluff/sqlfluff/pull/4439) [@jpers36](https://github.com/jpers36)
* Pull dbt CI tests forward to 1.1 and 1.4 [#4442](https://github.com/sqlfluff/sqlfluff/pull/4442) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Teradata: Added "AND STATS" options when creating table [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440) [@dflem97](https://github.com/dflem97)
* Add Databricks as a distinct dialect [#4438](https://github.com/sqlfluff/sqlfluff/pull/4438) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remove importlib deprecated methods [#4437](https://github.com/sqlfluff/sqlfluff/pull/4437) [@alanmcruickshank](https://github.com/alanmcruickshank)
* SQLite: Support PRAGMA statements [#4431](https://github.com/sqlfluff/sqlfluff/pull/4431) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Proposed graceful handling of noqa by L016 (#4248) [#4424](https://github.com/sqlfluff/sqlfluff/pull/4424) [@alanmcruickshank](https://github.com/alanmcruickshank)
* DuckDb: Allow quoted literals as identifiers [#4410](https://github.com/sqlfluff/sqlfluff/pull/4410) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SQLite Refactor to reduce statement and keyword scope [#4409](https://github.com/sqlfluff/sqlfluff/pull/4409) [@WittierDinosaur](https://github.com/WittierDinosaur)
* L046 and L056 recode [#4430](https://github.com/sqlfluff/sqlfluff/pull/4430) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Recode Aliasing Rules [#4427](https://github.com/sqlfluff/sqlfluff/pull/4427) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adjust MySQL dialect to support combination of not-null, default and … [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426) [@FabianScheidt](https://github.com/FabianScheidt)
* Revert some changes to tox [#4428](https://github.com/sqlfluff/sqlfluff/pull/4428) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Migrate capitalisation rules to plugin and recode [#4413](https://github.com/sqlfluff/sqlfluff/pull/4413) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors
* [@FabianScheidt](https://github.com/FabianScheidt) made their first contribution in [#4426](https://github.com/sqlfluff/sqlfluff/pull/4426)
* [@dflem97](https://github.com/dflem97) made their first contribution in [#4440](https://github.com/sqlfluff/sqlfluff/pull/4440)
* [@timcosta](https://github.com/timcosta) made their first contribution in [#4391](https://github.com/sqlfluff/sqlfluff/pull/4391)

## [2.0.0a5] - 2023-02-24

> NOTE: This is an alpha release for testing purposes. There are several new features here, and breaking changes to configuration. We welcome testing feedback from the community, but know that this release may feel less polished than usual.

## Highlights

This is the fifth alpha release for 2.0.0. It contains:
* Significant rework to rule naming and categorisation.
* Several performance improvements.
* Dialect improvements across several dialects.
* Bugfixes to many of the issues raised in 2.0.0a4.

There will likely be more changes to rule classification before a full release of 2.0.0, so anticipate that configuration files may change slightly again in future alpha releases.

## What’s Changed
* Handle long lines without trailing newlines gracefully (#4386) [#4423](https://github.com/sqlfluff/sqlfluff/pull/4423) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Resolve #4184 (index error in L007) [#4422](https://github.com/sqlfluff/sqlfluff/pull/4422) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle untaken positive indents with taken negative pair. [#4420](https://github.com/sqlfluff/sqlfluff/pull/4420) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: AS MATERIALIZED support [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417) [@saintamh](https://github.com/saintamh)
* Align warnings config with example shown [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421) [@briankravec](https://github.com/briankravec)
* BigQuery: parse "AS description" part of assert expressions [#4418](https://github.com/sqlfluff/sqlfluff/pull/4418) [@yoichi](https://github.com/yoichi)
* Deprecate doc decorators (replace with metaclass) [#4415](https://github.com/sqlfluff/sqlfluff/pull/4415) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Enable noqa using aliases and groups [#4414](https://github.com/sqlfluff/sqlfluff/pull/4414) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add rule names to CLI outputs [#4400](https://github.com/sqlfluff/sqlfluff/pull/4400) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Remove execution keyword inherited from ANSI [#4411](https://github.com/sqlfluff/sqlfluff/pull/4411) [@WittierDinosaur](https://github.com/WittierDinosaur)
[#4399](https://github.com/sqlfluff/sqlfluff/pull/4399) [@alanmcruickshank](https://github.com/alanmcruickshank) * Postgres: Support Recursive View [#4412](https://github.com/sqlfluff/sqlfluff/pull/4412) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL: Implement BULK INSERT statement [#4381](https://github.com/sqlfluff/sqlfluff/pull/4381) [@borchero](https://github.com/borchero) * L062: Add match_source (#4172) [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335) [@vesatoivonen](https://github.com/vesatoivonen) * TSQL: Add SET to ALTER TABLE [#4407](https://github.com/sqlfluff/sqlfluff/pull/4407) [@jared-rimmer](https://github.com/jared-rimmer) * Snowflake: ALTER STORAGE INTEGRATION segment [#4406](https://github.com/sqlfluff/sqlfluff/pull/4406) [@jared-rimmer](https://github.com/jared-rimmer) * Fix incorrect link to pre-commit docs [#4405](https://github.com/sqlfluff/sqlfluff/pull/4405) [@pdebelak](https://github.com/pdebelak) * Add Snowflake dialect ALTER ROLE segment [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403) [@jared-rimmer](https://github.com/jared-rimmer) * Improving Postgres create index statement [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356) [@jmpfar](https://github.com/jmpfar) * Resolve #4291: Comments forcing unexpected indents. [#4384](https://github.com/sqlfluff/sqlfluff/pull/4384) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4294: Comments affecting indentation [#4337](https://github.com/sqlfluff/sqlfluff/pull/4337) [@alanmcruickshank](https://github.com/alanmcruickshank) * Resolve #4292: Window function long line fixes [#4383](https://github.com/sqlfluff/sqlfluff/pull/4383) [@alanmcruickshank](https://github.com/alanmcruickshank) * TSQL: ALTER INDEX [#4364](https://github.com/sqlfluff/sqlfluff/pull/4364) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Added Varying Keyword to allowed data type segments [#4375](https://github.com/sqlfluff/sqlfluff/pull/4375) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Add ruff linter [#4372](https://github.com/sqlfluff/sqlfluff/pull/4372) [@greg-finley](https://github.com/greg-finley) * Fix postgres column constraint default syntax [#4379](https://github.com/sqlfluff/sqlfluff/pull/4379) [@pdebelak](https://github.com/pdebelak) * Allow function names to have a leading underscore [#4377](https://github.com/sqlfluff/sqlfluff/pull/4377) [@gavin-tsang](https://github.com/gavin-tsang) * TSQL: Merge Hints [#4354](https://github.com/sqlfluff/sqlfluff/pull/4354) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: Temporal Table [#4358](https://github.com/sqlfluff/sqlfluff/pull/4358) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * TSQL: ALTER TABLE [#4369](https://github.com/sqlfluff/sqlfluff/pull/4369) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Bugfix: Duckdb SELECT * [#4365](https://github.com/sqlfluff/sqlfluff/pull/4365) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: TABLESAMPLE query [#4357](https://github.com/sqlfluff/sqlfluff/pull/4357) [@greg-finley](https://github.com/greg-finley) * reindent refactor [#4338](https://github.com/sqlfluff/sqlfluff/pull/4338) [@alanmcruickshank](https://github.com/alanmcruickshank) * Snowflake: INSERT INTO [#4363](https://github.com/sqlfluff/sqlfluff/pull/4363) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) * Docs: correct toml syntax of pyproject.toml file config example [#4361](https://github.com/sqlfluff/sqlfluff/pull/4361) 
* Allowed Naked Identifiers [#4359](https://github.com/sqlfluff/sqlfluff/pull/4359) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: TABLESAMPLE [#4353](https://github.com/sqlfluff/sqlfluff/pull/4353) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Tsql: Function Parameters [#4352](https://github.com/sqlfluff/sqlfluff/pull/4352) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Postgres: Storage parameters [#4350](https://github.com/sqlfluff/sqlfluff/pull/4350) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* TSQL: Bare Function Set [#4351](https://github.com/sqlfluff/sqlfluff/pull/4351) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Postgres: View options [#4340](https://github.com/sqlfluff/sqlfluff/pull/4340) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* BigQuery: SELECT DISTINCT AS STRUCT [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341) [@joar](https://github.com/joar)
* Snowflake: Fix Alter Warehouse [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344) [@AidanHarveyNelson](https://github.com/AidanHarveyNelson)
* Parser: Optimise lookahead_match [#4327](https://github.com/sqlfluff/sqlfluff/pull/4327) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Add support for dbt test macros [#4319](https://github.com/sqlfluff/sqlfluff/pull/4319) [@pdebelak](https://github.com/pdebelak)
* Bracket complex expressions before applying :: operator in Rule L067 [#4326](https://github.com/sqlfluff/sqlfluff/pull/4326) [@pdebelak](https://github.com/pdebelak)

## New Contributors

* [@AidanHarveyNelson](https://github.com/AidanHarveyNelson) made their first contribution in [#4344](https://github.com/sqlfluff/sqlfluff/pull/4344)
* [@joar](https://github.com/joar) made their first contribution in [#4341](https://github.com/sqlfluff/sqlfluff/pull/4341)
* [@jmpfar](https://github.com/jmpfar) made their first contribution in [#4356](https://github.com/sqlfluff/sqlfluff/pull/4356)
* [@jared-rimmer](https://github.com/jared-rimmer) made their first contribution in [#4403](https://github.com/sqlfluff/sqlfluff/pull/4403)
* [@vesatoivonen](https://github.com/vesatoivonen) made their first contribution in [#4335](https://github.com/sqlfluff/sqlfluff/pull/4335)
* [@briankravec](https://github.com/briankravec) made their first contribution in [#4421](https://github.com/sqlfluff/sqlfluff/pull/4421)
* [@saintamh](https://github.com/saintamh) made their first contribution in [#4417](https://github.com/sqlfluff/sqlfluff/pull/4417)

## [2.0.0a4] - 2023-01-26

## Highlights

This is the fourth alpha release for 2.0.0. It contains a fix for the renamed dbt
exceptions in dbt version 1.4.0, a fix for a major performance issue with the 2.0
dbt templater, and improvements to parse performance of large SQL files.

## What’s Changed

* BigQuery: Alter table alter column [#4316](https://github.com/sqlfluff/sqlfluff/pull/4316) [@greg-finley](https://github.com/greg-finley)
* Handle renamed dbt exceptions [#4317](https://github.com/sqlfluff/sqlfluff/pull/4317) [@greg-finley](https://github.com/greg-finley)
* Parser: Fix early exit for simple matchers [#4305](https://github.com/sqlfluff/sqlfluff/pull/4305) [@WittierDinosaur](https://github.com/WittierDinosaur)
* MySQL: Add CREATE DATABASE and ALTER DATABASE [#4307](https://github.com/sqlfluff/sqlfluff/pull/4307) [@yoichi](https://github.com/yoichi)
* BigQuery: Add ALTER VIEW [#4306](https://github.com/sqlfluff/sqlfluff/pull/4306) [@yoichi](https://github.com/yoichi)
* toml: only install `toml` dependency if < Python 3.11 (otherwise use builtin `tomllib`) [#4303](https://github.com/sqlfluff/sqlfluff/pull/4303) [@kevinmarsh](https://github.com/kevinmarsh)
* Fix #4024 example plugin unit tests import [#4302](https://github.com/sqlfluff/sqlfluff/pull/4302) [@matthieucan](https://github.com/matthieucan)

## [2.0.0a3] - 2023-01-16

> NOTE: This is an alpha release for testing purposes. There are several new features
> here, and breaking changes to configuration. We welcome testing feedback from the
> community, but know that this release may feel less polished than usual.

## Highlights

This is the third alpha release for 2.0.0. It contains primarily bugfixes from 2.0.0a2
to allow continued testing. In particular, some of the recent 2.0.0-related changes to
the dbt templater have been reverted, primarily due to performance and other issues.
If those issues can be resolved, the changes will be re-introduced. The long-term goal
of this work is to ease maintenance of the templater by separating dbt integration
concerns from SQLFluff concerns.

There will likely be more changes to rule classification before a full release of 2.0.0,
so anticipate that configuration files may change slightly again in future alpha releases.

## What’s Changed

* Move ISSUE from Snowflake reserved keywords to unreserved ones [#4279](https://github.com/sqlfluff/sqlfluff/pull/4279) [@KaoutherElhamdi](https://github.com/KaoutherElhamdi)
* Due to performance and other issues, revert the osmosis implementation of the templater for now [#4273](https://github.com/sqlfluff/sqlfluff/pull/4273) [@barrywhart](https://github.com/barrywhart)
* Simplify lexing [#4289](https://github.com/sqlfluff/sqlfluff/pull/4289) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4255 (Fix exception on mixed indent description) [#4288](https://github.com/sqlfluff/sqlfluff/pull/4288) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4253 (incorrect trigger of L006 around placeholders) [#4287](https://github.com/sqlfluff/sqlfluff/pull/4287) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4249 (TSQL block comment indents) [#4286](https://github.com/sqlfluff/sqlfluff/pull/4286) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix #4252 (Resolve multiple sensible indents) [#4285](https://github.com/sqlfluff/sqlfluff/pull/4285) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Parser Performance: Cache segment string repr to reduce function calls [#4278](https://github.com/sqlfluff/sqlfluff/pull/4278) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: GRANT SUPPORT CASES [#4283](https://github.com/sqlfluff/sqlfluff/pull/4283) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Dialect: duckdb [#4284](https://github.com/sqlfluff/sqlfluff/pull/4284) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Add variable pattern to CopyIntoTable [#4275](https://github.com/sqlfluff/sqlfluff/pull/4275) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Non-reserved keyword bugfix [#4277](https://github.com/sqlfluff/sqlfluff/pull/4277) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Hive: Add Table constraints DISABLE VALIDATE [#4281](https://github.com/sqlfluff/sqlfluff/pull/4281) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Snowflake: Add Python and Java UDF support [#4280](https://github.com/sqlfluff/sqlfluff/pull/4280) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SparkSQL: Support DIV binary operator [#4282](https://github.com/sqlfluff/sqlfluff/pull/4282) [@WittierDinosaur](https://github.com/WittierDinosaur)
* BigQuery: Add ALTER TABLE [#4272](https://github.com/sqlfluff/sqlfluff/pull/4272) [@yoichi](https://github.com/yoichi)
* Snowflake: Update bare functions [#4276](https://github.com/sqlfluff/sqlfluff/pull/4276) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve Dockerfile to reduce image size [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262) [@tdurieux](https://github.com/tdurieux)

## New Contributors

* [@tdurieux](https://github.com/tdurieux) made their first contribution in [#4262](https://github.com/sqlfluff/sqlfluff/pull/4262)

## [2.0.0a2] - 2023-01-07

## Highlights

This is the second alpha release for 2.0.0. It contains primarily bugfixes from 2.0.0a1
to allow continued testing, along with dialect improvements for Snowflake, Postgres and DB2.

## What’s Changed

* Push indents to after comments [#4239](https://github.com/sqlfluff/sqlfluff/pull/4239) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Templated fix improvements and indentation [#4245](https://github.com/sqlfluff/sqlfluff/pull/4245) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix block comment indent fixes #4224 [#4240](https://github.com/sqlfluff/sqlfluff/pull/4240) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix for #4222 [#4236](https://github.com/sqlfluff/sqlfluff/pull/4236) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Allow multiple unpivot [#4242](https://github.com/sqlfluff/sqlfluff/pull/4242) [@greg-finley](https://github.com/greg-finley)
* postgres: add row-level locks to SELECT statements [#4209](https://github.com/sqlfluff/sqlfluff/pull/4209) [@Yiwen-Gao](https://github.com/Yiwen-Gao)
* Add more parsing logic for db2 [#4206](https://github.com/sqlfluff/sqlfluff/pull/4206) [@NelsonTorres](https://github.com/NelsonTorres)
* Include the filename in critical exceptions [#4225](https://github.com/sqlfluff/sqlfluff/pull/4225) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update Readme Badges [#4219](https://github.com/sqlfluff/sqlfluff/pull/4219) [@alanmcruickshank](https://github.com/alanmcruickshank)
* diff-quality: Handle the case where there are no files to check [#4220](https://github.com/sqlfluff/sqlfluff/pull/4220) [@barrywhart](https://github.com/barrywhart)

## [2.0.0a1] - 2022-12-28

## Highlights

This is the first alpha version for 2.0.0. It brings all of the changes to whitespace
handling, including a total rewrite of indentation and long line logic (L003 & L016).
That brings several breaking changes to the configuration of layout; see the
[layout docs](https://docs.sqlfluff.com/en/stable/layout.html) for more details and
familiarise yourself with the new [default configuration](https://docs.sqlfluff.com/en/stable/configuration.html#default-configuration).

In addition, for the dbt templater, this introduces a large re-write of the codebase,
dropping support for dbt versions before 1.0.0. This leverages functionality from
[dbt-osmosis](https://github.com/z3z1ma/dbt-osmosis) to reduce the amount of
functionality supported directly by SQLFluff, and performance during testing of the
new version has been reported as significantly faster.

## What’s Changed

* Fixed False Positive for L037 [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198) [@WillAyd](https://github.com/WillAyd)
* Indentation bug [#4217](https://github.com/sqlfluff/sqlfluff/pull/4217) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Show fatal errors regardless [#4214](https://github.com/sqlfluff/sqlfluff/pull/4214) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Don't consider templated whitespace [#4213](https://github.com/sqlfluff/sqlfluff/pull/4213) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Don't pickle the templater [#4208](https://github.com/sqlfluff/sqlfluff/pull/4208) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Support column character set and collation [#4204](https://github.com/sqlfluff/sqlfluff/pull/4204) [@yoichi](https://github.com/yoichi)
* Fix some issues with Docker Compose environment [#4201](https://github.com/sqlfluff/sqlfluff/pull/4201) [@barrywhart](https://github.com/barrywhart)
* Implicit Indents [#4054](https://github.com/sqlfluff/sqlfluff/pull/4054) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Tweak Coveralls settings [#4199](https://github.com/sqlfluff/sqlfluff/pull/4199) [@barrywhart](https://github.com/barrywhart)
* In addition to Codecov, also upload to Coveralls [#4197](https://github.com/sqlfluff/sqlfluff/pull/4197) [@barrywhart](https://github.com/barrywhart)
* Fix: create table default cast returns unparsable section [#4192](https://github.com/sqlfluff/sqlfluff/pull/4192) [@NelsonTorres](https://github.com/NelsonTorres)
* Fix JSON parsing issue with diff-quality plugin [#4190](https://github.com/sqlfluff/sqlfluff/pull/4190) [@barrywhart](https://github.com/barrywhart)
* Codecov migration [#4195](https://github.com/sqlfluff/sqlfluff/pull/4195) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Stop adding trailing os.sep if ignore file is on the root of the file… [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182) [@baa-ableton](https://github.com/baa-ableton)
* Port dbt-osmosis templater changes to SQLFluff [#3976](https://github.com/sqlfluff/sqlfluff/pull/3976) [@barrywhart](https://github.com/barrywhart)
* Reflow 4: Long Lines [#4067](https://github.com/sqlfluff/sqlfluff/pull/4067) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix comment bug on reindent [#4179](https://github.com/sqlfluff/sqlfluff/pull/4179) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reflow 3: Reindent [#3942](https://github.com/sqlfluff/sqlfluff/pull/3942) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@baa-ableton](https://github.com/baa-ableton) made their first contribution in [#4182](https://github.com/sqlfluff/sqlfluff/pull/4182)
* [@WillAyd](https://github.com/WillAyd) made their first contribution in [#4198](https://github.com/sqlfluff/sqlfluff/pull/4198)

## [1.4.5] - 2022-12-18

## Highlights

This is a bugfix release, primarily for diff-quality. In addition, a new rule for
spacing around parentheses is also included.

This is also the final 1.x.x release. Following releases will be a series of alpha
releases for 2.x.x. If you are affected by any outstanding bugs or regressions from
this release, consider either rolling _backward_ to a previous release without those
issues, or failing _forward_ to an alpha release for 2.x.x (or a full release if
that's out). Note that 2.x.x will bring a selection of breaking changes to config
file structure, rule categorisation and dbt support.

## What’s Changed

* Add rule for space around parenthesis [#4131](https://github.com/sqlfluff/sqlfluff/pull/4131) [@NelsonTorres](https://github.com/NelsonTorres)
* diff-quality plugin: Print invalid JSON on parse failure [#4176](https://github.com/sqlfluff/sqlfluff/pull/4176) [@barrywhart](https://github.com/barrywhart)
* Ensure diff-quality runs the correct SQLFluff [#4175](https://github.com/sqlfluff/sqlfluff/pull/4175) [@barrywhart](https://github.com/barrywhart)

## New Contributors

* [@NelsonTorres](https://github.com/NelsonTorres) made their first contribution in [#4131](https://github.com/sqlfluff/sqlfluff/pull/4131)

## [1.4.4] - 2022-12-14

## Highlights

Bug fix for 1.4.3, which was incorrectly flagging L006 for concat operators (`||`)
and other two-symbol binary operators.

## What’s Changed

* Recognise || as an operator to avoid rule L006 flagging it [#4168](https://github.com/sqlfluff/sqlfluff/pull/4168) [@tunetheweb](https://github.com/tunetheweb)
* :bug: Check verbosity level of pytest run before running certain tests [#4167](https://github.com/sqlfluff/sqlfluff/pull/4167) [@pwildenhain](https://github.com/pwildenhain)
* [snowflake] Add support for snowflake select * exclude/replace syntax [#4160](https://github.com/sqlfluff/sqlfluff/pull/4160) [@moreaupascal56](https://github.com/moreaupascal56)

## [1.4.3] - 2022-12-13

## Highlights

* Rewrote `diff-quality` plugin to run SQLFluff as a subprocess. More reliable, easier to switch between `diff-quality` and running `sqlfluff lint` directly.
* New rule L067 enforces consistent syntax for type casts.
* New rule L068 enforces a consistent number of columns in set queries (e.g. UNION).
* Initial support for Materialize dialect.

## What's Changed

* Add flyway variables support via placeholder templater [#4026](https://github.com/sqlfluff/sqlfluff/pull/4026) [@srjonemed](https://github.com/srjonemed)
* Fix Spark comparison parsing [#4066](https://github.com/sqlfluff/sqlfluff/pull/4066) [@ms32035](https://github.com/ms32035)
* Add errors and fails to pytest summary [#4076](https://github.com/sqlfluff/sqlfluff/pull/4076) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Storage reference segment [#4057](https://github.com/sqlfluff/sqlfluff/pull/4057) [@YilangHe](https://github.com/YilangHe)
* New rule L069: Consistent syntax for sql type casting [#3747](https://github.com/sqlfluff/sqlfluff/pull/3747) [@bolajiwahab](https://github.com/bolajiwahab)
* Postgres: Views and named notations [#4073](https://github.com/sqlfluff/sqlfluff/pull/4073) [@davetapley](https://github.com/davetapley)
* Switch reflow buffer from LintFix to LintResult [#4083](https://github.com/sqlfluff/sqlfluff/pull/4083) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support parallel linting when many individual files specified [#4084](https://github.com/sqlfluff/sqlfluff/pull/4084) [@barrywhart](https://github.com/barrywhart)
* Rule: check number of columns in set operations match [#4028](https://github.com/sqlfluff/sqlfluff/pull/4028) [@erevear](https://github.com/erevear)
* Fix syntax in indentation sample code docs [#4087](https://github.com/sqlfluff/sqlfluff/pull/4087) [@PBalsdon](https://github.com/PBalsdon)
* Remove "mystery" binary file added in PR #2923 [#4088](https://github.com/sqlfluff/sqlfluff/pull/4088) [@barrywhart](https://github.com/barrywhart)
* Fix mypy issue with regex [#4097](https://github.com/sqlfluff/sqlfluff/pull/4097) [@barrywhart](https://github.com/barrywhart)
* Enable variable reference names to have leading underscore for snowflake dialect [#4098](https://github.com/sqlfluff/sqlfluff/pull/4098) [@gavin-tsang](https://github.com/gavin-tsang)
* Fix flake8 issue with segment_predicates.py [#4101](https://github.com/sqlfluff/sqlfluff/pull/4101) [@barrywhart](https://github.com/barrywhart)
* Fix bug in example rule plugin [#4103](https://github.com/sqlfluff/sqlfluff/pull/4103) [@barrywhart](https://github.com/barrywhart)
* Fix bug where L034 should ignore INSERT or "CREATE TABLE AS SELECT" with CTE [#4108](https://github.com/sqlfluff/sqlfluff/pull/4108) [@barrywhart](https://github.com/barrywhart)
* Postgres: Alter type rename value [#4100](https://github.com/sqlfluff/sqlfluff/pull/4100) [@greg-finley](https://github.com/greg-finley)
* Bug fix: dbt templater ignores .sqlfluff file encoding on Windows [#4109](https://github.com/sqlfluff/sqlfluff/pull/4109) [@barrywhart](https://github.com/barrywhart)
* Add initial Materialize dialect [#4112](https://github.com/sqlfluff/sqlfluff/pull/4112) [@bobbyiliev](https://github.com/bobbyiliev)
* L015: Handle COUNT(DISTINCT(col)) [#4110](https://github.com/sqlfluff/sqlfluff/pull/4110) [@barrywhart](https://github.com/barrywhart)
* [Snowflake] format type options extensions for copy_into_location [#4129](https://github.com/sqlfluff/sqlfluff/pull/4129) [@YilangHe](https://github.com/YilangHe)
* Fix tox arguments [#4144](https://github.com/sqlfluff/sqlfluff/pull/4144) [@greg-finley](https://github.com/greg-finley)
* [DB2] Fix parsing of string identifiers [#4134](https://github.com/sqlfluff/sqlfluff/pull/4134) [@borchero](https://github.com/borchero)
* BigQuery: Allow double quoted literal in export_option_list [#4126](https://github.com/sqlfluff/sqlfluff/pull/4126) [@yoichi](https://github.com/yoichi)
* Only upload 3 sets of test results to codecov (possible workaround for hanging builds) [#4147](https://github.com/sqlfluff/sqlfluff/pull/4147) [@barrywhart](https://github.com/barrywhart)
* SparkSQL: ILIKE [#4138](https://github.com/sqlfluff/sqlfluff/pull/4138) [@greg-finley](https://github.com/greg-finley)
* SparkSQL: Mark `AS` as optional keyword for CTE & CTS [#4127](https://github.com/sqlfluff/sqlfluff/pull/4127) [@ulixius9](https://github.com/ulixius9)
* Fix passenv to work with tox 4 [#4154](https://github.com/sqlfluff/sqlfluff/pull/4154) [@tunetheweb](https://github.com/tunetheweb)
* Allow deprecated --disable_progress_bar flag for fix command [#4151](https://github.com/sqlfluff/sqlfluff/pull/4151) [@pdebelak](https://github.com/pdebelak)
* Implement diff_quality_plugin using command-line rather than Python [#4148](https://github.com/sqlfluff/sqlfluff/pull/4148) [@barrywhart](https://github.com/barrywhart)
* L037: insert ASC just after column_reference [#4149](https://github.com/sqlfluff/sqlfluff/pull/4149) [@yoichi](https://github.com/yoichi)

## New Contributors

* [@srjonemed](https://github.com/srjonemed) made their first contribution in [#4026](https://github.com/sqlfluff/sqlfluff/pull/4026)
* [@ms32035](https://github.com/ms32035) made their first contribution in [#4066](https://github.com/sqlfluff/sqlfluff/pull/4066)
* [@davetapley](https://github.com/davetapley) made their first contribution in [#4073](https://github.com/sqlfluff/sqlfluff/pull/4073)
* [@PBalsdon](https://github.com/PBalsdon) made their first contribution in [#4087](https://github.com/sqlfluff/sqlfluff/pull/4087)
* [@gavin-tsang](https://github.com/gavin-tsang) made their first contribution in [#4098](https://github.com/sqlfluff/sqlfluff/pull/4098)
* [@bobbyiliev](https://github.com/bobbyiliev) made their first contribution in [#4112](https://github.com/sqlfluff/sqlfluff/pull/4112)
* [@ulixius9](https://github.com/ulixius9) made their first contribution in [#4127](https://github.com/sqlfluff/sqlfluff/pull/4127)

## [1.4.2] - 2022-11-13

## Highlights

This release is less about internals and much more about some quality of life
improvements and dialect changes. The most notable are:

- The introduction of a `sqlfluff render` command to preview the results of templated sql.
- Linting errors within templated loops should now only appear once in the linting output.
- Indentation around jinja `{% set %}` statements should now be more consistent.
- Linting errors around unparsable code are now more appropriately handled (with more to come soon on that front).
- Error messages when specified files aren't found are now more specific.

We've also got dialect improvements for Redshift, SOQL & SparkSQL.

## What’s Changed

* Fix type error in `get_rules` hook of plugin example [#4060](https://github.com/sqlfluff/sqlfluff/pull/4060) [@Samyak2](https://github.com/Samyak2)
* L003: Add missing "pragma: no cover" [#4058](https://github.com/sqlfluff/sqlfluff/pull/4058) [@barrywhart](https://github.com/barrywhart)
* Fix bug in sparksql SELECT statement termination at UNION #4050 [#4052](https://github.com/sqlfluff/sqlfluff/pull/4052) [@anna-azizian](https://github.com/anna-azizian)
* Deduplicate violations in the source space [#4041](https://github.com/sqlfluff/sqlfluff/pull/4041) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use "docker compose", not "docker-compose" [#4055](https://github.com/sqlfluff/sqlfluff/pull/4055) [@barrywhart](https://github.com/barrywhart)
* Allow warnings for specific rules [#4053](https://github.com/sqlfluff/sqlfluff/pull/4053) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Better file not found error #1023 [#4051](https://github.com/sqlfluff/sqlfluff/pull/4051) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Filter out issues in unparsable sections [#4032](https://github.com/sqlfluff/sqlfluff/pull/4032) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: ADD and DROP without COLUMN [#4049](https://github.com/sqlfluff/sqlfluff/pull/4049) [@greg-finley](https://github.com/greg-finley)
* Make render command [#4043](https://github.com/sqlfluff/sqlfluff/pull/4043) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bump after_n_builds to 10 [#4046](https://github.com/sqlfluff/sqlfluff/pull/4046) [@greg-finley](https://github.com/greg-finley)
* Redshift: allows for parenthesis around FROM content [#3962](https://github.com/sqlfluff/sqlfluff/pull/3962) [@adam-tokarski](https://github.com/adam-tokarski)
* Update CI to use Python 3.11 [#4038](https://github.com/sqlfluff/sqlfluff/pull/4038) [@greg-finley](https://github.com/greg-finley)
* Classify self contained set statements as templated [#4034](https://github.com/sqlfluff/sqlfluff/pull/4034) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Date and Datetime literals in SOQL [#4037](https://github.com/sqlfluff/sqlfluff/pull/4037) [@alanmcruickshank](https://github.com/alanmcruickshank)
* mypy edits for 0.990 [#4035](https://github.com/sqlfluff/sqlfluff/pull/4035) [@alanmcruickshank](https://github.com/alanmcruickshank)
* sparksql: support for create/remove widget clause [#4021](https://github.com/sqlfluff/sqlfluff/pull/4021) [@Coola4kov](https://github.com/Coola4kov)
* Redshift CREATE EXTERNAL FUNCTION statement [#4011](https://github.com/sqlfluff/sqlfluff/pull/4011) [@rpr-ableton](https://github.com/rpr-ableton)
* Update Redshift bare functions [#4012](https://github.com/sqlfluff/sqlfluff/pull/4012) [@rpr-ableton](https://github.com/rpr-ableton)

## New Contributors

* [@Coola4kov](https://github.com/Coola4kov) made their first contribution in [#4021](https://github.com/sqlfluff/sqlfluff/pull/4021)
* [@anna-azizian](https://github.com/anna-azizian) made their first contribution in [#4052](https://github.com/sqlfluff/sqlfluff/pull/4052)

## [1.4.1] - 2022-10-31

## Highlights

This is a fix to the configuration migration from 1.4.0. In that release, the
configuration of leading/trailing operators would be migrated the wrong way around,
and precedence between new and old configuration values behaved unexpectedly.

## What’s Changed

* Config precedence [#4007](https://github.com/sqlfluff/sqlfluff/pull/4007) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Redshift CREATE/ATTACH/DETACH/DROP RLS POLICY statements [#4004](https://github.com/sqlfluff/sqlfluff/pull/4004) [@rpr-ableton](https://github.com/rpr-ableton)
* Redshift: Add support for APPROXIMATE functions [#3997](https://github.com/sqlfluff/sqlfluff/pull/3997) [@rpr-ableton](https://github.com/rpr-ableton)
* hotfix to config migration [#4005](https://github.com/sqlfluff/sqlfluff/pull/4005) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [1.4.0] - 2022-10-31

## Highlights

This release brings several internal changes, and acts as a prelude to 2.0.0 which
will be released fairly soon. In particular, the following config values have changed:

- `sqlfluff:rules:L007:operator_new_lines` has been changed to `sqlfluff:layout:type:binary_operator:line_position`.
- `sqlfluff:rules:comma_style` and `sqlfluff:rules:L019:comma_style` have both been consolidated into `sqlfluff:layout:type:comma:line_position`.

If any of these values have been set in your config, they will be automatically
translated to the new values at runtime, and a warning will be shown. To silence the
warning, update your config file to the new values (an illustrative sketch follows at
the end of these highlights). For more details on configuring layout (including some
changes yet to come in future versions) see [the docs](https://docs.sqlfluff.com/en/latest/layout.html#configuring-layout).

These changes are driven by underlying centralisation in the routines which control
layout. While no breaking changes are expected in this release, you may find slight
differences in how SQLFluff handles edge cases. We believe in the majority of cases
these are _more_ consistent, but if you find any which are problematic then do post
an issue on GitHub as usual.

Other highlights from this release:

- Better dbt support for graph nodes and avoiding dependency conflicts.
- Numerous dialect improvements to T-SQL, MySQL, SparkSQL, SQLite, Athena, Snowflake, Hive, Postgres & Databricks.

There have also been first time contributions from **10 new contributors**! 🎉🎉🎉
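
As a minimal sketch of that migration in a `.sqlfluff` file: the colon-separated
paths above split into a section and a key, so `sqlfluff:layout:type:comma:line_position`
becomes a `[sqlfluff:layout:type:comma]` section with a `line_position` key. The
`line_position` values shown here are illustrative, not a recommendation; check the
layout docs for the available options.

```ini
# Old style (pre-1.4.0): still translated automatically at runtime, with a warning.
[sqlfluff:rules:L019]
comma_style = trailing

# New style (1.4.0 onwards): equivalent layout-based configuration.
[sqlfluff:layout:type:comma]
line_position = trailing

[sqlfluff:layout:type:binary_operator]
line_position = leading
```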

## What’s Changed

* Snowflake partition nonreserved keyword [#3972](https://github.com/sqlfluff/sqlfluff/pull/3972) [@YilangHe](https://github.com/YilangHe)
* Hive: Add support for EXCHANGE PARTITION in ALTER TABLE [#3991](https://github.com/sqlfluff/sqlfluff/pull/3991) [@nahuelverdugo](https://github.com/nahuelverdugo)
* Resolve parse error on multiple bracketed statements [#3994](https://github.com/sqlfluff/sqlfluff/pull/3994) [@yoichi](https://github.com/yoichi)
* Enable parsing of CLONE keyword in bigquery dialect [#3984](https://github.com/sqlfluff/sqlfluff/pull/3984) [@realLyans](https://github.com/realLyans)
* BigQuery: allow nesting of SetExpressionSegment [#3990](https://github.com/sqlfluff/sqlfluff/pull/3990) [@yoichi](https://github.com/yoichi)
* feat(clickhouse): Support non-standard CREATE TABLE statement [#3986](https://github.com/sqlfluff/sqlfluff/pull/3986) [@tomasfarias](https://github.com/tomasfarias)
* Fix Windows CI check [#3992](https://github.com/sqlfluff/sqlfluff/pull/3992) [@greg-finley](https://github.com/greg-finley)
* Snowflake tag reference segment [#3985](https://github.com/sqlfluff/sqlfluff/pull/3985) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix Jinja templater issue where undefined callable threw an exception [#3982](https://github.com/sqlfluff/sqlfluff/pull/3982) [@barrywhart](https://github.com/barrywhart)
* Reflow Documentation V1 [#3970](https://github.com/sqlfluff/sqlfluff/pull/3970) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow lambda argument columns to be unqualified [#3971](https://github.com/sqlfluff/sqlfluff/pull/3971) [@olagjo](https://github.com/olagjo)
* document inline configuration [#3981](https://github.com/sqlfluff/sqlfluff/pull/3981) [@alanmcruickshank](https://github.com/alanmcruickshank)
* [BUGFIX] Changing cwd temporarily on manifest load as dbt is not using project_dir to read/write target folder [#3979](https://github.com/sqlfluff/sqlfluff/pull/3979) [@barrywhart](https://github.com/barrywhart)
* Fix type annotation of user_rules in `Linter` [#3977](https://github.com/sqlfluff/sqlfluff/pull/3977) [@Samyak2](https://github.com/Samyak2)
* Unpin `markupsafe` [#3967](https://github.com/sqlfluff/sqlfluff/pull/3967) [@judahrand](https://github.com/judahrand)
* Snowflake frame clause variables [#3969](https://github.com/sqlfluff/sqlfluff/pull/3969) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SparkSQL: added support for : (colon sign) operator (Databricks SQL) [#3956](https://github.com/sqlfluff/sqlfluff/pull/3956) [@karabulute](https://github.com/karabulute)
* Athena: Add support for using underscore aliases [#3965](https://github.com/sqlfluff/sqlfluff/pull/3965) [@hectcastro](https://github.com/hectcastro)
* Snowflake: ALTER TABLE constraint actions [#3959](https://github.com/sqlfluff/sqlfluff/pull/3959) [@erevear](https://github.com/erevear)
* MySQL: Support REPLACE statement [#3964](https://github.com/sqlfluff/sqlfluff/pull/3964) [@yoichi](https://github.com/yoichi)
* TSQL: Add support for UPDATE STATISTICS option FULLSCAN [#3950](https://github.com/sqlfluff/sqlfluff/pull/3950) [@hectcastro](https://github.com/hectcastro)
* ANSI: fixed typos in docstrings and comments [#3953](https://github.com/sqlfluff/sqlfluff/pull/3953) [@karabulute](https://github.com/karabulute)
* Postgres: ALTER PROCEDURE [#3949](https://github.com/sqlfluff/sqlfluff/pull/3949) [@krokofant](https://github.com/krokofant)
* T-SQL: Allow arbitrary expressions in PARTITION BY clause [#3939](https://github.com/sqlfluff/sqlfluff/pull/3939) [@borchero](https://github.com/borchero)
* Enable dumping of performance information to csv. [#3937](https://github.com/sqlfluff/sqlfluff/pull/3937) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Consolidate comma style configs [#3945](https://github.com/sqlfluff/sqlfluff/pull/3945) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adding missing KeywordSegments for different file types in Athena dialect [#3898](https://github.com/sqlfluff/sqlfluff/pull/3898) [@CommonCrisis](https://github.com/CommonCrisis)
* Add templated block uuids [#3936](https://github.com/sqlfluff/sqlfluff/pull/3936) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Load the full dbt manifest [#3926](https://github.com/sqlfluff/sqlfluff/pull/3926) [@davajm](https://github.com/davajm)
* MySQL: Support optional "IF NOT EXISTS" with CREATE TRIGGER [#3943](https://github.com/sqlfluff/sqlfluff/pull/3943) [@yoichi](https://github.com/yoichi)
* T-SQL: Allow to parse SYNONYM statements [#3941](https://github.com/sqlfluff/sqlfluff/pull/3941) [@borchero](https://github.com/borchero)
* Hive: Add support for LATERAL VIEW clause [#3935](https://github.com/sqlfluff/sqlfluff/pull/3935) [@hectcastro](https://github.com/hectcastro)
* Fix crash in L042 on "UNION" or other "set" queries [#3931](https://github.com/sqlfluff/sqlfluff/pull/3931) [@barrywhart](https://github.com/barrywhart)
* Refactor Lexer: Split apart elements_to_segments and refine placement of meta segments. [#3925](https://github.com/sqlfluff/sqlfluff/pull/3925) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add INSERT INTO <> DEFAULT VALUES to ANSI SQL and T-SQL [#3934](https://github.com/sqlfluff/sqlfluff/pull/3934) [@borchero](https://github.com/borchero)
* Break apart reflow classes and extract methods [#3919](https://github.com/sqlfluff/sqlfluff/pull/3919) [@alanmcruickshank](https://github.com/alanmcruickshank)
* T-SQL: Fix indentation of OUTER APPLY [#3932](https://github.com/sqlfluff/sqlfluff/pull/3932) [@borchero](https://github.com/borchero)
* MySQL: Fix create trigger [#3928](https://github.com/sqlfluff/sqlfluff/pull/3928) [@adam-tokarski](https://github.com/adam-tokarski)
* SparkSQL: Fixed bug with `QUALIFY` usage without `WHERE` clause (applies also for Databricks dialect) [#3930](https://github.com/sqlfluff/sqlfluff/pull/3930) [@karabulute](https://github.com/karabulute)
* T-SQL: Allow specifying join hints [#3921](https://github.com/sqlfluff/sqlfluff/pull/3921) [@borchero](https://github.com/borchero)
* SQLite: Add support for table-level CHECK constraint [#3923](https://github.com/sqlfluff/sqlfluff/pull/3923) [@hectcastro](https://github.com/hectcastro)
* sparksql: added * EXCEPT for SELECT clause [#3922](https://github.com/sqlfluff/sqlfluff/pull/3922) [@adam-tokarski](https://github.com/adam-tokarski)
* Map old configs to new configs [#3915](https://github.com/sqlfluff/sqlfluff/pull/3915) [@alanmcruickshank](https://github.com/alanmcruickshank)
* [issue_3794] allow to use 'usage' as identifier for postgres [#3914](https://github.com/sqlfluff/sqlfluff/pull/3914) [@adam-tokarski](https://github.com/adam-tokarski)
* `DATABRICKS`: Add Support for Delta Live Tables (DLT) Syntax [#3899](https://github.com/sqlfluff/sqlfluff/pull/3899) [@R7L208](https://github.com/R7L208)
* Postgres Revoke fix [#3912](https://github.com/sqlfluff/sqlfluff/pull/3912) [@greg-finley](https://github.com/greg-finley)
* fix: Click output to stderr on errors [#3902](https://github.com/sqlfluff/sqlfluff/pull/3902) [@KingMichaelPark](https://github.com/KingMichaelPark)
* fix issue with empty enum for postgres [#3910](https://github.com/sqlfluff/sqlfluff/pull/3910) [@adam-tokarski](https://github.com/adam-tokarski)
* feat: Optional numerics for postgres arrays [#3903](https://github.com/sqlfluff/sqlfluff/pull/3903) [@KingMichaelPark](https://github.com/KingMichaelPark)
* fix(test): Return ParseExample namedtuple in get_parse_fixtures [#3911](https://github.com/sqlfluff/sqlfluff/pull/3911) [@tomasfarias](https://github.com/tomasfarias)
* Fix typos [#3901](https://github.com/sqlfluff/sqlfluff/pull/3901) [@kianmeng](https://github.com/kianmeng)
* provide custom DeprecatedOption [#3904](https://github.com/sqlfluff/sqlfluff/pull/3904) [@adam-tokarski](https://github.com/adam-tokarski)
* fix(redshift): Allow keywords in qualified references [#3905](https://github.com/sqlfluff/sqlfluff/pull/3905) [@tomasfarias](https://github.com/tomasfarias)
* Reflow centralisation 2: Rebreak (operators & commas) [#3847](https://github.com/sqlfluff/sqlfluff/pull/3847) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Bring L008 into reflow work [#3908](https://github.com/sqlfluff/sqlfluff/pull/3908) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Snowflake: Create network policy ip lists [#3888](https://github.com/sqlfluff/sqlfluff/pull/3888) [@greg-finley](https://github.com/greg-finley)
* MySQL: Implement (key_part, ...) in index definitions [#3887](https://github.com/sqlfluff/sqlfluff/pull/3887) [@yoichi](https://github.com/yoichi)
* Reflow centralisation 1: Scope + Respace [#3824](https://github.com/sqlfluff/sqlfluff/pull/3824) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update github badge and add docker badge [#3884](https://github.com/sqlfluff/sqlfluff/pull/3884) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@kianmeng](https://github.com/kianmeng) made their first contribution in [#3901](https://github.com/sqlfluff/sqlfluff/pull/3901)
* [@KingMichaelPark](https://github.com/KingMichaelPark) made their first contribution in [#3903](https://github.com/sqlfluff/sqlfluff/pull/3903)
* [@hectcastro](https://github.com/hectcastro) made their first contribution in [#3923](https://github.com/sqlfluff/sqlfluff/pull/3923)
* [@karabulute](https://github.com/karabulute) made their first contribution in [#3930](https://github.com/sqlfluff/sqlfluff/pull/3930)
* [@davajm](https://github.com/davajm) made their first contribution in [#3926](https://github.com/sqlfluff/sqlfluff/pull/3926)
* [@CommonCrisis](https://github.com/CommonCrisis) made their first contribution in [#3898](https://github.com/sqlfluff/sqlfluff/pull/3898)
* [@krokofant](https://github.com/krokofant) made their first contribution in [#3949](https://github.com/sqlfluff/sqlfluff/pull/3949)
* [@Samyak2](https://github.com/Samyak2) made their first contribution in [#3977](https://github.com/sqlfluff/sqlfluff/pull/3977)
* [@realLyans](https://github.com/realLyans) made their first contribution in [#3984](https://github.com/sqlfluff/sqlfluff/pull/3984)
* [@nahuelverdugo](https://github.com/nahuelverdugo) made their first contribution in [#3991](https://github.com/sqlfluff/sqlfluff/pull/3991)
* [@YilangHe](https://github.com/YilangHe) made their first contribution in [#3972](https://github.com/sqlfluff/sqlfluff/pull/3972)

## [1.3.2] - 2022-09-27

## Highlights

This is primarily a release for dialect fixes and improvements, with additions and
changes to TSQL, Snowflake, MySQL & Redshift.
The other changes of note are:

1. Support for warnings when users set old, removed config values, which provides a warning mechanism to support future changes.
2. Improvements to the fix routines for L014 and L042 to handle some trickier cases.

## What’s Changed

* Add CreateUserSegment for Snowflake dialect [#3880](https://github.com/sqlfluff/sqlfluff/pull/3880) [@Gal40n04ek](https://github.com/Gal40n04ek)
* raw_segments_with_ancestors [#3878](https://github.com/sqlfluff/sqlfluff/pull/3878) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Adjust TSQL Operators [#3877](https://github.com/sqlfluff/sqlfluff/pull/3877) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Refactor path_to [#3875](https://github.com/sqlfluff/sqlfluff/pull/3875) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support Removed warning on configs [#3874](https://github.com/sqlfluff/sqlfluff/pull/3874) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MySQL: Support column-path operator for JSON type [#3864](https://github.com/sqlfluff/sqlfluff/pull/3864) [@yoichi](https://github.com/yoichi)
* T-SQL: ALTER FUNCTION/PROCEDURE/VIEW parsing [#3867](https://github.com/sqlfluff/sqlfluff/pull/3867) [@differgroup](https://github.com/differgroup)
* MySQL: Support hexadecimal literals and bit value literals [#3869](https://github.com/sqlfluff/sqlfluff/pull/3869) [@yoichi](https://github.com/yoichi)
* MySQL: Treat double quotes the same as single quotes [#3871](https://github.com/sqlfluff/sqlfluff/pull/3871) [@yoichi](https://github.com/yoichi)
* Snowflake: COMMIT WORK [#3862](https://github.com/sqlfluff/sqlfluff/pull/3862) [@rglbr](https://github.com/rglbr)
* Snowflake: AlterShareStatementSegment and CreateDatabaseFromShareStatementSegment [#3858](https://github.com/sqlfluff/sqlfluff/pull/3858) [@moreaupascal56](https://github.com/moreaupascal56)
* MySQL: Add CREATE/ALTER VIEW [#3859](https://github.com/sqlfluff/sqlfluff/pull/3859) [@wfelipew](https://github.com/wfelipew)
* Redshift: CREATE TABLE LIKE enhancements [#3853](https://github.com/sqlfluff/sqlfluff/pull/3853) [@greg-finley](https://github.com/greg-finley)
* L014 leading underscore capitalization inference fix [#3841](https://github.com/sqlfluff/sqlfluff/pull/3841) [@j-svensmark](https://github.com/j-svensmark)
* MySQL: Add extended DROP TRIGGER functionality [#3846](https://github.com/sqlfluff/sqlfluff/pull/3846) [@yoichi](https://github.com/yoichi)
* Allow standalone aliases in L027 [#3848](https://github.com/sqlfluff/sqlfluff/pull/3848) [@olagjo](https://github.com/olagjo)
* L042: Enable autofix for some tricky cases [#3700](https://github.com/sqlfluff/sqlfluff/pull/3700) [@barrywhart](https://github.com/barrywhart)
* Snowflake: CREATE FUNCTION IF NOT EXISTS [#3845](https://github.com/sqlfluff/sqlfluff/pull/3845) [@greg-finley](https://github.com/greg-finley)
* ignore functions with more than one element ... [#3792](https://github.com/sqlfluff/sqlfluff/pull/3792) [@fmms](https://github.com/fmms)
* MySQL: support remaining constraint parts of CREATE/ALTER TABLE [#3827](https://github.com/sqlfluff/sqlfluff/pull/3827) [@yoichi](https://github.com/yoichi)

## New Contributors

* [@olagjo](https://github.com/olagjo) made their first contribution in [#3848](https://github.com/sqlfluff/sqlfluff/pull/3848)
* [@j-svensmark](https://github.com/j-svensmark) made their first contribution in [#3841](https://github.com/sqlfluff/sqlfluff/pull/3841)
* [@wfelipew](https://github.com/wfelipew) made their first contribution in [#3859](https://github.com/sqlfluff/sqlfluff/pull/3859)
* [@moreaupascal56](https://github.com/moreaupascal56) made their first contribution in [#3858](https://github.com/sqlfluff/sqlfluff/pull/3858)
* [@rglbr](https://github.com/rglbr) made their first contribution in [#3862](https://github.com/sqlfluff/sqlfluff/pull/3862)
* [@differgroup](https://github.com/differgroup) made their first contribution in [#3867](https://github.com/sqlfluff/sqlfluff/pull/3867)

## [1.3.1] - 2022-09-09

## Highlights

* More refactoring of parse structures in preparation for upcoming refactor of formatting/whitespace rules.
* Fixes some bugs in L003 (indentation).
* New config flag `large_file_skip_byte_limit` which applies **prior to** loading the file.
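
As a sketch of how this might be configured, the flag sits in the top-level
`[sqlfluff]` section of a config file. The default value and the disable behaviour
shown here are assumptions based on the current docs, so verify them for your version.

```ini
[sqlfluff]
# Files larger than this many bytes are skipped before being loaded at all.
# Assumed shipped default shown; a value of 0 disables the check entirely.
large_file_skip_byte_limit = 20000
```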

## What’s Changed

* Snowflake: Fix syntax errors in tests [#3834](https://github.com/sqlfluff/sqlfluff/pull/3834) [@JamesRTaylor](https://github.com/JamesRTaylor)
* Add support for additional magic methods on DummyUndefined [#3835](https://github.com/sqlfluff/sqlfluff/pull/3835) [@barrywhart](https://github.com/barrywhart)
* MySQL: support variable assignments by assignment operator := [#3829](https://github.com/sqlfluff/sqlfluff/pull/3829) [@yoichi](https://github.com/yoichi)
* MYSQL: improve lexing for single-quoted strings [#3831](https://github.com/sqlfluff/sqlfluff/pull/3831) [@mdahlman](https://github.com/mdahlman)
* MySQL: More support for index definition in CREATE TABLE [#3826](https://github.com/sqlfluff/sqlfluff/pull/3826) [@yoichi](https://github.com/yoichi)
* Typed matching and ripping out the rest of .name [#3819](https://github.com/sqlfluff/sqlfluff/pull/3819) [@alanmcruickshank](https://github.com/alanmcruickshank)
* sparksql dialect to support lambda expressions (->) [#3821](https://github.com/sqlfluff/sqlfluff/pull/3821) [@juhoautio](https://github.com/juhoautio)
* Fixed broken main branch [#3825](https://github.com/sqlfluff/sqlfluff/pull/3825) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Enable file name logging for multi-files w/ --show-lint-violations flag [#3788](https://github.com/sqlfluff/sqlfluff/pull/3788) [@thechopkins](https://github.com/thechopkins)
* Take database and schema out of Snowflake reserved keywords list [#3818](https://github.com/sqlfluff/sqlfluff/pull/3818) [@NiallRees](https://github.com/NiallRees)
* Remove a chunk of name references [#3814](https://github.com/sqlfluff/sqlfluff/pull/3814) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix typo in Snowflake dialect [#3813](https://github.com/sqlfluff/sqlfluff/pull/3813) [@Gal40n04ek](https://github.com/Gal40n04ek)
* Allow the use of libraries in macro definitions [#3803](https://github.com/sqlfluff/sqlfluff/pull/3803) [@bjgbeelen](https://github.com/bjgbeelen)
* Indentation fixes and rule logging improvements [#3808](https://github.com/sqlfluff/sqlfluff/pull/3808) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fixes a recursion error in JinjaTemplater handling of undefined values [#3809](https://github.com/sqlfluff/sqlfluff/pull/3809) [@barrywhart](https://github.com/barrywhart)
* Snowflake: extend `GRANT` syntax [#3807](https://github.com/sqlfluff/sqlfluff/pull/3807) [@Gal40n04ek](https://github.com/Gal40n04ek)
* add warehouse_type in snowflake dialect [#3805](https://github.com/sqlfluff/sqlfluff/pull/3805) [@Gal40n04ek](https://github.com/Gal40n04ek)
* add Create Notification Integration syntax [#3801](https://github.com/sqlfluff/sqlfluff/pull/3801) [@Gal40n04ek](https://github.com/Gal40n04ek)
* T-SQL: fix parsing PARTITION BY NULL in window function [#3790](https://github.com/sqlfluff/sqlfluff/pull/3790) [@fmms](https://github.com/fmms)
* SparkSQL: Update L014 rule to not flag Delta Change Data Feed Session & Table Property [#3689](https://github.com/sqlfluff/sqlfluff/pull/3689) [@R7L208](https://github.com/R7L208)
* Snowflake: OVER (ORDER BY) clause required for first_value (fixes #3797) [#3798](https://github.com/sqlfluff/sqlfluff/pull/3798) [@JamesRTaylor](https://github.com/JamesRTaylor)
* add Alter Pipe syntax for snowflake dialect [#3796](https://github.com/sqlfluff/sqlfluff/pull/3796) [@Gal40n04ek](https://github.com/Gal40n04ek)
* BigQuery: Parse WEEK() in date_part [#3787](https://github.com/sqlfluff/sqlfluff/pull/3787) [@yoichi](https://github.com/yoichi)
* Postgres: Support setting user properties using intrinsic ON & OFF values [#3793](https://github.com/sqlfluff/sqlfluff/pull/3793) [@chris-codaio](https://github.com/chris-codaio)
* extend SF dialect for File Format statements [#3774](https://github.com/sqlfluff/sqlfluff/pull/3774) [@Gal40n04ek](https://github.com/Gal40n04ek)
* Add QUALIFY to SparkSQL dialect [#3778](https://github.com/sqlfluff/sqlfluff/pull/3778) [@ThijsKoot](https://github.com/ThijsKoot)
* fix regex for S3Path [#3782](https://github.com/sqlfluff/sqlfluff/pull/3782) [@Gal40n04ek](https://github.com/Gal40n04ek)
* Snowflake: add Optional parameter ERROR INTEGRATION for PIPE [#3785](https://github.com/sqlfluff/sqlfluff/pull/3785) [@Gal40n04ek](https://github.com/Gal40n04ek)
* Add a file size check in bytes [#3770](https://github.com/sqlfluff/sqlfluff/pull/3770) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Require importlib_metadata >=1.0.0 [#3769](https://github.com/sqlfluff/sqlfluff/pull/3769) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@Gal40n04ek](https://github.com/Gal40n04ek) made their first contribution in [#3785](https://github.com/sqlfluff/sqlfluff/pull/3785)
* [@ThijsKoot](https://github.com/ThijsKoot) made their first contribution in [#3778](https://github.com/sqlfluff/sqlfluff/pull/3778)
* [@chris-codaio](https://github.com/chris-codaio) made their first contribution in [#3793](https://github.com/sqlfluff/sqlfluff/pull/3793)
* [@JamesRTaylor](https://github.com/JamesRTaylor) made their first contribution in [#3798](https://github.com/sqlfluff/sqlfluff/pull/3798)
* [@fmms](https://github.com/fmms) made their first contribution in [#3790](https://github.com/sqlfluff/sqlfluff/pull/3790)
* [@bjgbeelen](https://github.com/bjgbeelen) made their first contribution in [#3803](https://github.com/sqlfluff/sqlfluff/pull/3803)
* [@thechopkins](https://github.com/thechopkins) made their first contribution in [#3788](https://github.com/sqlfluff/sqlfluff/pull/3788)

## [1.3.0] - 2022-08-21

## Highlights

This release brings several potentially breaking changes to the underlying parse tree.
Users of the CLI tool in a linting context should notice no change. If, however, your
application relies on the structure of the SQLFluff parse tree or the naming of certain
elements within the yaml format, then this may not be a drop-in replacement. Specifically:

- The addition of a new `end_of_file` meta segment at the end of the parse structure.
- The addition of a `template_loop` meta segment to signify a jump backward in the source file within a loop structure (e.g. a jinja for loop).
- Much more specific types on some raw segments, in particular `identifier` and `literal` type segments will now appear in the parse tree with their more specific type (which used to be called `name`) e.g. `naked_identifier`, `quoted_identifier`, `numeric_literal` etc...

If using the Python API, the _parent_ type (such as `identifier`) will still register
if you call `.is_type("identifier")`, as this function checks all inherited types.
However, the eventual type returned by `.get_type()` will now be (in most cases) what
used to be accessible at `.name`. The `name` attribute will be deprecated in a future
release.
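
As a minimal sketch of that distinction using the Python API (the `Linter` setup and
the example query here are illustrative assumptions, not taken from the release itself):

```python
from sqlfluff.core import Linter

# Parse a trivial query with the default ANSI dialect.
linter = Linter(dialect="ansi")
parsed = linter.parse_string("SELECT col_a FROM tbl")

# Walk the tree looking for identifier segments.
for seg in parsed.tree.recursive_crawl("identifier"):
    # The parent type still registers, because .is_type()
    # checks all inherited types...
    assert seg.is_type("identifier")
    # ...but .get_type() returns the more specific type,
    # e.g. "naked_identifier" (previously surfaced via .name).
    print(seg.raw, seg.get_type())
```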
Other highlights:

* New command-line option `--show-lint-violations` to show details on unfixable errors when running `sqlfluff fix`.
* Improved consistency of process exit codes.
* Short CLI options for many common options.
* Jinja templater: When `--ignore=templating` is enabled, undefined Jinja variables now take on "reasonable" default values rather than blank string (`""`). This can streamline initial rollout of SQLFluff by reducing or eliminating the need to configure templater variables.

There are also a _ton_ of other features and bug fixes in this release, including
first-time contributions from **11 new contributors**! 🎉

## What’s Changed

* T-SQL: ALTER TABLE DROP COLUMN [#3749](https://github.com/sqlfluff/sqlfluff/pull/3749) [@greg-finley](https://github.com/greg-finley)
* Add "# pragma: no cover" to work around sporadic, spurious coverage failure [#3767](https://github.com/sqlfluff/sqlfluff/pull/3767) [@barrywhart](https://github.com/barrywhart)
* Add end_of_file and template_loop markers [#3766](https://github.com/sqlfluff/sqlfluff/pull/3766) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Provide usage examples for new users [#3765](https://github.com/sqlfluff/sqlfluff/pull/3765) [@sirlark](https://github.com/sirlark)
* SQLite: deferrable in create table statement [#3757](https://github.com/sqlfluff/sqlfluff/pull/3757) [@RossOkuno](https://github.com/RossOkuno)
* When ignore=templating and fix_even_unparsable=True, provide defaults for missing vars [#3753](https://github.com/sqlfluff/sqlfluff/pull/3753) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Support Materialized Views [#3759](https://github.com/sqlfluff/sqlfluff/pull/3759) [@yoichi](https://github.com/yoichi)
* Enhance L062 to ignore blocked words in comments [#3754](https://github.com/sqlfluff/sqlfluff/pull/3754) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where undefined Jinja variable in macro file crashes linter [#3751](https://github.com/sqlfluff/sqlfluff/pull/3751) [@barrywhart](https://github.com/barrywhart)
* Migrate analysis, functional and testing to utils [#3743](https://github.com/sqlfluff/sqlfluff/pull/3743) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Build out rule crawling mechanisms [#3717](https://github.com/sqlfluff/sqlfluff/pull/3717) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add current_timestamp to Redshift as a bare function [#3741](https://github.com/sqlfluff/sqlfluff/pull/3741) [@RossOkuno](https://github.com/RossOkuno)
* BigQuery: Fix parsing parameterized data types [#3735](https://github.com/sqlfluff/sqlfluff/pull/3735) [@yoichi](https://github.com/yoichi)
* Change MySQL Create Statement Equals Segment to Optional [#3730](https://github.com/sqlfluff/sqlfluff/pull/3730) [@keyem4251](https://github.com/keyem4251)
* SQLite: add parsing of INSERT statement [#3734](https://github.com/sqlfluff/sqlfluff/pull/3734) [@imrehg](https://github.com/imrehg)
* SPARKSQL: Support Delta Lake Drop Column Clause in `ALTER TABLE` [#3727](https://github.com/sqlfluff/sqlfluff/pull/3727) [@R7L208](https://github.com/R7L208)
* Add short versions of several cli options [#3732](https://github.com/sqlfluff/sqlfluff/pull/3732) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Build out type hints in Grammars [#3718](https://github.com/sqlfluff/sqlfluff/pull/3718) [@alanmcruickshank](https://github.com/alanmcruickshank)
* dbt 1.3.0 compatibility [#3708](https://github.com/sqlfluff/sqlfluff/pull/3708) [@edgarrmondragon](https://github.com/edgarrmondragon)
* Revise no cover direction and remove unused code. [#3723](https://github.com/sqlfluff/sqlfluff/pull/3723) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Update broken flattr link [#3720](https://github.com/sqlfluff/sqlfluff/pull/3720) [@alanmcruickshank](https://github.com/alanmcruickshank)
* BigQuery: remove `key` from unreserved keywords list [#3719](https://github.com/sqlfluff/sqlfluff/pull/3719) [@sabrikaragonen](https://github.com/sabrikaragonen)
* Bigquery reset primary and foreign keys [#3714](https://github.com/sqlfluff/sqlfluff/pull/3714) [@sabrikaragonen](https://github.com/sabrikaragonen)
* Name Deprecation (Part 1) [#3701](https://github.com/sqlfluff/sqlfluff/pull/3701) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Teradata: Add two TdTableConstraints [#3690](https://github.com/sqlfluff/sqlfluff/pull/3690) [@greg-finley](https://github.com/greg-finley)
* Redshift: support expressions in array accessors [#3706](https://github.com/sqlfluff/sqlfluff/pull/3706) [@chronitis](https://github.com/chronitis)
* Handle logging issues at teardown [#3703](https://github.com/sqlfluff/sqlfluff/pull/3703) [@alanmcruickshank](https://github.com/alanmcruickshank)
* L028, L032: Fix bug where fixes were copying templated table names [#3699](https://github.com/sqlfluff/sqlfluff/pull/3699) [@barrywhart](https://github.com/barrywhart)
* L042: Autofix sometimes results in "fix looping", hitting the linter "loop limit" [#3697](https://github.com/sqlfluff/sqlfluff/pull/3697) [@barrywhart](https://github.com/barrywhart)
* L042: Address corner cases where fix corrupts the SQL [#3694](https://github.com/sqlfluff/sqlfluff/pull/3694) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Properly parse collation names [#3686](https://github.com/sqlfluff/sqlfluff/pull/3686) [@borchero](https://github.com/borchero)
* Allow escaping single quotes in single-quoted literal with '' [#3682](https://github.com/sqlfluff/sqlfluff/pull/3682) [@pdebelak](https://github.com/pdebelak)
* T-SQL: Fix indentation after JOIN/APPLY clauses with no ON statement [#3684](https://github.com/sqlfluff/sqlfluff/pull/3684) [@borchero](https://github.com/borchero)
* T-SQL: Parse `DATEPART` date type as date type instead of column name [#3681](https://github.com/sqlfluff/sqlfluff/pull/3681) [@borchero](https://github.com/borchero)
* T-SQL: Allow `COLLATE` clause in `JOIN` conditions [#3680](https://github.com/sqlfluff/sqlfluff/pull/3680) [@borchero](https://github.com/borchero)
* T-SQL: Fix parsing of CREATE VIEW statements with column name syntax [#3669](https://github.com/sqlfluff/sqlfluff/pull/3669) [@borchero](https://github.com/borchero)
* Fix typo in github issue template [#3674](https://github.com/sqlfluff/sqlfluff/pull/3674) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add Athena issue label [#3676](https://github.com/sqlfluff/sqlfluff/pull/3676) [@greg-finley](https://github.com/greg-finley)
* Set issue dialect labels via Github Actions [#3666](https://github.com/sqlfluff/sqlfluff/pull/3666) [@greg-finley](https://github.com/greg-finley)
* Allow configuration of processes from config [#3662](https://github.com/sqlfluff/sqlfluff/pull/3662) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Reposition before recursion in fixes to avoid internal error [#3658](https://github.com/sqlfluff/sqlfluff/pull/3658) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Use UUIDs for matching [#3661](https://github.com/sqlfluff/sqlfluff/pull/3661) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: Add dialect-specific bare functions [#3660](https://github.com/sqlfluff/sqlfluff/pull/3660) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Add `CALL` Support [#3659](https://github.com/sqlfluff/sqlfluff/pull/3659) [@WittierDinosaur](https://github.com/WittierDinosaur)
* ANSI - Add support for `INTERSECT ALL` and `EXCEPT ALL` [#3657](https://github.com/sqlfluff/sqlfluff/pull/3657) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Option to show errors on fix [#3610](https://github.com/sqlfluff/sqlfluff/pull/3610) [@chaimt](https://github.com/chaimt)
* L042: Fix internal error "Attempted to make a parent marker from multiple files" [#3655](https://github.com/sqlfluff/sqlfluff/pull/3655) [@barrywhart](https://github.com/barrywhart)
* L026: Add support for `merge_statement` [#3654](https://github.com/sqlfluff/sqlfluff/pull/3654) [@barrywhart](https://github.com/barrywhart)
* Add handling for Redshift `CONVERT` function data type argument [#3653](https://github.com/sqlfluff/sqlfluff/pull/3653) [@pdebelak](https://github.com/pdebelak)
* Deduplicate files before and during templating [#3629](https://github.com/sqlfluff/sqlfluff/pull/3629) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Rationalise Rule Imports [#3631](https://github.com/sqlfluff/sqlfluff/pull/3631) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle Jinja `{% call ... %}` blocks [#3648](https://github.com/sqlfluff/sqlfluff/pull/3648) [@barrywhart](https://github.com/barrywhart)
%}` blocks [#3648](https://github.com/sqlfluff/sqlfluff/pull/3648) [@barrywhart](https://github.com/barrywhart) * SPARKSQL: Add Delta Lake Constraints syntax to `ALTER TABLE` [#3643](https://github.com/sqlfluff/sqlfluff/pull/3643) [@R7L208](https://github.com/R7L208) * Redshift: syntax for array unnesting with index [#3646](https://github.com/sqlfluff/sqlfluff/pull/3646) [@chronitis](https://github.com/chronitis) * Snowflake - `ALTER TABLE IF EXISTS` and `WHEN SYSTEM$STREAM_HAS_DATA()` [#3641](https://github.com/sqlfluff/sqlfluff/pull/3641) [@chrisalexeev](https://github.com/chrisalexeev) * L057: In BigQuery, allow hyphens by default [#3645](https://github.com/sqlfluff/sqlfluff/pull/3645) [@barrywhart](https://github.com/barrywhart) * Better messages for partial indentation in L003 [#3634](https://github.com/sqlfluff/sqlfluff/pull/3634) [@pdebelak](https://github.com/pdebelak) * Add `INTEGER` to `PrimitiveTypeSegment` for Sparksql [#3624](https://github.com/sqlfluff/sqlfluff/pull/3624) [@ciwassano](https://github.com/ciwassano) * Bump version in gettingstarted.rst via the release script [#3642](https://github.com/sqlfluff/sqlfluff/pull/3642) [@greg-finley](https://github.com/greg-finley) * Improve handling of BigQuery hyphenated table names [#3638](https://github.com/sqlfluff/sqlfluff/pull/3638) [@barrywhart](https://github.com/barrywhart) * update sqlfluff version in gettingstareted.rst [#3639](https://github.com/sqlfluff/sqlfluff/pull/3639) [@keyem4251](https://github.com/keyem4251) * L016: Ignore jinja comments if `ignore_comment_clauses=True` [#3637](https://github.com/sqlfluff/sqlfluff/pull/3637) [@barrywhart](https://github.com/barrywhart) * Add errors for redundant definitions. [#3626](https://github.com/sqlfluff/sqlfluff/pull/3626) [@alanmcruickshank](https://github.com/alanmcruickshank) * Object Literals [#3620](https://github.com/sqlfluff/sqlfluff/pull/3620) [@alanmcruickshank](https://github.com/alanmcruickshank) * Dialect Crumbs [#3625](https://github.com/sqlfluff/sqlfluff/pull/3625) [@alanmcruickshank](https://github.com/alanmcruickshank) * Consistent return codes [#3608](https://github.com/sqlfluff/sqlfluff/pull/3608) [@alanmcruickshank](https://github.com/alanmcruickshank) ## New Contributors * [@keyem4251](https://github.com/keyem4251) made their first contribution in [#3639](https://github.com/sqlfluff/sqlfluff/pull/3639) * [@ciwassano](https://github.com/ciwassano) made their first contribution in [#3624](https://github.com/sqlfluff/sqlfluff/pull/3624) * [@chronitis](https://github.com/chronitis) made their first contribution in [#3646](https://github.com/sqlfluff/sqlfluff/pull/3646) * [@chaimt](https://github.com/chaimt) made their first contribution in [#3610](https://github.com/sqlfluff/sqlfluff/pull/3610) * [@borchero](https://github.com/borchero) made their first contribution in [#3669](https://github.com/sqlfluff/sqlfluff/pull/3669) * [@sabrikaragonen](https://github.com/sabrikaragonen) made their first contribution in [#3714](https://github.com/sqlfluff/sqlfluff/pull/3714) * [@edgarrmondragon](https://github.com/edgarrmondragon) made their first contribution in [#3708](https://github.com/sqlfluff/sqlfluff/pull/3708) * [@imrehg](https://github.com/imrehg) made their first contribution in [#3734](https://github.com/sqlfluff/sqlfluff/pull/3734) * [@yoichi](https://github.com/yoichi) made their first contribution in [#3735](https://github.com/sqlfluff/sqlfluff/pull/3735) * [@RossOkuno](https://github.com/RossOkuno) made their first contribution in 
  [#3741](https://github.com/sqlfluff/sqlfluff/pull/3741)
* [@sirlark](https://github.com/sirlark) made their first contribution in [#3765](https://github.com/sqlfluff/sqlfluff/pull/3765)

## [1.2.1] - 2022-07-15

## Highlights

This is primarily a bugfix release to resolve an issue with the 1.2.0 release, where the new version of `sqlfluff-templater-dbt` relied on functionality from the new version of `sqlfluff` but the package configuration had not been updated. Versions of the two packages are now pinned together.

## What’s Changed

* Pin sqlfluff-templater-dbt via release script [#3613](https://github.com/sqlfluff/sqlfluff/pull/3613) [@greg-finley](https://github.com/greg-finley)
* Specifying comma delimited is unnecessary [#3616](https://github.com/sqlfluff/sqlfluff/pull/3616) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Handle redshift temporary tables with # at the beginning of name [#3615](https://github.com/sqlfluff/sqlfluff/pull/3615) [@pdebelak](https://github.com/pdebelak)

## [1.2.0] - 2022-07-13

## Highlights

Major changes include:

* Adding AWS Athena as a dialect.
* A fix routine for L046 (whitespace in jinja tags), and the mechanisms for more source-only fixes in future.
* By default, large files (over 20k characters) are now skipped by sqlfluff. This limit is configurable (and can be disabled), but exists as a sensible default to avoid the performance overhead of linting *very* large files.
* For the dbt templater, fatal compilation errors no longer stop linting; the offending files are now skipped instead. This enables projects to continue linting beyond the offending file, with much better logging information to aid debugging.

## What’s Changed

* Improve documentation for custom implemented rules [#3604](https://github.com/sqlfluff/sqlfluff/pull/3604) [@Aditya-Tripuraneni](https://github.com/Aditya-Tripuraneni)
* Add a skip and better logging for fatal dbt issues [#3603](https://github.com/sqlfluff/sqlfluff/pull/3603) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add large file check [#3600](https://github.com/sqlfluff/sqlfluff/pull/3600) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Oracle: extend support for `ALTER TABLE` [#3596](https://github.com/sqlfluff/sqlfluff/pull/3596) [@davidfuhr](https://github.com/davidfuhr)
* Immutability fixes [#3428](https://github.com/sqlfluff/sqlfluff/pull/3428) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Struct type should be a segment [#3591](https://github.com/sqlfluff/sqlfluff/pull/3591) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix Bracketed Struct issue [#3590](https://github.com/sqlfluff/sqlfluff/pull/3590) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Allow spaces and multiple signs for numeric literals [#3581](https://github.com/sqlfluff/sqlfluff/pull/3581) [@tunetheweb](https://github.com/tunetheweb)
* Add source fixing capability and fix routines for L046 [#3578](https://github.com/sqlfluff/sqlfluff/pull/3578) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Small grammar cleanup in team rollout docs [#3584](https://github.com/sqlfluff/sqlfluff/pull/3584) [@theianrobertson](https://github.com/theianrobertson)
* Postgres: `CREATE COLLATION` support [#3571](https://github.com/sqlfluff/sqlfluff/pull/3571) [@greg-finley](https://github.com/greg-finley)
* Redshift: Add `TOP X` to select clause modifiers [#3582](https://github.com/sqlfluff/sqlfluff/pull/3582) [@pdebelak](https://github.com/pdebelak)
* Postgres: Small fixes to `COMMENT ON`
  [#3566](https://github.com/sqlfluff/sqlfluff/pull/3566) [@greg-finley](https://github.com/greg-finley)
* Support MySQL system variables [#3576](https://github.com/sqlfluff/sqlfluff/pull/3576) [@qgallet](https://github.com/qgallet)
* Allow no alias for selects in CTEs with a column list [#3580](https://github.com/sqlfluff/sqlfluff/pull/3580) [@pdebelak](https://github.com/pdebelak)
* New dialect AWS Athena [#3551](https://github.com/sqlfluff/sqlfluff/pull/3551) [@cmotta](https://github.com/cmotta)
* Split apart `fix_string()`. [#3568](https://github.com/sqlfluff/sqlfluff/pull/3568) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fix incorrect L022 with postgres dialect with CTE argument list [#3570](https://github.com/sqlfluff/sqlfluff/pull/3570) [@pdebelak](https://github.com/pdebelak)
* Simplify lint fixing (prep for source fixes) [#3567](https://github.com/sqlfluff/sqlfluff/pull/3567) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Exclude .coverage.py from linting [#3564](https://github.com/sqlfluff/sqlfluff/pull/3564) [@zidder](https://github.com/zidder)
* L016: `ignore_comment_clauses` not working for postgres dialect [#3549](https://github.com/sqlfluff/sqlfluff/pull/3549) [@barrywhart](https://github.com/barrywhart)
* Groundwork for a fix routine for L046 [#3552](https://github.com/sqlfluff/sqlfluff/pull/3552) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add better handling for SQLFluffUserError when running core cli commands [#3431](https://github.com/sqlfluff/sqlfluff/pull/3431) [@alanmcruickshank](https://github.com/alanmcruickshank)

## New Contributors

* [@pdebelak](https://github.com/pdebelak) made their first contribution in [#3570](https://github.com/sqlfluff/sqlfluff/pull/3570)
* [@cmotta](https://github.com/cmotta) made their first contribution in [#3551](https://github.com/sqlfluff/sqlfluff/pull/3551)
* [@qgallet](https://github.com/qgallet) made their first contribution in [#3576](https://github.com/sqlfluff/sqlfluff/pull/3576)
* [@theianrobertson](https://github.com/theianrobertson) made their first contribution in [#3584](https://github.com/sqlfluff/sqlfluff/pull/3584)
* [@davidfuhr](https://github.com/davidfuhr) made their first contribution in [#3596](https://github.com/sqlfluff/sqlfluff/pull/3596)
* [@Aditya-Tripuraneni](https://github.com/Aditya-Tripuraneni) made their first contribution in [#3604](https://github.com/sqlfluff/sqlfluff/pull/3604)

## [1.1.0] - 2022-07-03

## Highlights

Major changes include:

* L066 - New rule to allow you to set min/max length requirements for aliases to ensure they are meaningful
* L062 - addition of `blocked_regex` as well as `blocked_words`
* L025 - fix several corner cases where aliases were removed inappropriately
* L059 is now disabled by default for Postgres
* Many more dialect improvements and bug fixes.

## What’s Changed

* L025: Derived query requires alias -- also handle UNION, etc. [#3548](https://github.com/sqlfluff/sqlfluff/pull/3548) [@barrywhart](https://github.com/barrywhart)
* L025 should not remove aliases from derived queries [#3546](https://github.com/sqlfluff/sqlfluff/pull/3546) [@barrywhart](https://github.com/barrywhart)
* T-SQL keyword functions should be treated as keywords [#3540](https://github.com/sqlfluff/sqlfluff/pull/3540) [@tunetheweb](https://github.com/tunetheweb)
* Fix issue where "--nocolor" is ignored [#3536](https://github.com/sqlfluff/sqlfluff/pull/3536) [@barrywhart](https://github.com/barrywhart)
* Clickhouse: allow `FINAL` modifier [#3534](https://github.com/sqlfluff/sqlfluff/pull/3534) [@ThomAub](https://github.com/ThomAub)
* L018 change to just check for newlines rather than alignment [#3499](https://github.com/sqlfluff/sqlfluff/pull/3499) [@zidder](https://github.com/zidder)
* SparkSQL: Update terminator grammar for `HAVING`, `WHERE`, `GROUP BY` [#3526](https://github.com/sqlfluff/sqlfluff/pull/3526) [@R7L208](https://github.com/R7L208)
* Fix L025 false positive for T-SQL `VALUES` clause [#3533](https://github.com/sqlfluff/sqlfluff/pull/3533) [@barrywhart](https://github.com/barrywhart)
* New rule L066 for enforcing table alias lengths [#3384](https://github.com/sqlfluff/sqlfluff/pull/3384) [@f0rk](https://github.com/f0rk)
* Redshift: `CALL` statement [#3529](https://github.com/sqlfluff/sqlfluff/pull/3529) [@greg-finley](https://github.com/greg-finley)
* Core: Compile regexes at init time to avoid overhead [#3511](https://github.com/sqlfluff/sqlfluff/pull/3511) [@judahrand](https://github.com/judahrand)
* Disable L059 by default for Postgres [#3528](https://github.com/sqlfluff/sqlfluff/pull/3528) [@tunetheweb](https://github.com/tunetheweb)
* Core: Add `MultiStringParser` to match a collection of strings [#3510](https://github.com/sqlfluff/sqlfluff/pull/3510) [@judahrand](https://github.com/judahrand)
* SQLite: `PRIMARY KEY AUTOINCREMENT` [#3527](https://github.com/sqlfluff/sqlfluff/pull/3527) [@greg-finley](https://github.com/greg-finley)
* MySQL: Support `LOAD DATA` [#3518](https://github.com/sqlfluff/sqlfluff/pull/3518) [@greg-finley](https://github.com/greg-finley)
* Redshift: `GRANT EXECUTE ON PROCEDURES` [#3516](https://github.com/sqlfluff/sqlfluff/pull/3516) [@greg-finley](https://github.com/greg-finley)
* Allow `DEFAULT` expression in Redshift `ALTER TABLE ADD COLUMN` statements [#3513](https://github.com/sqlfluff/sqlfluff/pull/3513) [@menzenski](https://github.com/menzenski)
* BigQuery: Fix parsing of Array creation from full subquery [#3502](https://github.com/sqlfluff/sqlfluff/pull/3502) [@judahrand](https://github.com/judahrand)
* SparkSQL: Allow dateparts as table aliases [#3500](https://github.com/sqlfluff/sqlfluff/pull/3500) [@R7L208](https://github.com/R7L208)
* Fix `load_macros_from_path` to actually support multiple paths [#3488](https://github.com/sqlfluff/sqlfluff/pull/3488) [@emancu](https://github.com/emancu)
* Allow linter to apply fixes spanning more than 2 slices [#3492](https://github.com/sqlfluff/sqlfluff/pull/3492) [@barrywhart](https://github.com/barrywhart)
* Fix L022 false positive when the CTE definition has a column list [#3490](https://github.com/sqlfluff/sqlfluff/pull/3490) [@barrywhart](https://github.com/barrywhart)
* SparkSQL: Support for Delta `RESTORE` statement [#3486](https://github.com/sqlfluff/sqlfluff/pull/3486) [@R7L208](https://github.com/R7L208)
* Add values function to `SET` clause [#3483](https://github.com/sqlfluff/sqlfluff/pull/3483) [@hgranthorner](https://github.com/hgranthorner)
* SparkSQL: Support for `CONVERT TO DELTA` command [#3482](https://github.com/sqlfluff/sqlfluff/pull/3482) [@R7L208](https://github.com/R7L208)
* BigQuery: Remaining procedural statements [#3473](https://github.com/sqlfluff/sqlfluff/pull/3473) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: support grouping sets [#3477](https://github.com/sqlfluff/sqlfluff/pull/3477) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Support for Delta syntax to create manifest files through the `GENERATE` command [#3478](https://github.com/sqlfluff/sqlfluff/pull/3478) [@R7L208](https://github.com/R7L208)
* Add config for optionally indenting contents of `ON` blocks [#3471](https://github.com/sqlfluff/sqlfluff/pull/3471) [@PeterBalsdon](https://github.com/PeterBalsdon)
* L026: check standalone aliases as well as table aliases [#3470](https://github.com/sqlfluff/sqlfluff/pull/3470) [@tunetheweb](https://github.com/tunetheweb)
* L045: Add handling for nested queries and CTEs [#3468](https://github.com/sqlfluff/sqlfluff/pull/3468) [@barrywhart](https://github.com/barrywhart)
* L062: add `blocked_regex` support [#3467](https://github.com/sqlfluff/sqlfluff/pull/3467) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Support for the Delta `DESCRIBE DETAIL` command [#3465](https://github.com/sqlfluff/sqlfluff/pull/3465) [@R7L208](https://github.com/R7L208)

## New Contributors

* [@PeterBalsdon](https://github.com/PeterBalsdon) made their first contribution in [#3471](https://github.com/sqlfluff/sqlfluff/pull/3471)
* [@hgranthorner](https://github.com/hgranthorner) made their first contribution in [#3483](https://github.com/sqlfluff/sqlfluff/pull/3483)
* [@emancu](https://github.com/emancu) made their first contribution in [#3488](https://github.com/sqlfluff/sqlfluff/pull/3488)
* [@judahrand](https://github.com/judahrand) made their first contribution in [#3502](https://github.com/sqlfluff/sqlfluff/pull/3502)
* [@f0rk](https://github.com/f0rk) made their first contribution in [#3384](https://github.com/sqlfluff/sqlfluff/pull/3384)
* [@zidder](https://github.com/zidder) made their first contribution in [#3499](https://github.com/sqlfluff/sqlfluff/pull/3499)
* [@ThomAub](https://github.com/ThomAub) made their first contribution in [#3534](https://github.com/sqlfluff/sqlfluff/pull/3534)

## [1.0.0] - 2022-06-17

## Highlights

This is the first _stable_ release of SQLFluff 🎉🎉🎉.

- _Does this mean there are no more bugs?_ **No.**
- _Does this mean we're going to stop developing new features?_ **No.**
- _Does this mean that this is a tool that is now broadly usable for many teams?_ **Yes.**

We've intentionally chosen to release 1.0.0 at a time of relative stability within SQLFluff and not at a time when new big structural changes are being made. This means that there's a good chance that this release is broadly usable. This also recognises that, through the hard work of a _huge_ number of contributors, we've built this out from a fringe tool into something which gets over 500k downloads a month and over 4k stars on GitHub.

There's still a lot to do, and some more exciting things on the horizon. If you want to be part of this and join the team of contributors, come and hang out in our [slack community](https://join.slack.com/t/sqlfluff/shared_invite/zt-o1f4x0e8-pZzarAIlQmKj_6ZwD16w0g) or on our [twitter account](https://twitter.com/SQLFluff) where people can help you get started.
If you're a long time user, keep submitting bug reports and inputting on [issues on Github](https://github.com/sqlfluff/sqlfluff/issues). If you've never used SQLFluff before, or are hesitant about starting to use it in your day to day work, now might be a good time to try it. We have guides on how to [get started with the tool](https://docs.sqlfluff.com/en/stable/gettingstarted.html), and how to [get started with rolling out to a team](https://docs.sqlfluff.com/en/stable/teamrollout.html) in our docs.

## What’s Changed

* Swap to skip file if not found [#3464](https://github.com/sqlfluff/sqlfluff/pull/3464) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Postgres: treat `GENERATE_SERIES` as a value table function [#3463](https://github.com/sqlfluff/sqlfluff/pull/3463) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support multiple `CONSTRAINTS` in `CREATE DOMAIN` [#3460](https://github.com/sqlfluff/sqlfluff/pull/3460) [@tunetheweb](https://github.com/tunetheweb)
* Redshift: Add `ANYELEMENT` support [#3458](https://github.com/sqlfluff/sqlfluff/pull/3458) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Optional select clause elements and better `ON CONFLICT` support [#3452](https://github.com/sqlfluff/sqlfluff/pull/3452) [@tunetheweb](https://github.com/tunetheweb)
* Add ClickHouse as a dialect [#3448](https://github.com/sqlfluff/sqlfluff/pull/3448) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: allow keywords in qualified column references [#3450](https://github.com/sqlfluff/sqlfluff/pull/3450) [@tunetheweb](https://github.com/tunetheweb)
* Remove Baron Schwatz Dead Link [#3453](https://github.com/sqlfluff/sqlfluff/pull/3453) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Finish `ALTER TYPE` [#3451](https://github.com/sqlfluff/sqlfluff/pull/3451) [@greg-finley](https://github.com/greg-finley)
* SparkSQL: Add Delta Syntax for `DESCRIBE HISTORY` statement [#3447](https://github.com/sqlfluff/sqlfluff/pull/3447) [@R7L208](https://github.com/R7L208)
* Snowflake: Support Stage data file parameters in `FROM` clauses [#3446](https://github.com/sqlfluff/sqlfluff/pull/3446) [@tunetheweb](https://github.com/tunetheweb)
* Redshift: Support Object unpivoting [#3441](https://github.com/sqlfluff/sqlfluff/pull/3441) [@tunetheweb](https://github.com/tunetheweb)
* Python script to automate release [#3403](https://github.com/sqlfluff/sqlfluff/pull/3403) [@greg-finley](https://github.com/greg-finley)
* Remove Delta Lake Reference in README.md [#3444](https://github.com/sqlfluff/sqlfluff/pull/3444) [@R7L208](https://github.com/R7L208)
* Add `databricks` dialect as an alias for `sparksql` dialect [#3440](https://github.com/sqlfluff/sqlfluff/pull/3440) [@R7L208](https://github.com/R7L208)
* Make all Postgres identifiers quotable [#3442](https://github.com/sqlfluff/sqlfluff/pull/3442) [@tunetheweb](https://github.com/tunetheweb)
* Update JinjaAnalyzer and JinjaTracer to handle `{% block %}` [#3436](https://github.com/sqlfluff/sqlfluff/pull/3436) [@barrywhart](https://github.com/barrywhart)
* SparkSQL: Add support for Delta `VACUUM` statement [#3439](https://github.com/sqlfluff/sqlfluff/pull/3439) [@R7L208](https://github.com/R7L208)
* Warning for parsing errors extended to all dialects [#3411](https://github.com/sqlfluff/sqlfluff/pull/3411) [@mdahlman](https://github.com/mdahlman)
* Handle templater validation errors more gracefully [#3433](https://github.com/sqlfluff/sqlfluff/pull/3433) [@alanmcruickshank](https://github.com/alanmcruickshank)
* MYSQL: allow for escaped single quotes [#3424](https://github.com/sqlfluff/sqlfluff/pull/3424) [@mdahlman](https://github.com/mdahlman)
* L027: Fix false positives by reverting the PR for issue #2992: Check table aliases exist [#3435](https://github.com/sqlfluff/sqlfluff/pull/3435) [@barrywhart](https://github.com/barrywhart)
* Allow `numeric_dollar` templater to have curly braces, update `dollar` + `numeric_dollar` templater examples in docs [#3432](https://github.com/sqlfluff/sqlfluff/pull/3432) [@menzenski](https://github.com/menzenski)
* Allow Redshift `IDENTITY` column `(seed, step)` to be optional [#3430](https://github.com/sqlfluff/sqlfluff/pull/3430) [@menzenski](https://github.com/menzenski)
* L036: Make wildcard behavior configurable [#3426](https://github.com/sqlfluff/sqlfluff/pull/3426) [@barrywhart](https://github.com/barrywhart)
* L034: Don't autofix if numeric column references [#3423](https://github.com/sqlfluff/sqlfluff/pull/3423) [@barrywhart](https://github.com/barrywhart)
* L036: Treat wildcard as multiple select targets (i.e. separate line) [#3422](https://github.com/sqlfluff/sqlfluff/pull/3422) [@barrywhart](https://github.com/barrywhart)
* Snowflake: IDENTIFIER pseudo-function [#3409](https://github.com/sqlfluff/sqlfluff/pull/3409) [@mdahlman](https://github.com/mdahlman)
* SNOWFLAKE: Fully referenced object names in clone statements [#3414](https://github.com/sqlfluff/sqlfluff/pull/3414) [@mdahlman](https://github.com/mdahlman)
* Unpin coverage now issue with 6.3 has been resolved [#3393](https://github.com/sqlfluff/sqlfluff/pull/3393) [@tunetheweb](https://github.com/tunetheweb)
* L045: handle `UPDATE` statements with CTEs [#3397](https://github.com/sqlfluff/sqlfluff/pull/3397) [@tunetheweb](https://github.com/tunetheweb)
* L027: Add support for `ignore_words` [#3398](https://github.com/sqlfluff/sqlfluff/pull/3398) [@dmohns](https://github.com/dmohns)
* Postgres: Allow `CREATE FUNCTION` to use Expressions in default values [#3408](https://github.com/sqlfluff/sqlfluff/pull/3408) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug in `apply_fixes()` with leading/trailing whitespace [#3407](https://github.com/sqlfluff/sqlfluff/pull/3407) [@barrywhart](https://github.com/barrywhart)
* Redshift: Correct `ALTER TABLE` syntax [#3395](https://github.com/sqlfluff/sqlfluff/pull/3395) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Parse index with column sort [#3405](https://github.com/sqlfluff/sqlfluff/pull/3405) [@greg-finley](https://github.com/greg-finley)
* MySQL: Improve SET Syntax for Variable Assignment [#3394](https://github.com/sqlfluff/sqlfluff/pull/3394) [@mdahlman](https://github.com/mdahlman)
* Handle Postgres-style type casts when using the `colon_nospaces` templating style [#3383](https://github.com/sqlfluff/sqlfluff/pull/3383) [@benji-york](https://github.com/benji-york)
* Capitalization in help message [#3385](https://github.com/sqlfluff/sqlfluff/pull/3385) [@mdahlman](https://github.com/mdahlman)
* MySQL: Update keywords [#3381](https://github.com/sqlfluff/sqlfluff/pull/3381) [@mdahlman](https://github.com/mdahlman)
* Teradata: Database statement and Set Session Database [#3382](https://github.com/sqlfluff/sqlfluff/pull/3382) [@mdahlman](https://github.com/mdahlman)

## New Contributors

* [@benji-york](https://github.com/benji-york) made their first contribution in [#3383](https://github.com/sqlfluff/sqlfluff/pull/3383)
* [@menzenski](https://github.com/menzenski) made their first contribution in [#3430](https://github.com/sqlfluff/sqlfluff/pull/3430)
## [0.13.2] - 2022-05-20

## Highlights

Major changes include:

* Fix bug causing L003 to report indentation errors for templated code - sorry we know that one's caused many of you some grief :-(
* Initial support of SOQL (Salesforce Object Query Language).
* Additional Placeholder templating options.
* Start of BigQuery procedural language support (starting with simple `FOR` statements and `CREATE PROCEDURE` statements).
* New rule L065 to put set operators onto new lines.
* Many more dialect improvements and bug fixes.

## What’s Changed

* All dialects: Allow `RESPECT NULLS`/`IGNORE NULLS` in window functions [#3376](https://github.com/sqlfluff/sqlfluff/pull/3376) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: correct `RETURNS TABLE` column type [#3379](https://github.com/sqlfluff/sqlfluff/pull/3379) [@tunetheweb](https://github.com/tunetheweb)
* L065: Add rule for set operators surrounded by newlines [#3330](https://github.com/sqlfluff/sqlfluff/pull/3330) [@dmohns](https://github.com/dmohns)
* L064: Apply preferred quote-style for partially templated quoted literals [#3300](https://github.com/sqlfluff/sqlfluff/pull/3300) [@dmohns](https://github.com/dmohns)
* BigQuery: Support Stored Procedures [#3369](https://github.com/sqlfluff/sqlfluff/pull/3369) [@tunetheweb](https://github.com/tunetheweb)
* MySQL extra Boolean operators (`&&`, `||`, `!`) [#3359](https://github.com/sqlfluff/sqlfluff/pull/3359) [@mdahlman](https://github.com/mdahlman)
* Postgres and Redshift: Support `LOCK [TABLE]` [#3350](https://github.com/sqlfluff/sqlfluff/pull/3350) [@tunetheweb](https://github.com/tunetheweb)
* Placeholder updates: Allow optional braces in dollar placeholders, add `colon_nospaces`, and cast to string [#3354](https://github.com/sqlfluff/sqlfluff/pull/3354) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Basic `FOR..IN..DO...END FOR` support [#3340](https://github.com/sqlfluff/sqlfluff/pull/3340) [@tunetheweb](https://github.com/tunetheweb)
* L025: exclude `VALUES` clauses [#3358](https://github.com/sqlfluff/sqlfluff/pull/3358) [@tunetheweb](https://github.com/tunetheweb)
* GitHub Actions: Update existing PR on new runs [#3367](https://github.com/sqlfluff/sqlfluff/pull/3367) [@greg-finley](https://github.com/greg-finley)
* GitHub Actions: Copy draft release notes to CHANGELOG [#3360](https://github.com/sqlfluff/sqlfluff/pull/3360) [@greg-finley](https://github.com/greg-finley)
* GitHub Action to set version number [#3347](https://github.com/sqlfluff/sqlfluff/pull/3347) [@greg-finley](https://github.com/greg-finley)
* Postgres and Redshift: support `ALTER SCHEMA` [#3346](https://github.com/sqlfluff/sqlfluff/pull/3346) [@mdahlman](https://github.com/mdahlman)
* MySQL: better `SELECT..INTO` support [#3351](https://github.com/sqlfluff/sqlfluff/pull/3351) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: support better function calls in `CREATE TRIGGER` [#3349](https://github.com/sqlfluff/sqlfluff/pull/3349) [@tunetheweb](https://github.com/tunetheweb)
* Misc rule doc updates [#3352](https://github.com/sqlfluff/sqlfluff/pull/3352) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Move `CASE` keyword to Unreserved list [#3353](https://github.com/sqlfluff/sqlfluff/pull/3353) [@tunetheweb](https://github.com/tunetheweb)
* MySQL: Added support for multiple variables in `SET` statement. [#3328](https://github.com/sqlfluff/sqlfluff/pull/3328) [@cgkoutzigiannis](https://github.com/cgkoutzigiannis)
* SOQL: Support `date_n_literal` [#3344](https://github.com/sqlfluff/sqlfluff/pull/3344) [@greg-finley](https://github.com/greg-finley)
* Update Docs: Getting Started and Index [#3339](https://github.com/sqlfluff/sqlfluff/pull/3339) [@mdahlman](https://github.com/mdahlman)
* SOQL: Disable L026 rule [#3338](https://github.com/sqlfluff/sqlfluff/pull/3338) [@greg-finley](https://github.com/greg-finley)
* Fix critical parse error logged after L003 fix [#3337](https://github.com/sqlfluff/sqlfluff/pull/3337) [@barrywhart](https://github.com/barrywhart)
* SOQL: Disallow non-`SELECT` statements [#3329](https://github.com/sqlfluff/sqlfluff/pull/3329) [@greg-finley](https://github.com/greg-finley)
* ci: bump github actions [#3336](https://github.com/sqlfluff/sqlfluff/pull/3336) [@Fdawgs](https://github.com/Fdawgs)
* Start SOQL dialect [#3312](https://github.com/sqlfluff/sqlfluff/pull/3312) [@greg-finley](https://github.com/greg-finley)
* Hive: support `CLUSTER`, `DISTRIBUTE`, `SORT BY` [#3304](https://github.com/sqlfluff/sqlfluff/pull/3304) [@barunpuri](https://github.com/barunpuri)
* Fix typo in Configuration documentation [#3319](https://github.com/sqlfluff/sqlfluff/pull/3319) [@mdahlman](https://github.com/mdahlman)
* L011: Support `MERGE` statements [#3292](https://github.com/sqlfluff/sqlfluff/pull/3292) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Add workaround to fix false-positives of L063 [#3306](https://github.com/sqlfluff/sqlfluff/pull/3306) [@dmohns](https://github.com/dmohns)
* Snowflake: `REMOVE` statement rework [#3308](https://github.com/sqlfluff/sqlfluff/pull/3308) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: `PUT` statement [#3307](https://github.com/sqlfluff/sqlfluff/pull/3307) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: `GET` statement [#3305](https://github.com/sqlfluff/sqlfluff/pull/3305) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: Support `ALTER EXTERNAL TABLE` [#3302](https://github.com/sqlfluff/sqlfluff/pull/3302) [@jmc-bbk](https://github.com/jmc-bbk)
* T-SQL: Fix `PIVOT` placement [#3298](https://github.com/sqlfluff/sqlfluff/pull/3298) [@jpers36](https://github.com/jpers36)
* Cleanup role references [#3287](https://github.com/sqlfluff/sqlfluff/pull/3287) [@tunetheweb](https://github.com/tunetheweb)
* Adding Typeform and videoask into inthewild.rst [#3296](https://github.com/sqlfluff/sqlfluff/pull/3296) [@omonereo-tf](https://github.com/omonereo-tf)
* Snowflake: `LIST` statement enhancement [#3295](https://github.com/sqlfluff/sqlfluff/pull/3295) [@jmc-bbk](https://github.com/jmc-bbk)
* MySQL: Support `CREATE USER` [#3289](https://github.com/sqlfluff/sqlfluff/pull/3289) [@greg-finley](https://github.com/greg-finley)
* Snowflake: CREATE STAGE grammar enhancement for file formats [#3293](https://github.com/sqlfluff/sqlfluff/pull/3293) [@jmc-bbk](https://github.com/jmc-bbk)
* T-SQL: Complete support for `DELETE` statement [#3285](https://github.com/sqlfluff/sqlfluff/pull/3285) [@pguyot](https://github.com/pguyot)
* MySQL: Support account names [#3286](https://github.com/sqlfluff/sqlfluff/pull/3286) [@greg-finley](https://github.com/greg-finley)
* L028: In T-SQL dialect, table variables cannot be used to qualify references [#3283](https://github.com/sqlfluff/sqlfluff/pull/3283) [@barrywhart](https://github.com/barrywhart)
* L007: An operator on a line by itself is okay [#3281](https://github.com/sqlfluff/sqlfluff/pull/3281) [@barrywhart](https://github.com/barrywhart)
* L046 (spaces around Jinja tags) should check all slices in a segment [#3279](https://github.com/sqlfluff/sqlfluff/pull/3279) [@barrywhart](https://github.com/barrywhart)
* L003 bug fix: Not ignoring templated newline [#3278](https://github.com/sqlfluff/sqlfluff/pull/3278) [@barrywhart](https://github.com/barrywhart)

## New Contributors

* [@omonereo-tf](https://github.com/omonereo-tf) made their first contribution in [#3296](https://github.com/sqlfluff/sqlfluff/pull/3296)
* [@mdahlman](https://github.com/mdahlman) made their first contribution in [#3319](https://github.com/sqlfluff/sqlfluff/pull/3319)
* [@cgkoutzigiannis](https://github.com/cgkoutzigiannis) made their first contribution in [#3328](https://github.com/sqlfluff/sqlfluff/pull/3328)

## [0.13.1] - 2022-05-06

## Highlights

Major changes include:

* Addition of "rule groups" (currently `core` and `all`) to allow ease of turning on and off groups of rules.
* Addition of `db2` dialect
* PRS errors are now highlighted in red.
* Many bug fixes and dialect improvements

## What’s Changed

* Allow optional `AS` in `MERGE` statements using `SELECT` [#3276](https://github.com/sqlfluff/sqlfluff/pull/3276) [@tunetheweb](https://github.com/tunetheweb)
* Add groups each rule is in to Rules documentation [#3272](https://github.com/sqlfluff/sqlfluff/pull/3272) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Enhanced `EXPORT DATA` statement [#3267](https://github.com/sqlfluff/sqlfluff/pull/3267) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: `CREATE TABLE` support for `COPY` and `LIKE` [#3266](https://github.com/sqlfluff/sqlfluff/pull/3266) [@tunetheweb](https://github.com/tunetheweb)
* Improve error on missing keywords [#3268](https://github.com/sqlfluff/sqlfluff/pull/3268) [@greg-finley](https://github.com/greg-finley)
* MySQL: Add `FLUSH` support [#3269](https://github.com/sqlfluff/sqlfluff/pull/3269) [@greg-finley](https://github.com/greg-finley)
* Postgres: Add `ALTER TYPE` support [#3265](https://github.com/sqlfluff/sqlfluff/pull/3265) [@greg-finley](https://github.com/greg-finley)
* Bug fix: L036 handle single-column `SELECT` with comment on same line as `SELECT` keyword [#3259](https://github.com/sqlfluff/sqlfluff/pull/3259) [@barrywhart](https://github.com/barrywhart)
* Put working example in the README [#3261](https://github.com/sqlfluff/sqlfluff/pull/3261) [@greg-finley](https://github.com/greg-finley)
* Snowflake: Add `CREATE FILE FORMAT` Support [#3104](https://github.com/sqlfluff/sqlfluff/pull/3104) [@jmc-bbk](https://github.com/jmc-bbk)
* Bug fix: Disable L059 in snowflake dialect [#3260](https://github.com/sqlfluff/sqlfluff/pull/3260) [@barrywhart](https://github.com/barrywhart)
* Remove redundant `Anything()` from `match_grammars` [#3258](https://github.com/sqlfluff/sqlfluff/pull/3258) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Add `DOMAIN` support [#3257](https://github.com/sqlfluff/sqlfluff/pull/3257) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Allow optional brackets with `EXECUTE` [#3255](https://github.com/sqlfluff/sqlfluff/pull/3255) [@pguyot](https://github.com/pguyot)
* Add rule groups and a core rules group [#3142](https://github.com/sqlfluff/sqlfluff/pull/3142) [@pwildenhain](https://github.com/pwildenhain)
* MySQL: Better `UNSIGNED` support [#3250](https://github.com/sqlfluff/sqlfluff/pull/3250) [@tunetheweb](https://github.com/tunetheweb)
* MySQL (and others): Support `DROP TEMPORARY TABLE` [#3251](https://github.com/sqlfluff/sqlfluff/pull/3251) [@tunetheweb](https://github.com/tunetheweb)
* Add Db2 dialect [#3231](https://github.com/sqlfluff/sqlfluff/pull/3231) [@ddresslerlegalplans](https://github.com/ddresslerlegalplans)
* BigQuery: Add `CREATE EXTERNAL TABLE` statement [#3241](https://github.com/sqlfluff/sqlfluff/pull/3241) [@dmohns](https://github.com/dmohns)
* SQLite: Add support for expressions in `CREATE INDEX` columns [#3240](https://github.com/sqlfluff/sqlfluff/pull/3240) [@tunetheweb](https://github.com/tunetheweb)
* Fix exception in `check_still_complete` and matching in `StartsWith` [#3236](https://github.com/sqlfluff/sqlfluff/pull/3236) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Add Support for `DROP` Statements [#3238](https://github.com/sqlfluff/sqlfluff/pull/3238) [@chrisalexeev](https://github.com/chrisalexeev)
* Allow YAML generation script to accept arguments when run through `tox` [#3233](https://github.com/sqlfluff/sqlfluff/pull/3233) [@tunetheweb](https://github.com/tunetheweb)
* Bug fix: Cleanly catch and report errors during `load_macros_from_path` [#3239](https://github.com/sqlfluff/sqlfluff/pull/3239) [@barrywhart](https://github.com/barrywhart)
* Indent procedure parameters [#3234](https://github.com/sqlfluff/sqlfluff/pull/3234) [@fdw](https://github.com/fdw)
* Enhance `apply_fixes()` to automatically fix violations of `can_start_end_non_code` [#3232](https://github.com/sqlfluff/sqlfluff/pull/3232) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Fix `for xml path` identifier [#3230](https://github.com/sqlfluff/sqlfluff/pull/3230) [@fdw](https://github.com/fdw)
* SparkSQL: Additional Delta Merge Test Cases [#3228](https://github.com/sqlfluff/sqlfluff/pull/3228) [@R7L208](https://github.com/R7L208)
* Fix bug where L018 warns inappropriately if CTE definition includes a column list [#3227](https://github.com/sqlfluff/sqlfluff/pull/3227) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Better `STRUCT` support [#3217](https://github.com/sqlfluff/sqlfluff/pull/3217) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where L003 and L036 fixes caused a parse error [#3221](https://github.com/sqlfluff/sqlfluff/pull/3221) [@barrywhart](https://github.com/barrywhart)
* Make `IF EXISTS` work with `UNION` selects [#3218](https://github.com/sqlfluff/sqlfluff/pull/3218) [@fdw](https://github.com/fdw)
* Fix bug where the `fix_even_unparsable` setting was not being respected in `.sqlfluff` [#3220](https://github.com/sqlfluff/sqlfluff/pull/3220) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Better `DELETE` table support [#3224](https://github.com/sqlfluff/sqlfluff/pull/3224) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: `ALTER MATERIALIZED VIEW` statement [#3215](https://github.com/sqlfluff/sqlfluff/pull/3215) [@jmc-bbk](https://github.com/jmc-bbk)
* BigQuery: recognise `DATE`, `DATETIME` and `TIME` as date parts for `EXTRACT` [#3209](https://github.com/sqlfluff/sqlfluff/pull/3209) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: enhanced `UPDATE` statement support [#3203](https://github.com/sqlfluff/sqlfluff/pull/3203) [@tunetheweb](https://github.com/tunetheweb)
* Prevent Date Constructors from being changed to double quotes by L064 [#3212](https://github.com/sqlfluff/sqlfluff/pull/3212) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Fix `DROP EXTENSION` syntax [#3213](https://github.com/sqlfluff/sqlfluff/pull/3213) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Handle `FLATTEN()` table function aliases correctly in L025, L027, L028 [#3194](https://github.com/sqlfluff/sqlfluff/pull/3194) [@barrywhart](https://github.com/barrywhart)
* Snowflake: Function `LANGUAGE SQL` [#3202](https://github.com/sqlfluff/sqlfluff/pull/3202) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres: Enhanced `CREATE MATERIALIZED VIEW` [#3204](https://github.com/sqlfluff/sqlfluff/pull/3204) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support basic `FOR XML` statements [#3193](https://github.com/sqlfluff/sqlfluff/pull/3193) [@fdw](https://github.com/fdw)
* T-SQL: Fix cursor syntax [#3192](https://github.com/sqlfluff/sqlfluff/pull/3192) [@fdw](https://github.com/fdw)
* Snowflake: `REMOVE` statement enhancement [#3191](https://github.com/sqlfluff/sqlfluff/pull/3191) [@jmc-bbk](https://github.com/jmc-bbk)
* Snowflake: Moved `VIEW` to unreserved keywords [#3190](https://github.com/sqlfluff/sqlfluff/pull/3190) [@WittierDinosaur](https://github.com/WittierDinosaur)
* BigQuery: Support `EXPORT DATA` [#3177](https://github.com/sqlfluff/sqlfluff/pull/3177) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Fix exception when using variable names in `FROM` clause [#3175](https://github.com/sqlfluff/sqlfluff/pull/3175) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where `encoding` setting in .sqlfluff file was not being respected [#3170](https://github.com/sqlfluff/sqlfluff/pull/3170) [@barrywhart](https://github.com/barrywhart)
* Highlight `PRS` errors in red [#3168](https://github.com/sqlfluff/sqlfluff/pull/3168) [@OTooleMichael](https://github.com/OTooleMichael)
* Remove unnecessary `StartsWith` and make `terminator` mandatory when using it [#3165](https://github.com/sqlfluff/sqlfluff/pull/3165) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support Composite Types [#3167](https://github.com/sqlfluff/sqlfluff/pull/3167) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support opening, closing, deallocating and fetching cursors [#3166](https://github.com/sqlfluff/sqlfluff/pull/3166) [@fdw](https://github.com/fdw)
* T-SQL: Add declaration of cursors [#3164](https://github.com/sqlfluff/sqlfluff/pull/3164) [@fdw](https://github.com/fdw)
* Missed #3151 from CHANGELOG [#3163](https://github.com/sqlfluff/sqlfluff/pull/3163) [@tunetheweb](https://github.com/tunetheweb)
* Bug fix: L028 sometimes makes incorrect fix when there are subqueries [#3156](https://github.com/sqlfluff/sqlfluff/pull/3156) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Support `OUTPUT INTO` [#3162](https://github.com/sqlfluff/sqlfluff/pull/3162) [@fdw](https://github.com/fdw)
* T-SQL: Add `CREATE TYPE` statement [#3154](https://github.com/sqlfluff/sqlfluff/pull/3154) [@fdw](https://github.com/fdw)
* Hive: Support `TABLESAMPLE` [#3159](https://github.com/sqlfluff/sqlfluff/pull/3159) [@barunpuri](https://github.com/barunpuri)
* Hive: Support back quoted identifier and literal [#3158](https://github.com/sqlfluff/sqlfluff/pull/3158) [@barunpuri](https://github.com/barunpuri)
* T-SQL: Add table hints to `INSERT` and `DELETE` [#3155](https://github.com/sqlfluff/sqlfluff/pull/3155) [@fdw](https://github.com/fdw)

## New Contributors

* [@ddresslerlegalplans](https://github.com/ddresslerlegalplans) made their first contribution in [#3231](https://github.com/sqlfluff/sqlfluff/pull/3231)
* [@greg-finley](https://github.com/greg-finley) made their first contribution in [#3261](https://github.com/sqlfluff/sqlfluff/pull/3261)

## [0.13.0] - 2022-04-22

## Highlights

Major changes
include:

* New Rule (L064) for preferred quotes for quoted literals
* Rule speed improvements and fixing performance regression from 0.12.0
* Add configuration option to disallow hanging indents in L003
* Add `ignore_words_regex` configuration option for rules
* New GitHub Annotations option
* Many bug fixes and dialect improvements

## What’s Changed

* Redshift: Fix CREATE TABLE column constraints and COPY [#3151](https://github.com/sqlfluff/sqlfluff/pull/3151) [@tunetheweb](https://github.com/tunetheweb)
* New Rule L064: Consistent usage of preferred quotes for quoted literals [#3118](https://github.com/sqlfluff/sqlfluff/pull/3118) [@dmohns](https://github.com/dmohns)
* L025 bug fix: stop incorrectly flagging on nested inner joins [#3145](https://github.com/sqlfluff/sqlfluff/pull/3145) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Add labels, as well as `GRANT`/`DENY`/`REVOKE` [#3149](https://github.com/sqlfluff/sqlfluff/pull/3149) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: allow bracketless `VALUES` in `FROM` clauses [#3141](https://github.com/sqlfluff/sqlfluff/pull/3141) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Support `TRY_CONVERT` [#3143](https://github.com/sqlfluff/sqlfluff/pull/3143) [@fdw](https://github.com/fdw)
* T-SQL: Support `NVARCHAR(MAX)` [#3130](https://github.com/sqlfluff/sqlfluff/pull/3130) [@fdw](https://github.com/fdw)
* Allow column-less `INSERT INTO` with bracketed `SELECT` in ANSI and BigQuery [#3139](https://github.com/sqlfluff/sqlfluff/pull/3139) [@tunetheweb](https://github.com/tunetheweb)
* Hive: Support dynamic partition insert [#3126](https://github.com/sqlfluff/sqlfluff/pull/3126) [@barunpuri](https://github.com/barunpuri)
* T-SQL - `ALTER TABLE` - add support for `WITH CHECK ADD CONSTRAINT` and `CHECK CONSTRAINT` [#3132](https://github.com/sqlfluff/sqlfluff/pull/3132) [@nevado](https://github.com/nevado)
* TSQL: Support names for transactions [#3129](https://github.com/sqlfluff/sqlfluff/pull/3129) [@fdw](https://github.com/fdw)
* Snowflake: `StartsWith()` in `FromExpressionElementSegment` caused performance issues for large queries [#3128](https://github.com/sqlfluff/sqlfluff/pull/3128) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Fix parsing of Compound Statements [#3121](https://github.com/sqlfluff/sqlfluff/pull/3121) [@jonyscathe](https://github.com/jonyscathe)
* SparkSQL: Update to support all valid Literal Types [#3102](https://github.com/sqlfluff/sqlfluff/pull/3102) [@R7L208](https://github.com/R7L208)
* TSQL: Exclude non-function-name keywords from function names [#3112](https://github.com/sqlfluff/sqlfluff/pull/3112) [@jpers36](https://github.com/jpers36)
* ANSI `AT TIME ZONE` parsing improvements [#3115](https://github.com/sqlfluff/sqlfluff/pull/3115) [@tunetheweb](https://github.com/tunetheweb)
* When fixing a file, preserve the input file's permissions [#3114](https://github.com/sqlfluff/sqlfluff/pull/3114) [@barrywhart](https://github.com/barrywhart)
* Bug: L058 (flatten nested `CASE`) triggers incorrectly (the `ELSE` contains additional code) [#3113](https://github.com/sqlfluff/sqlfluff/pull/3113) [@barrywhart](https://github.com/barrywhart)
* Bug fix: Handle "lint" human-format file output correctly [#3109](https://github.com/sqlfluff/sqlfluff/pull/3109) [@barrywhart](https://github.com/barrywhart)
* L003: Add configuration option to disallow hanging indents [#3063](https://github.com/sqlfluff/sqlfluff/pull/3063) [@dmohns](https://github.com/dmohns)
* Add native Github-actions output [#3107](https://github.com/sqlfluff/sqlfluff/pull/3107) [@dmohns](https://github.com/dmohns)
* Improved signed literal parsing [#3108](https://github.com/sqlfluff/sqlfluff/pull/3108) [@tunetheweb](https://github.com/tunetheweb)
* Don't allow fixes to span template blocks [#3105](https://github.com/sqlfluff/sqlfluff/pull/3105) [@barrywhart](https://github.com/barrywhart)
* Add `ignore_words_regex` configuration option [#3098](https://github.com/sqlfluff/sqlfluff/pull/3098) [@dmohns](https://github.com/dmohns)
* Redshift: Better `AT TIME ZONE` support [#3087](https://github.com/sqlfluff/sqlfluff/pull/3087) [@tunetheweb](https://github.com/tunetheweb)
* Fix In The Wild typo [#3100](https://github.com/sqlfluff/sqlfluff/pull/3100) [@sivaraam](https://github.com/sivaraam)
* Snowflake: Add Create Storage Integration grammar. [#3075](https://github.com/sqlfluff/sqlfluff/pull/3075) [@jmc-bbk](https://github.com/jmc-bbk)
* ANSI: Allow `indented_using_on` in `MERGE` statements `ON` [#3096](https://github.com/sqlfluff/sqlfluff/pull/3096) [@dmohns](https://github.com/dmohns)
* Postgres: Support `COLLATE` in more clauses [#3095](https://github.com/sqlfluff/sqlfluff/pull/3095) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Support `NORMALIZE` function [#3086](https://github.com/sqlfluff/sqlfluff/pull/3086) [@tunetheweb](https://github.com/tunetheweb)
* ANSI (and other dialects): Add `DROP FUNCTION` support [#3082](https://github.com/sqlfluff/sqlfluff/pull/3082) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Support `DROP EXTENSION` [#3083](https://github.com/sqlfluff/sqlfluff/pull/3083) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Fix bug in Describe Statement [#3076](https://github.com/sqlfluff/sqlfluff/pull/3076) [@jmc-bbk](https://github.com/jmc-bbk)
* Update individual rules to take advantage of core rule processing changes [#3041](https://github.com/sqlfluff/sqlfluff/pull/3041) [@barrywhart](https://github.com/barrywhart)
* L003 forgives non misbalanced Jinja tags [#3065](https://github.com/sqlfluff/sqlfluff/pull/3065) [@OTooleMichael](https://github.com/OTooleMichael)
* Fix tsql dialect `EXEC = @Variable StoredProc` Failed Parsing Bug (#3070) [#3077](https://github.com/sqlfluff/sqlfluff/pull/3077) [@MartynJones87](https://github.com/MartynJones87)
* Snowflake Dialect: Add External Function DDL [#3071](https://github.com/sqlfluff/sqlfluff/pull/3071) [@chrisalexeev](https://github.com/chrisalexeev)
* SparkSQL: Support for Delta `UPDATE` statement syntax [#3073](https://github.com/sqlfluff/sqlfluff/pull/3073) [@R7L208](https://github.com/R7L208)
* SparkSQL: Test cases for Delta `DELETE FROM` syntax [#3072](https://github.com/sqlfluff/sqlfluff/pull/3072) [@R7L208](https://github.com/R7L208)
* Postgres: Support quoted `LANGUAGE` params [#3068](https://github.com/sqlfluff/sqlfluff/pull/3068) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug handling Jinja set with multiple vars, e.g.: `{% set a, b = 1, 2 %}` [#3066](https://github.com/sqlfluff/sqlfluff/pull/3066) [@barrywhart](https://github.com/barrywhart)
* L007 should ignore templated newlines [#3067](https://github.com/sqlfluff/sqlfluff/pull/3067) [@barrywhart](https://github.com/barrywhart)
* Allow aliases to pass L028 [#3062](https://github.com/sqlfluff/sqlfluff/pull/3062) [@tunetheweb](https://github.com/tunetheweb)
* Refactor core rule processing for flexibility and speed [#3061](https://github.com/sqlfluff/sqlfluff/pull/3061) [@barrywhart](https://github.com/barrywhart)
* Add editorconfig and precommit for SQL and YML files [#3058](https://github.com/sqlfluff/sqlfluff/pull/3058) [@tunetheweb](https://github.com/tunetheweb)
* Rule L003 performance: Cache the line number and last newline position [#3060](https://github.com/sqlfluff/sqlfluff/pull/3060) [@barrywhart](https://github.com/barrywhart)
* Fixed documentation for `sql_file_exts` example [#3059](https://github.com/sqlfluff/sqlfluff/pull/3059) [@KulykDmytro](https://github.com/KulykDmytro)
* BigQuery: Support `SAFE` functions [#3048](https://github.com/sqlfluff/sqlfluff/pull/3048) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: Fix `UNNEST` for L025 [#3054](https://github.com/sqlfluff/sqlfluff/pull/3054) [@tunetheweb](https://github.com/tunetheweb)
* Exasol: `CREATE/DROP/ALTER USER/ROLE` clean up for consistency [#3045](https://github.com/sqlfluff/sqlfluff/pull/3045) [@tunetheweb](https://github.com/tunetheweb)
* Postgres add `ALTER ROLE`/`ALTER USER` support [#3043](https://github.com/sqlfluff/sqlfluff/pull/3043) [@mrf](https://github.com/mrf)
* Add CarePay to SQLFluff in the wild [#3038](https://github.com/sqlfluff/sqlfluff/pull/3038) [@pvonglehn](https://github.com/pvonglehn)
* Postgres: Add `ON CONFLICT` Grammar [#3027](https://github.com/sqlfluff/sqlfluff/pull/3027) [@jmc-bbk](https://github.com/jmc-bbk)
* Add dialect to Docker test [#3033](https://github.com/sqlfluff/sqlfluff/pull/3033) [@tunetheweb](https://github.com/tunetheweb)

## New Contributors

* [@chrisalexeev](https://github.com/chrisalexeev) made their first contribution in [#3071](https://github.com/sqlfluff/sqlfluff/pull/3071)
* [@MartynJones87](https://github.com/MartynJones87) made their first contribution in [#3077](https://github.com/sqlfluff/sqlfluff/pull/3077)
* [@sivaraam](https://github.com/sivaraam) made their first contribution in [#3100](https://github.com/sqlfluff/sqlfluff/pull/3100)
* [@jonyscathe](https://github.com/jonyscathe) made their first contribution in [#3121](https://github.com/sqlfluff/sqlfluff/pull/3121)
* [@barunpuri](https://github.com/barunpuri) made their first contribution in [#3126](https://github.com/sqlfluff/sqlfluff/pull/3126)

## [0.12.0] - 2022-04-07

## Highlights

Major changes include:

* Dialect is now mandatory, either in command line, or in config **BREAKING CHANGE**
* Rename `spark3` dialect to `sparksql` **BREAKING CHANGE**
* L027 now checks table references exist **BREAKING CHANGE**
* New rule L063 to allow Datatypes to have a different capitalisation policy from L010.
  **BREAKING CHANGE**
* Refactor and performance improvements of Delimited and L003
* Many dialect improvements and fixes

## What’s Changed

* MySQL: Allow `JOIN`s in `UPDATE` expressions [#3031](https://github.com/sqlfluff/sqlfluff/pull/3031) [@zapion](https://github.com/zapion)
* Fix bug in patch generation for segments made of templated + literal fixes [#3030](https://github.com/sqlfluff/sqlfluff/pull/3030) [@barrywhart](https://github.com/barrywhart)
* Formatters code cleanup [#3029](https://github.com/sqlfluff/sqlfluff/pull/3029) [@barrywhart](https://github.com/barrywhart)
* Postgres better `CREATE USER`/`CREATE ROLE` support [#3016](https://github.com/sqlfluff/sqlfluff/pull/3016) [@mrf](https://github.com/mrf)
* SparkSQL: Add `MERGE` syntax [#3025](https://github.com/sqlfluff/sqlfluff/pull/3025) [@PhilippLange](https://github.com/PhilippLange)
* Remove Delimited workarounds [#3024](https://github.com/sqlfluff/sqlfluff/pull/3024) [@tunetheweb](https://github.com/tunetheweb)
* Add `exclude` option for `Ref` grammar [#3028](https://github.com/sqlfluff/sqlfluff/pull/3028) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Adding support for Delta Lake table schema updates [#3013](https://github.com/sqlfluff/sqlfluff/pull/3013) [@R7L208](https://github.com/R7L208)
* L027: Check table aliases exist [#2998](https://github.com/sqlfluff/sqlfluff/pull/2998) [@dmohns](https://github.com/dmohns)
* Snowflake: Added support for `REMOVE` statements [#3026](https://github.com/sqlfluff/sqlfluff/pull/3026) [@WittierDinosaur](https://github.com/WittierDinosaur)
* BigQuery: Support `WEEK` function with days of weeks [#3021](https://github.com/sqlfluff/sqlfluff/pull/3021) [@tunetheweb](https://github.com/tunetheweb)
* Sparksql quoted identifier in `STRUCT` [#3023](https://github.com/sqlfluff/sqlfluff/pull/3023) [@PhilippLange](https://github.com/PhilippLange)
* Force user to specify a dialect [#2995](https://github.com/sqlfluff/sqlfluff/pull/2995) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Parse `CREATE TABLE` with trailing comma [#3018](https://github.com/sqlfluff/sqlfluff/pull/3018) [@dmohns](https://github.com/dmohns)
* Snowflake: Add `IS (NOT) DISTINCT FROM` test cases [#3014](https://github.com/sqlfluff/sqlfluff/pull/3014) [@kd2718](https://github.com/kd2718)
* BigQuery: Add support for column `OPTIONS` in `STRUCT` definitions [#3017](https://github.com/sqlfluff/sqlfluff/pull/3017) [@dmohns](https://github.com/dmohns)
* PostgreSQL: added support for `CREATE ROLE` and `DROP ROLE` statements [#3010](https://github.com/sqlfluff/sqlfluff/pull/3010) [@dnim](https://github.com/dnim)
* Separate slow CI job to its own workflow [#3012](https://github.com/sqlfluff/sqlfluff/pull/3012) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Test cases for Delta Variation of Writing a table [#3009](https://github.com/sqlfluff/sqlfluff/pull/3009) [@R7L208](https://github.com/R7L208)
* Snowflake: Added support for `CLUSTER BY` and other `CREATE TABLE` improvements [#3008](https://github.com/sqlfluff/sqlfluff/pull/3008) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Support `TRIM` function parameters [#3007](https://github.com/sqlfluff/sqlfluff/pull/3007) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Support `AT TIME ZONE` in `EXTRACT` [#3004](https://github.com/sqlfluff/sqlfluff/pull/3004) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Move some keywords to unreserved [#3002](https://github.com/sqlfluff/sqlfluff/pull/3002) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Allow quoted variable names in `DECLARE` [#3006](https://github.com/sqlfluff/sqlfluff/pull/3006) [@dmohns](https://github.com/dmohns)
* T-SQL: allow optional `AS` keyword in parameters def [#3001](https://github.com/sqlfluff/sqlfluff/pull/3001) [@pguyot](https://github.com/pguyot)
* T-SQL: add support for `RETURNS @var TABLE` syntax [#3000](https://github.com/sqlfluff/sqlfluff/pull/3000) [@pguyot](https://github.com/pguyot)
* T-SQL: add support for parenthesized nested joins [#2993](https://github.com/sqlfluff/sqlfluff/pull/2993) [@pguyot](https://github.com/pguyot)
* dbt: Read builtins from code [#2988](https://github.com/sqlfluff/sqlfluff/pull/2988) [@dmohns](https://github.com/dmohns)
* T-SQL: fix table type in `DECLARE` statements [#2999](https://github.com/sqlfluff/sqlfluff/pull/2999) [@pguyot](https://github.com/pguyot)
* T-SQL: allow leading `GO` [#2997](https://github.com/sqlfluff/sqlfluff/pull/2997) [@pguyot](https://github.com/pguyot)
* T-SQL: add support for assignment operators [#2996](https://github.com/sqlfluff/sqlfluff/pull/2996) [@pguyot](https://github.com/pguyot)
* BigQuery: Add more `MERGE` statement variants [#2989](https://github.com/sqlfluff/sqlfluff/pull/2989) [@dmohns](https://github.com/dmohns)
* L041: Fix bug when there is a newline after the select clause modifier [#2981](https://github.com/sqlfluff/sqlfluff/pull/2981) [@jmc-bbk](https://github.com/jmc-bbk)
* Rule L045 doesn't recognise CTE usage in a subquery when rule L042 is enabled [#2980](https://github.com/sqlfluff/sqlfluff/pull/2980) [@barrywhart](https://github.com/barrywhart)
* dbt: Make `is_incremental()` defaults consistent [#2985](https://github.com/sqlfluff/sqlfluff/pull/2985) [@dmohns](https://github.com/dmohns)
* Rename Grammars for consistency [#2986](https://github.com/sqlfluff/sqlfluff/pull/2986) [@tunetheweb](https://github.com/tunetheweb)
* Added support for MySQL `UPDATE` Statements [#2982](https://github.com/sqlfluff/sqlfluff/pull/2982) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Redshift: Added `CREATE EXTERNAL SCHEMA`, bugfix in `PARTITION BY` [#2983](https://github.com/sqlfluff/sqlfluff/pull/2983) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Added `ALTER INDEX` and `REINDEX` to Postgres, Some Grammar Cleaning [#2979](https://github.com/sqlfluff/sqlfluff/pull/2979) [@WittierDinosaur](https://github.com/WittierDinosaur)
* T-SQL grammar deduplication [#2967](https://github.com/sqlfluff/sqlfluff/pull/2967) [@tunetheweb](https://github.com/tunetheweb)
* L003 Refactor [#2884](https://github.com/sqlfluff/sqlfluff/pull/2884) [@OTooleMichael](https://github.com/OTooleMichael)
* Delimited Refactor [#2831](https://github.com/sqlfluff/sqlfluff/pull/2831) [@WittierDinosaur](https://github.com/WittierDinosaur)
* SparkSQL: Support for querying snapshots when reading data with Delta Lake [#2972](https://github.com/sqlfluff/sqlfluff/pull/2972) [@R7L208](https://github.com/R7L208)
* Fix bug in L063 for BigQuery `STRUCT` params [#2975](https://github.com/sqlfluff/sqlfluff/pull/2975) [@tunetheweb](https://github.com/tunetheweb)
* Fix assertion error in dbt templater when file ends with whitespace strip (`-%}`) [#2976](https://github.com/sqlfluff/sqlfluff/pull/2976) [@barrywhart](https://github.com/barrywhart)
* Pass dbt vars to dbt [#2923](https://github.com/sqlfluff/sqlfluff/pull/2923) [@tcholewik](https://github.com/tcholewik)
* BigQuery: Add support for column `OPTIONS` [#2973](https://github.com/sqlfluff/sqlfluff/pull/2973) [@dmohns](https://github.com/dmohns)
* BigQuery: Allow expressions in `OPTIONS` clauses [#2971](https://github.com/sqlfluff/sqlfluff/pull/2971) [@dmohns](https://github.com/dmohns)
* Bump black to 22.3.0 on pre-commit [#2969](https://github.com/sqlfluff/sqlfluff/pull/2969) [@pguyot](https://github.com/pguyot)
* T-SQL: Redefine `DatatypeIdentifierSegment` [#2959](https://github.com/sqlfluff/sqlfluff/pull/2959) [@alanmcruickshank](https://github.com/alanmcruickshank)
* T-SQL: Add support for `WAITFOR` statement [#2968](https://github.com/sqlfluff/sqlfluff/pull/2968) [@pguyot](https://github.com/pguyot)
* T-SQL: Add `WHILE` statement support [#2966](https://github.com/sqlfluff/sqlfluff/pull/2966) [@pguyot](https://github.com/pguyot)
* T-SQL: `INTO` is optional within `INSERT` statement [#2963](https://github.com/sqlfluff/sqlfluff/pull/2963) [@pguyot](https://github.com/pguyot)
* Add basic `IS (NOT) DISTINCT FROM` support in most dialects [#2962](https://github.com/sqlfluff/sqlfluff/pull/2962) [@tunetheweb](https://github.com/tunetheweb)
* SparkSQL: Create Table Delta Lake Variant [#2954](https://github.com/sqlfluff/sqlfluff/pull/2954) [@R7L208](https://github.com/R7L208)
* T-SQL: Add support for `CREATE`/`DROP`/`DISABLE` `TRIGGER` [#2957](https://github.com/sqlfluff/sqlfluff/pull/2957) [@tunetheweb](https://github.com/tunetheweb)
* Bug: L042 modifies parse tree even during "lint" [#2955](https://github.com/sqlfluff/sqlfluff/pull/2955) [@barrywhart](https://github.com/barrywhart)
* Allow multiple post function clauses in Postgres and Redshift [#2952](https://github.com/sqlfluff/sqlfluff/pull/2952) [@aviv](https://github.com/aviv)
* Fix bug in L022 for trailing comments in CTE [#2946](https://github.com/sqlfluff/sqlfluff/pull/2946) [@tunetheweb](https://github.com/tunetheweb)
* More dialect checking, fixes, inheritance cleanup [#2942](https://github.com/sqlfluff/sqlfluff/pull/2942) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Support `OUTPUT` Params and `GOTO` Statements [#2949](https://github.com/sqlfluff/sqlfluff/pull/2949) [@tunetheweb](https://github.com/tunetheweb)
* BREAKING CHANGE: change existing dialect name from `spark3` to `sparksql` [#2924](https://github.com/sqlfluff/sqlfluff/pull/2924) [@R7L208](https://github.com/R7L208)
* Add Symend to SQLFluff In The Wild [#2940](https://github.com/sqlfluff/sqlfluff/pull/2940) [@HeyZiko](https://github.com/HeyZiko)
* Simplify segment creation and inheritance in dialects [#2933](https://github.com/sqlfluff/sqlfluff/pull/2933) [@barrywhart](https://github.com/barrywhart)
* Snowflake: Add `ALTER STREAM` support [#2939](https://github.com/sqlfluff/sqlfluff/pull/2939) [@HeyZiko](https://github.com/HeyZiko)
* T-SQL: Handle multiple nested joins [#2938](https://github.com/sqlfluff/sqlfluff/pull/2938) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Add `CREATE STREAM` support [#2936](https://github.com/sqlfluff/sqlfluff/pull/2936) [@HeyZiko](https://github.com/HeyZiko)
* T-SQL: Support nested joins [#2928](https://github.com/sqlfluff/sqlfluff/pull/2928) [@tunetheweb](https://github.com/tunetheweb)
* To replace base dialect segment class, must subclass or provide same stuff [#2930](https://github.com/sqlfluff/sqlfluff/pull/2930) [@barrywhart](https://github.com/barrywhart)
* Add new rule L063 to allow separate capitalisation policy for Datatypes [#2931](https://github.com/sqlfluff/sqlfluff/pull/2931) [@tunetheweb](https://github.com/tunetheweb)
* Adds support for column definitions in table alias expressions [#2932](https://github.com/sqlfluff/sqlfluff/pull/2932) [@derickl](https://github.com/derickl)
* BigQuery: support numeric aliases in `UNPIVOT` clauses [#2925](https://github.com/sqlfluff/sqlfluff/pull/2925) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Supported nested `MERGE` statements [#2926](https://github.com/sqlfluff/sqlfluff/pull/2926) [@tunetheweb](https://github.com/tunetheweb)

## New Contributors

* [@HeyZiko](https://github.com/HeyZiko) made their first contribution in [#2936](https://github.com/sqlfluff/sqlfluff/pull/2936)
* [@aviv](https://github.com/aviv) made their first contribution in [#2952](https://github.com/sqlfluff/sqlfluff/pull/2952)
* [@pguyot](https://github.com/pguyot) made their first contribution in [#2963](https://github.com/sqlfluff/sqlfluff/pull/2963)
* [@dmohns](https://github.com/dmohns) made their first contribution in [#2971](https://github.com/sqlfluff/sqlfluff/pull/2971)
* [@tcholewik](https://github.com/tcholewik) made their first contribution in [#2923](https://github.com/sqlfluff/sqlfluff/pull/2923)
* [@jmc-bbk](https://github.com/jmc-bbk) made their first contribution in [#2981](https://github.com/sqlfluff/sqlfluff/pull/2981)
* [@dnim](https://github.com/dnim) made their first contribution in [#3010](https://github.com/sqlfluff/sqlfluff/pull/3010)
* [@kd2718](https://github.com/kd2718) made their first contribution in [#3014](https://github.com/sqlfluff/sqlfluff/pull/3014)
* [@mrf](https://github.com/mrf) made their first contribution in [#3016](https://github.com/sqlfluff/sqlfluff/pull/3016)
* [@zapion](https://github.com/zapion) made their first contribution in [#3031](https://github.com/sqlfluff/sqlfluff/pull/3031)

## [0.11.2] - 2022-03-25

## What’s Changed

* Added Support For Snowflake Inline Comments [#2919](https://github.com/sqlfluff/sqlfluff/pull/2919) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Spark3: Fix bug which did not allow multiple joins [#2917](https://github.com/sqlfluff/sqlfluff/pull/2917) [@tunetheweb](https://github.com/tunetheweb)
* Added Snowflake Alter View Support [#2915](https://github.com/sqlfluff/sqlfluff/pull/2915) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Adjust L010 to ignore nulls and booleans covered by L040 [#2913](https://github.com/sqlfluff/sqlfluff/pull/2913) [@tunetheweb](https://github.com/tunetheweb)
* Fix bug where L043 corrupts SQL [#2908](https://github.com/sqlfluff/sqlfluff/pull/2908) [@barrywhart](https://github.com/barrywhart)
* Jinja: Add support for Block Assignments [#2907](https://github.com/sqlfluff/sqlfluff/pull/2907) [@barrywhart](https://github.com/barrywhart)
* L042 fix fails with missing function `get_identifier()` on Postgres, Redshift dialects [#2899](https://github.com/sqlfluff/sqlfluff/pull/2899) [@barrywhart](https://github.com/barrywhart)
* BigQuery: Better Set Operators support [#2901](https://github.com/sqlfluff/sqlfluff/pull/2901) [@tunetheweb](https://github.com/tunetheweb)
* Hive: support for complex types in `cast` `rowtype` definition [#2896](https://github.com/sqlfluff/sqlfluff/pull/2896) [@KulykDmytro](https://github.com/KulykDmytro)
* Hive: added `json` type support [#2894](https://github.com/sqlfluff/sqlfluff/pull/2894) [@KulykDmytro](https://github.com/KulykDmytro)
* Hive: fix incorrect L027 error for rowtype attribute name [#2893](https://github.com/sqlfluff/sqlfluff/pull/2893) [@KulykDmytro](https://github.com/KulykDmytro)
* Hive: Add `ARRAY` support [#2891](https://github.com/sqlfluff/sqlfluff/pull/2891)
[@tunetheweb](https://github.com/tunetheweb) * Implemented `PIVOT`/`UNPIVOT` Redshift + Fixed Snowflake Bug + Standardized `PIVOT`/`UNPIVOT` Parsing [#2888](https://github.com/sqlfluff/sqlfluff/pull/2888) [@PLBMR](https://github.com/PLBMR) * Fix AssertionError in dbt templater when file ends with multiple newlines [#2887](https://github.com/sqlfluff/sqlfluff/pull/2887) [@barrywhart](https://github.com/barrywhart) * Hive: Row typecasting in `cast` function [#2889](https://github.com/sqlfluff/sqlfluff/pull/2889) [@KulykDmytro](https://github.com/KulykDmytro) * dbt templater should gracefully skip macro files [#2886](https://github.com/sqlfluff/sqlfluff/pull/2886) [@barrywhart](https://github.com/barrywhart) * Disable L031 on BigQuery due to complex backtick / project name behavior [#2882](https://github.com/sqlfluff/sqlfluff/pull/2882) [@barrywhart](https://github.com/barrywhart) * Documentation: Update dbt templater docs with more detail about pros and cons [#2885](https://github.com/sqlfluff/sqlfluff/pull/2885) [@barrywhart](https://github.com/barrywhart) * BigQuery: Better `STRUCT` Array Support [#2881](https://github.com/sqlfluff/sqlfluff/pull/2881) [@tunetheweb](https://github.com/tunetheweb) * L042: Detect violations when column is templated [#2879](https://github.com/sqlfluff/sqlfluff/pull/2879) [@barrywhart](https://github.com/barrywhart) * Improve parsing of `BETWEEN` statements [#2878](https://github.com/sqlfluff/sqlfluff/pull/2878) [@MarcAntoineSchmidtQC](https://github.com/MarcAntoineSchmidtQC) ## [0.11.1] - 2022-03-17 ## Highlights Major changes include: * A number of changes to the `fix` code to make it more robust * Improvements to templating blocks * `generate_parse_fixture_yml` options to allow quicker, partial regeneration of YML files * Numerous rule fixes including adding auto fix to L042 * Numerous grammar changes ## What’s Changed * Spark3: Support for `SHOW` statements [#2864](https://github.com/sqlfluff/sqlfluff/pull/2864) [@R7L208](https://github.com/R7L208) * Add Markerr to list of organizations using SQLFluff in the wild [#2874](https://github.com/sqlfluff/sqlfluff/pull/2874) [@kdw2126](https://github.com/kdw2126) * Refactor JinjaTracer: Split into two classes, break up `_slice_template()` function [#2870](https://github.com/sqlfluff/sqlfluff/pull/2870) [@barrywhart](https://github.com/barrywhart) * BigQuery: support Parameterized Numeric Literals [#2872](https://github.com/sqlfluff/sqlfluff/pull/2872) [@tunetheweb](https://github.com/tunetheweb) * L042 autofix [#2860](https://github.com/sqlfluff/sqlfluff/pull/2860) [@OTooleMichael](https://github.com/OTooleMichael) * Redshift: transaction statement [#2852](https://github.com/sqlfluff/sqlfluff/pull/2852) [@rpr-ableton](https://github.com/rpr-ableton) * JinjaTracer fix for endif/endfor inside "set" or "macro" blocks [#2868](https://github.com/sqlfluff/sqlfluff/pull/2868) [@barrywhart](https://github.com/barrywhart) * L009: Handle adding newline after `{% endif %}` at end of file [#2862](https://github.com/sqlfluff/sqlfluff/pull/2862) [@barrywhart](https://github.com/barrywhart) * Redshift: Add support for `AT TIME ZONE` [#2863](https://github.com/sqlfluff/sqlfluff/pull/2863) [@tunetheweb](https://github.com/tunetheweb) * L032 bug fix and fix improvement [#2859](https://github.com/sqlfluff/sqlfluff/pull/2859) [@OTooleMichael](https://github.com/OTooleMichael) * Refactor JinjaTracer; store lex output as individual strings where possible [#2856](https://github.com/sqlfluff/sqlfluff/pull/2856) 
[@barrywhart](https://github.com/barrywhart) * Add ability to regenerate subsets of fixture YAMLs (by dialect, or new only) [#2850](https://github.com/sqlfluff/sqlfluff/pull/2850) [@OTooleMichael](https://github.com/OTooleMichael) * Fix bug with Jinja and dbt `{% set %}` blocks [#2849](https://github.com/sqlfluff/sqlfluff/pull/2849) [@barrywhart](https://github.com/barrywhart) * Bug fix: `ValueError: Position Not Found for lint/parse/fix` in JinjaTracer [#2846](https://github.com/sqlfluff/sqlfluff/pull/2846) [@barrywhart](https://github.com/barrywhart) * Reduce unnecessary setting run ci [#2847](https://github.com/sqlfluff/sqlfluff/pull/2847) [@zhongjiajie](https://github.com/zhongjiajie) * Spark3: statements to `SET` and `RESET` spark runtime configurations [#2839](https://github.com/sqlfluff/sqlfluff/pull/2839) [@R7L208](https://github.com/R7L208) * BigQuery - prevent L006 flagging hyphenated table references [#2842](https://github.com/sqlfluff/sqlfluff/pull/2842) [@tunetheweb](https://github.com/tunetheweb) * T-SQL fix `CONVERT` function definition [#2843](https://github.com/sqlfluff/sqlfluff/pull/2843) [@tunetheweb](https://github.com/tunetheweb) * Change rule test script from bash to python [#2840](https://github.com/sqlfluff/sqlfluff/pull/2840) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3: Support `DESCRIBE` statement [#2837](https://github.com/sqlfluff/sqlfluff/pull/2837) [@R7L208](https://github.com/R7L208) * Spark3: Refactor `REFRESH` statements into one class [#2838](https://github.com/sqlfluff/sqlfluff/pull/2838) [@R7L208](https://github.com/R7L208) * Prevent rules incorrectly returning conflicting fixes to same position [#2830](https://github.com/sqlfluff/sqlfluff/pull/2830) [@barrywhart](https://github.com/barrywhart) * Redshift and BigQuery: Update dateparts values and functions [#2829](https://github.com/sqlfluff/sqlfluff/pull/2829) [@rpr-ableton](https://github.com/rpr-ableton) * MySQL add `NOW` support [#2825](https://github.com/sqlfluff/sqlfluff/pull/2825) [@tunetheweb](https://github.com/tunetheweb) * MySQL `DELETE FROM` support [#2823](https://github.com/sqlfluff/sqlfluff/pull/2823) [@tunetheweb](https://github.com/tunetheweb) * Rule L059 bug with `IF` [#2824](https://github.com/sqlfluff/sqlfluff/pull/2824) [@tunetheweb](https://github.com/tunetheweb) * Prevent exceptions when running `fix` on dialect fixtures [#2818](https://github.com/sqlfluff/sqlfluff/pull/2818) [@tunetheweb](https://github.com/tunetheweb) * Spark3: Support to handle `CACHE` AND `UNCACHE` auxiliary statements [#2814](https://github.com/sqlfluff/sqlfluff/pull/2814) [@R7L208](https://github.com/R7L208) * Fix L036 error on `CREATE VIEW AS SELECT` [#2816](https://github.com/sqlfluff/sqlfluff/pull/2816) [@tunetheweb](https://github.com/tunetheweb) * Fixes for the new post-fix parse check [#2813](https://github.com/sqlfluff/sqlfluff/pull/2813) [@barrywhart](https://github.com/barrywhart) * Add initial `MERGE` syntax to most dialects [#2807](https://github.com/sqlfluff/sqlfluff/pull/2807) [@PhilippLange](https://github.com/PhilippLange) * Automated tests should fail if a lint fix introduces a parse error [#2809](https://github.com/sqlfluff/sqlfluff/pull/2809) [@barrywhart](https://github.com/barrywhart) ## New Contributors * [@kdw2126](https://github.com/kdw2126) made their first contribution in [#2874](https://github.com/sqlfluff/sqlfluff/pull/2874) ## [0.11.0] - 2022-03-07 ## Highlights Major changes include: * Changes rule L030 to use `extended_capitalisation_policy` to support PascalCase 
**BREAKING CHANGE** * Fixes dbt error on ephemeral models * Log warnings for fixes that seem to corrupt the parsed SQL, as these may cause incorrect fixes in other rules. * Bug fix to rule L011 for `implicit` aliases * Bug fix to rule L019 for commas beside templated code * Rule L051 can now optionally be applied to `LEFT`/`RIGHT`/`OUTER JOIN`s * Improvements to Test Suite * Many dialect improvements ## What’s Changed * Exasol: Fix `INTERVAL` literals / expression [#2804](https://github.com/sqlfluff/sqlfluff/pull/2804) [@sti0](https://github.com/sti0) * Exasol: Add `IDLE_TIMEOUT` and `SNAPSHOT_MODE` [#2805](https://github.com/sqlfluff/sqlfluff/pull/2805) [@sti0](https://github.com/sti0) * Exasol: Support value range clause within `INSERT` statements (7.1+) [#2802](https://github.com/sqlfluff/sqlfluff/pull/2802) [@sti0](https://github.com/sti0) * Exasol: Add lua adapter scripts (7.1+) [#2801](https://github.com/sqlfluff/sqlfluff/pull/2801) [@sti0](https://github.com/sti0) * Exasol: Add openid support for create/alter user (7.1+) [#2800](https://github.com/sqlfluff/sqlfluff/pull/2800) [@sti0](https://github.com/sti0) * Exasol: New consumer group params and unreserved keywords (7.1+) [#2799](https://github.com/sqlfluff/sqlfluff/pull/2799) [@sti0](https://github.com/sti0) * Snowflake: Complete `INSERT` grammar [#2798](https://github.com/sqlfluff/sqlfluff/pull/2798) [@jpy-git](https://github.com/jpy-git) * Fix Postgres `VALUES`, make Spark3 `VALUES` consistent [#2797](https://github.com/sqlfluff/sqlfluff/pull/2797) [@jpy-git](https://github.com/jpy-git) * Postgres: `INSERT DEFAULT` value [#2796](https://github.com/sqlfluff/sqlfluff/pull/2796) [@jpy-git](https://github.com/jpy-git) * Postgres: Make `AS` optional in Postgres `DELETE` [#2794](https://github.com/sqlfluff/sqlfluff/pull/2794) [@jpy-git](https://github.com/jpy-git) * BigQuery support `UNNEST` aliases [#2793](https://github.com/sqlfluff/sqlfluff/pull/2793) [@tunetheweb](https://github.com/tunetheweb) * Postgres: Add all range operators [#2789](https://github.com/sqlfluff/sqlfluff/pull/2789) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `DELETE FROM` grammar [#2792](https://github.com/sqlfluff/sqlfluff/pull/2792) [@jpy-git](https://github.com/jpy-git) * Postgres: Complete `DELETE FROM` grammar [#2791](https://github.com/sqlfluff/sqlfluff/pull/2791) [@jpy-git](https://github.com/jpy-git) * Postgres: Add `RETURNING` grammar to `INSERT INTO` statement [#2790](https://github.com/sqlfluff/sqlfluff/pull/2790) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `PATTERN` grammar [#2788](https://github.com/sqlfluff/sqlfluff/pull/2788) [@jpy-git](https://github.com/jpy-git) * Redshift: add `[ALTER/CREATE/DROP] PROCEDURE` segments [#2774](https://github.com/sqlfluff/sqlfluff/pull/2774) [@rpr-ableton](https://github.com/rpr-ableton) * Spark3: Support for `ANALYZE TABLE` statement [#2780](https://github.com/sqlfluff/sqlfluff/pull/2780) [@R7L208](https://github.com/R7L208) * Snowflake: Add `MATCH_RECOGNIZE` clause [#2781](https://github.com/sqlfluff/sqlfluff/pull/2781) [@jpy-git](https://github.com/jpy-git) * Snowflake: Complete `LIMIT` grammar [#2784](https://github.com/sqlfluff/sqlfluff/pull/2784) [@jpy-git](https://github.com/jpy-git) * Rough autofix for L028 [#2757](https://github.com/sqlfluff/sqlfluff/pull/2757) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3 bug: Create with complex data types (#2761) [#2782](https://github.com/sqlfluff/sqlfluff/pull/2782) [@PhilippLange](https://github.com/PhilippLange) * 
Snowflake: Complete `LIKE` grammar [#2779](https://github.com/sqlfluff/sqlfluff/pull/2779) [@jpy-git](https://github.com/jpy-git) * Spark3: Auxiliary `FILE` and `JAR` statements [#2778](https://github.com/sqlfluff/sqlfluff/pull/2778) [@R7L208](https://github.com/R7L208) * Snowflake: Refine `SET`/`UNSET` `MASKING POLICY` grammar [#2775](https://github.com/sqlfluff/sqlfluff/pull/2775) [@jpy-git](https://github.com/jpy-git) * L049 bug: correct over zealous `=` --> `IS` [#2760](https://github.com/sqlfluff/sqlfluff/pull/2760) [@OTooleMichael](https://github.com/OTooleMichael) * Make extension case insensitive [#2773](https://github.com/sqlfluff/sqlfluff/pull/2773) [@tunetheweb](https://github.com/tunetheweb) * Snowflake: Add dollar quoted string literal [#2770](https://github.com/sqlfluff/sqlfluff/pull/2770) [@jpy-git](https://github.com/jpy-git) * Bug fix: L036 corrupts `SELECT DISTINCT id` query [#2768](https://github.com/sqlfluff/sqlfluff/pull/2768) [@barrywhart](https://github.com/barrywhart) * Snowflake: Add `CHANGES` clause [#2764](https://github.com/sqlfluff/sqlfluff/pull/2764) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `EXPLAIN` statement [#2767](https://github.com/sqlfluff/sqlfluff/pull/2767) [@R7L208](https://github.com/R7L208) * Snowflake: Add `CONNECT BY` clause [#2763](https://github.com/sqlfluff/sqlfluff/pull/2763) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `TRANSFORM` clause [#2762](https://github.com/sqlfluff/sqlfluff/pull/2762) [@R7L208](https://github.com/R7L208) * Snowflake: Fix `GROUP BY {CUBE|ROLLUP|GROUPING SETS}` parsing [#2759](https://github.com/sqlfluff/sqlfluff/pull/2759) [@jpy-git](https://github.com/jpy-git) * BigQuery: allow identifiers starting with dash [#2756](https://github.com/sqlfluff/sqlfluff/pull/2756) [@tunetheweb](https://github.com/tunetheweb) * Add `ignore_words` options to L057 and L059 [#2753](https://github.com/sqlfluff/sqlfluff/pull/2753) [@tunetheweb](https://github.com/tunetheweb) * L012 bug fix for T-SQL alternative alias types [#2750](https://github.com/sqlfluff/sqlfluff/pull/2750) [@tunetheweb](https://github.com/tunetheweb) * Spark3: Support for `PIVOT` clause [#2752](https://github.com/sqlfluff/sqlfluff/pull/2752) [@R7L208](https://github.com/R7L208) * Update Redshift reserved keywords list [#2751](https://github.com/sqlfluff/sqlfluff/pull/2751) [@rpr-ableton](https://github.com/rpr-ableton) * L007 autofix [#2735](https://github.com/sqlfluff/sqlfluff/pull/2735) [@OTooleMichael](https://github.com/OTooleMichael) * L032 fixable in easy cases [#2737](https://github.com/sqlfluff/sqlfluff/pull/2737) [@OTooleMichael](https://github.com/OTooleMichael) * Fix dbt templater runtime error in `inject_ctes_into_sql()` [#2748](https://github.com/sqlfluff/sqlfluff/pull/2748) [@barrywhart](https://github.com/barrywhart) * L059: Exasol: Allow quotes around passwords in `CREATE USER` [#2744](https://github.com/sqlfluff/sqlfluff/pull/2744) [@sti0](https://github.com/sti0) * Improve docs for `load_macros_from_path` [#2743](https://github.com/sqlfluff/sqlfluff/pull/2743) [@barrywhart](https://github.com/barrywhart) * Make L045 (Query defines a CTE but does not use it) case insensitive [#2746](https://github.com/sqlfluff/sqlfluff/pull/2746) [@barrywhart](https://github.com/barrywhart) * Add L049 test for T-SQL alternate alias syntax (`=`) [#2745](https://github.com/sqlfluff/sqlfluff/pull/2745) [@barrywhart](https://github.com/barrywhart) * `BaseSegment.pos_marker` is typed as non-optional but sometimes set to `None` 
[#2741](https://github.com/sqlfluff/sqlfluff/pull/2741) [@barrywhart](https://github.com/barrywhart) * Support Pascal case for L030 [#2739](https://github.com/sqlfluff/sqlfluff/pull/2739) [@tunetheweb](https://github.com/tunetheweb) * Postgres, Redshift: Support `SIMILAR TO` pattern matching expressions [#2732](https://github.com/sqlfluff/sqlfluff/pull/2732) [@PLBMR](https://github.com/PLBMR) * Forgive shorthand cast only / bracket only expressions from L013 [#2729](https://github.com/sqlfluff/sqlfluff/pull/2729) [@OTooleMichael](https://github.com/OTooleMichael) * L052: Refactor `_eval()` into individual functions to improve readability [#2733](https://github.com/sqlfluff/sqlfluff/pull/2733) [@barrywhart](https://github.com/barrywhart) * L018: Move closing parenthesis to next line [#2734](https://github.com/sqlfluff/sqlfluff/pull/2734) [@barrywhart](https://github.com/barrywhart) * Improve rule yaml tests: assert that `fix_str` passes the rule [#2624](https://github.com/sqlfluff/sqlfluff/pull/2624) [@juhoautio](https://github.com/juhoautio) * Extend rule L051 to `LEFT`/`RIGHT`/`OUTER` `JOIN`s [#2719](https://github.com/sqlfluff/sqlfluff/pull/2719) [@rpr-ableton](https://github.com/rpr-ableton) * T-SQL: Allow aliases with `=` [#2727](https://github.com/sqlfluff/sqlfluff/pull/2727) [@fdw](https://github.com/fdw) * T-SQL: Support table variables [#2728](https://github.com/sqlfluff/sqlfluff/pull/2728) [@fdw](https://github.com/fdw) * Support for checking violations in YAML rule tests [#2718](https://github.com/sqlfluff/sqlfluff/pull/2718) [@juhoautio](https://github.com/juhoautio) * Roll back PR #2610 [#2726](https://github.com/sqlfluff/sqlfluff/pull/2726) [@barrywhart](https://github.com/barrywhart) * Redshift: Allow whitespace around cast operators [#2721](https://github.com/sqlfluff/sqlfluff/pull/2721) [@PLBMR](https://github.com/PLBMR) * Support database links in Oracle [#2725](https://github.com/sqlfluff/sqlfluff/pull/2725) [@tunetheweb](https://github.com/tunetheweb) * Rule L019: Ignore comma placement violations if the adjacent code is templated [#2717](https://github.com/sqlfluff/sqlfluff/pull/2717) [@barrywhart](https://github.com/barrywhart) * T-SQL: Add drop constraint syntax [#2724](https://github.com/sqlfluff/sqlfluff/pull/2724) [@fdw](https://github.com/fdw) * ANSI: Support optionally bracketed CTE [#2716](https://github.com/sqlfluff/sqlfluff/pull/2716) [@OTooleMichael](https://github.com/OTooleMichael) * Spark3: Test cases for `CASE` clause [#2714](https://github.com/sqlfluff/sqlfluff/pull/2714) [@R7L208](https://github.com/R7L208) * Spark3: Support for `WINDOW` functions [#2711](https://github.com/sqlfluff/sqlfluff/pull/2711) [@R7L208](https://github.com/R7L208) * T-SQL: Add variables as options for `RAISERROR` parameters [#2709](https://github.com/sqlfluff/sqlfluff/pull/2709) [@jpers36](https://github.com/jpers36) * T-SQL: Add `OPTION` clause to `UPDATE` [#2707](https://github.com/sqlfluff/sqlfluff/pull/2707) [@jpers36](https://github.com/jpers36) * Spark3: Test cases for `WHERE` clause [#2704](https://github.com/sqlfluff/sqlfluff/pull/2704) [@R7L208](https://github.com/R7L208) * Spark3: test cases for Table-Valued Functions [#2703](https://github.com/sqlfluff/sqlfluff/pull/2703) [@R7L208](https://github.com/R7L208) * T-SQL: Allow for optionally bracketed `PARTITION BY` elements [#2702](https://github.com/sqlfluff/sqlfluff/pull/2702) [@jpers36](https://github.com/jpers36) * T-SQL: Fix `SET TRANSACTION ISOLATION LEVEL` parsing 
[#2701](https://github.com/sqlfluff/sqlfluff/pull/2701) [@jpers36](https://github.com/jpers36) * Migrate tricky L004 tests to python [#2681](https://github.com/sqlfluff/sqlfluff/pull/2681) [@juhoautio](https://github.com/juhoautio) * Core linter enhancement: Check for successful parse after applying fixes [#2657](https://github.com/sqlfluff/sqlfluff/pull/2657) [@barrywhart](https://github.com/barrywhart) * Spark3: Support for `LATERAL VIEW` clause [#2687](https://github.com/sqlfluff/sqlfluff/pull/2687) [@R7L208](https://github.com/R7L208) * Document python requirement for tox/mypy & remove basepython from conf [#2644](https://github.com/sqlfluff/sqlfluff/pull/2644) [@juhoautio](https://github.com/juhoautio) * Fix rule L011 for implicit aliases [#2683](https://github.com/sqlfluff/sqlfluff/pull/2683) [@tunetheweb](https://github.com/tunetheweb) * Pin markupsafe to prevent CI failures [#2685](https://github.com/sqlfluff/sqlfluff/pull/2685) [@tunetheweb](https://github.com/tunetheweb) * Exasol: Allow `CROSS` joins [#2680](https://github.com/sqlfluff/sqlfluff/pull/2680) [@sti0](https://github.com/sti0) * Exasol: Improve function formatting [#2678](https://github.com/sqlfluff/sqlfluff/pull/2678) [@sti0](https://github.com/sti0) * T-SQL: Add indentation for `CREATE` `INDEX`/`STATISTICS` [#2679](https://github.com/sqlfluff/sqlfluff/pull/2679) [@jpers36](https://github.com/jpers36) * Spark3: Support for `TABLESAMPLE` clause [#2674](https://github.com/sqlfluff/sqlfluff/pull/2674) [@R7L208](https://github.com/R7L208) * T-SQL: Improve `RAISERROR` functionality [#2672](https://github.com/sqlfluff/sqlfluff/pull/2672) [@jpers36](https://github.com/jpers36) * Snowflake dialect update for `MERGE INTO` predicates [#2670](https://github.com/sqlfluff/sqlfluff/pull/2670) [@The-Loud](https://github.com/The-Loud) * Assert that fix_str is set [#2663](https://github.com/sqlfluff/sqlfluff/pull/2663) [@juhoautio](https://github.com/juhoautio) ## New Contributors * [@The-Loud](https://github.com/The-Loud) made their first contribution in [#2670](https://github.com/sqlfluff/sqlfluff/pull/2670) * [@OTooleMichael](https://github.com/OTooleMichael) made their first contribution in [#2716](https://github.com/sqlfluff/sqlfluff/pull/2716) * [@PhilippLange](https://github.com/PhilippLange) made their first contribution in [#2782](https://github.com/sqlfluff/sqlfluff/pull/2782) ## [0.10.1] - 2022-02-15 ## Highlights Major changes include: * Improvements to rules L023, L045, L048, L052, L059 to make them more accurate (L059 is sketched in the example below). * If `sqlfluff fix` cannot find a stable fix after `runaway_limit` iterations (default 10), then no fixes will be applied. * Addition of `--write-output` config to the command line to prevent errors corrupting output. 
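As a rough sketch of the kind of case rule L059 (unnecessary quoted identifiers) targets — the table and column names here are hypothetical, and exact behaviour depends on dialect and configuration:

```sql
-- Flagged by L059: the quotes around these identifiers add nothing
SELECT "first_name" FROM "customers";

-- Preferred: leave identifiers unquoted where quoting is not required
SELECT first_name FROM customers;
```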
* Various dialect improvements ## What’s Changed * Redshift: Support DATETIME as a valid datatype [#2665](https://github.com/sqlfluff/sqlfluff/pull/2665) [@PLBMR](https://github.com/PLBMR) * Support L033 for RedShift [#2661](https://github.com/sqlfluff/sqlfluff/pull/2661) [@tunetheweb](https://github.com/tunetheweb) * Fix parsing types and add check to test in future [#2652](https://github.com/sqlfluff/sqlfluff/pull/2652) [@tunetheweb](https://github.com/tunetheweb) * Spark3: Support for `SORT BY` Clause [#2651](https://github.com/sqlfluff/sqlfluff/pull/2651) [@R7L208](https://github.com/R7L208) * Migrate issue template from markdown to yaml [#2626](https://github.com/sqlfluff/sqlfluff/pull/2626) [@zhongjiajie](https://github.com/zhongjiajie) * L048 - handle more statements and exclude casting operators [#2642](https://github.com/sqlfluff/sqlfluff/pull/2642) [@tunetheweb](https://github.com/tunetheweb) * MySQL support `CURRENT_TIMESTAMP()` in `CREATE TABLE` [#2648](https://github.com/sqlfluff/sqlfluff/pull/2648) [@tunetheweb](https://github.com/tunetheweb) * Postgres enhanced `DELETE FROM` syntax [#2643](https://github.com/sqlfluff/sqlfluff/pull/2643) [@tunetheweb](https://github.com/tunetheweb) * Bug fix: L025 should consider BigQuery `QUALIFY` clause [#2647](https://github.com/sqlfluff/sqlfluff/pull/2647) [@barrywhart](https://github.com/barrywhart) * Bug fix: L025 overlooking `JOIN ON` clause if join expression in parentheses [#2645](https://github.com/sqlfluff/sqlfluff/pull/2645) [@barrywhart](https://github.com/barrywhart) * L045 not reporting unused CTEs if the query uses templating [#2641](https://github.com/sqlfluff/sqlfluff/pull/2641) [@barrywhart](https://github.com/barrywhart) * Fix IndexError in L001 [#2640](https://github.com/sqlfluff/sqlfluff/pull/2640) [@barrywhart](https://github.com/barrywhart) * L052: If require_final_semicolon is set, ensure semicolon after ALL statements [#2610](https://github.com/sqlfluff/sqlfluff/pull/2610) [@barrywhart](https://github.com/barrywhart) * L023 to also fix extra newlines in CTE [#2623](https://github.com/sqlfluff/sqlfluff/pull/2623) [@juhoautio](https://github.com/juhoautio) * Spark3: Enhancements for Set Operators [#2622](https://github.com/sqlfluff/sqlfluff/pull/2622) [@R7L208](https://github.com/R7L208) * Doc a better choice for default env [#2630](https://github.com/sqlfluff/sqlfluff/pull/2630) [@juhoautio](https://github.com/juhoautio) * Ensure ordering of fix compatible and config in rules docs [#2620](https://github.com/sqlfluff/sqlfluff/pull/2620) [@zhongjiajie](https://github.com/zhongjiajie) * Pin python version for tox -e mypy [#2629](https://github.com/sqlfluff/sqlfluff/pull/2629) [@juhoautio](https://github.com/juhoautio) * Hitting the linter loop limit should be treated as an error [#2628](https://github.com/sqlfluff/sqlfluff/pull/2628) [@barrywhart](https://github.com/barrywhart) * Allow file output directly from cli [#2625](https://github.com/sqlfluff/sqlfluff/pull/2625) [@alanmcruickshank](https://github.com/alanmcruickshank) * BigQuery `UNPIVOT` and `PIVOT` fixes [#2619](https://github.com/sqlfluff/sqlfluff/pull/2619) [@tunetheweb](https://github.com/tunetheweb) * L059 quoted identifiers bug [#2614](https://github.com/sqlfluff/sqlfluff/pull/2614) [@tunetheweb](https://github.com/tunetheweb) * Snowflake dialect: Adjust snowflake array access [#2621](https://github.com/sqlfluff/sqlfluff/pull/2621) [@alanmcruickshank](https://github.com/alanmcruickshank) * Spark3: Test Cases for `ORDER BY` in `SELECT` 
[#2618](https://github.com/sqlfluff/sqlfluff/pull/2618) [@R7L208](https://github.com/R7L208) * Fix typos in 0.10.0 changelog [#2605](https://github.com/sqlfluff/sqlfluff/pull/2605) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Indent `IF` clause expression segments [#2615](https://github.com/sqlfluff/sqlfluff/pull/2615) [@jpers36](https://github.com/jpers36) * Spark3: Enhancements for `LIMIT` Clause [#2612](https://github.com/sqlfluff/sqlfluff/pull/2612) [@R7L208](https://github.com/R7L208) * Allow Bare Functions in column constraints [#2607](https://github.com/sqlfluff/sqlfluff/pull/2607) [@tunetheweb](https://github.com/tunetheweb) * Add Oracle at and double at sign (execution symbol) [#2608](https://github.com/sqlfluff/sqlfluff/pull/2608) [@r0fls](https://github.com/r0fls) * Spark3: Enhancements to `LIKE` clause [#2604](https://github.com/sqlfluff/sqlfluff/pull/2604) [@R7L208](https://github.com/R7L208) ## [0.10.0] - 2022-02-10 ## Highlights Major changes include: * Dropping support for DBT < 0.20 **BREAKING CHANGE** * `sqlfluff fix` will no longer fix SQL containing parsing or templating errors **BREAKING CHANGE** * New rule L062 to allow blocking a configurable list of words (e.g. syntax, schemas, or tables you do not want people to use) * Lots and lots of docs improvements * Looser requirements for `click` python package ## What’s Changed * L046: Detect Jinja spacing issues where segment begins with literal content [#2603](https://github.com/sqlfluff/sqlfluff/pull/2603) [@barrywhart](https://github.com/barrywhart) * MySQL Add BINARY support [#2602](https://github.com/sqlfluff/sqlfluff/pull/2602) [@tunetheweb](https://github.com/tunetheweb) * Support indenting WINDOWS clauses and (optionally) CTEs [#2601](https://github.com/sqlfluff/sqlfluff/pull/2601) [@tunetheweb](https://github.com/tunetheweb) * Postgres: Support expressions in arrays [#2599](https://github.com/sqlfluff/sqlfluff/pull/2599) [@tunetheweb](https://github.com/tunetheweb) * BigQuery support Array of Structs [#2598](https://github.com/sqlfluff/sqlfluff/pull/2598) [@tunetheweb](https://github.com/tunetheweb) * Support wildcards in triggers [#2597](https://github.com/sqlfluff/sqlfluff/pull/2597) [@tunetheweb](https://github.com/tunetheweb) * Support CTEs in CREATE VIEW statements [#2596](https://github.com/sqlfluff/sqlfluff/pull/2596) [@tunetheweb](https://github.com/tunetheweb) * SQLite Support more CREATE TRIGGER options [#2594](https://github.com/sqlfluff/sqlfluff/pull/2594) [@tunetheweb](https://github.com/tunetheweb) * Snowflake: Support Column Comments in Alter Table statements [#2593](https://github.com/sqlfluff/sqlfluff/pull/2593) [@tunetheweb](https://github.com/tunetheweb) * Redshift: Add DATETIME as reserved keyword [#2591](https://github.com/sqlfluff/sqlfluff/pull/2591) [@tunetheweb](https://github.com/tunetheweb) * Support LIMIT and ORDER BY clauses in Values clauses [#2590](https://github.com/sqlfluff/sqlfluff/pull/2590) [@tunetheweb](https://github.com/tunetheweb) * L016: New option "ignore_comment_clause" to ignore column COMMENTs, etc. 
[#2589](https://github.com/sqlfluff/sqlfluff/pull/2589) [@barrywhart](https://github.com/barrywhart) * Bug fix: L016 ("Line is too long") should consider length of prior fixes [#2587](https://github.com/sqlfluff/sqlfluff/pull/2587) [@barrywhart](https://github.com/barrywhart) * Add mysql INSERT ON DUPLICATE KEY [#2494](https://github.com/sqlfluff/sqlfluff/pull/2494) [@rpr-ableton](https://github.com/rpr-ableton) * Snowflake ALTER TABLE: Add multiple columns [#2578](https://github.com/sqlfluff/sqlfluff/pull/2578) [@erevear](https://github.com/erevear) * MySQL: UNIQUE KEY in CREATE TABLE [#2525](https://github.com/sqlfluff/sqlfluff/pull/2525) [@jpy-git](https://github.com/jpy-git) * Spark3: JOIN clause enhancements [#2570](https://github.com/sqlfluff/sqlfluff/pull/2570) [@R7L208](https://github.com/R7L208) * Bug fix: L003 should fix indentation for templated code [#2580](https://github.com/sqlfluff/sqlfluff/pull/2580) [@barrywhart](https://github.com/barrywhart) * Exasol: Improve `COMMENT` and `WITH [NO] DATA` clause usage. [#2583](https://github.com/sqlfluff/sqlfluff/pull/2583) [@sti0](https://github.com/sti0) * Exasol: Allow multiple `LOCAL` keywords in `WHERE` clause [#2582](https://github.com/sqlfluff/sqlfluff/pull/2582) [@sti0](https://github.com/sti0) * Exasol: Allow `LOCAL` keyword within `PREFERRING` clause [#2579](https://github.com/sqlfluff/sqlfluff/pull/2579) [@sti0](https://github.com/sti0) * Add/Improve docs for config settings: "ignore", "ignore_templated_areas" [#2574](https://github.com/sqlfluff/sqlfluff/pull/2574) [@barrywhart](https://github.com/barrywhart) * Look for .sqlfluffignore in current directory [#2573](https://github.com/sqlfluff/sqlfluff/pull/2573) [@barrywhart](https://github.com/barrywhart) * Snowflake: L054 should ignore "WITHIN GROUP" clauses [#2571](https://github.com/sqlfluff/sqlfluff/pull/2571) [@barrywhart](https://github.com/barrywhart) * Redshift: Support Redshift SUPER Data Types [#2564](https://github.com/sqlfluff/sqlfluff/pull/2564) [@PLBMR](https://github.com/PLBMR) * Capitalization rules (L010, L014, L030, L040) should ignore templated code [#2566](https://github.com/sqlfluff/sqlfluff/pull/2566) [@barrywhart](https://github.com/barrywhart) * T-SQL: Add Frame clause unreserved keywords [#2562](https://github.com/sqlfluff/sqlfluff/pull/2562) [@jpers36](https://github.com/jpers36) * Simple API: Fix bug where omitted parameters still override .sqlfluff [#2563](https://github.com/sqlfluff/sqlfluff/pull/2563) [@barrywhart](https://github.com/barrywhart) * Spark3: Add Direct File Query [#2553](https://github.com/sqlfluff/sqlfluff/pull/2553) [@R7L208](https://github.com/R7L208) * Redshift dialect: replace AnyNumberOf with AnySetOf where it makes sense [#2561](https://github.com/sqlfluff/sqlfluff/pull/2561) [@rpr-ableton](https://github.com/rpr-ableton) * jinja and dbt templaters: More robust handling of whitespace control [#2559](https://github.com/sqlfluff/sqlfluff/pull/2559) [@barrywhart](https://github.com/barrywhart) * Improve how "sqlfluff fix" handles templating and parse errors [#2546](https://github.com/sqlfluff/sqlfluff/pull/2546) [@barrywhart](https://github.com/barrywhart) * Jinja and dbt templater: Fix "list index out of range" error [#2555](https://github.com/sqlfluff/sqlfluff/pull/2555) [@barrywhart](https://github.com/barrywhart) * Fix typo in sqlfluffignore docs [#2551](https://github.com/sqlfluff/sqlfluff/pull/2551) [@tunetheweb](https://github.com/tunetheweb) * Correct parsing for BigQuery `SELECT REPLACE` clauses. 
[#2550](https://github.com/sqlfluff/sqlfluff/pull/2550) [@elyobo](https://github.com/elyobo) * Rules documentation improvements [#2542](https://github.com/sqlfluff/sqlfluff/pull/2542) [@tunetheweb](https://github.com/tunetheweb) * Remove requirement for Click>=8 [#2547](https://github.com/sqlfluff/sqlfluff/pull/2547) [@tunetheweb](https://github.com/tunetheweb) * Allow L059 to be configured to always prefer quoted identifiers [#2537](https://github.com/sqlfluff/sqlfluff/pull/2537) [@niconoe-](https://github.com/niconoe-) * Adds new rule L062 to allow blocking of certain words [#2540](https://github.com/sqlfluff/sqlfluff/pull/2540) [@tunetheweb](https://github.com/tunetheweb) * Update to latest Black, drop support for dbt < 0.20 [#2536](https://github.com/sqlfluff/sqlfluff/pull/2536) [@barrywhart](https://github.com/barrywhart) * dbt templater: Fix bug where profile wasn't found if DBT_PROFILES_DIR contained uppercase letters [#2539](https://github.com/sqlfluff/sqlfluff/pull/2539) [@barrywhart](https://github.com/barrywhart) * Spark3: Added segments & grammar needed for hints [#2528](https://github.com/sqlfluff/sqlfluff/pull/2528) [@R7L208](https://github.com/R7L208) * Spark3: parse some VALUES clauses [#2245](https://github.com/sqlfluff/sqlfluff/pull/2245) [@mcannamela](https://github.com/mcannamela) * T-SQL: Allow multiple params in SET statements [#2535](https://github.com/sqlfluff/sqlfluff/pull/2535) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Add indentation for SET statement [#2531](https://github.com/sqlfluff/sqlfluff/pull/2531) [@jpers36](https://github.com/jpers36) * Add additional documentation on dbt-adapter in pre-commit [#2530](https://github.com/sqlfluff/sqlfluff/pull/2530) [@robertdefilippi](https://github.com/robertdefilippi) * T-SQL: Add indentation for UPDATE statement [#2532](https://github.com/sqlfluff/sqlfluff/pull/2532) [@jpers36](https://github.com/jpers36) * Fix Snowflake Unordered Select Clause [#2529](https://github.com/sqlfluff/sqlfluff/pull/2529) [@tunetheweb](https://github.com/tunetheweb) * Fix Quoted Literals for Postgres and Redshift affecting rule L039 [#2526](https://github.com/sqlfluff/sqlfluff/pull/2526) [@tunetheweb](https://github.com/tunetheweb) * Postgres specific CTEDefinitionSegment [#2524](https://github.com/sqlfluff/sqlfluff/pull/2524) [@jpy-git](https://github.com/jpy-git) ## New Contributors * [@robertdefilippi](https://github.com/robertdefilippi) made their first contribution in [#2530](https://github.com/sqlfluff/sqlfluff/pull/2530) * [@niconoe-](https://github.com/niconoe-) made their first contribution in [#2537](https://github.com/sqlfluff/sqlfluff/pull/2537) * [@elyobo](https://github.com/elyobo) made their first contribution in [#2550](https://github.com/sqlfluff/sqlfluff/pull/2550) * [@erevear](https://github.com/erevear) made their first contribution in [#2578](https://github.com/sqlfluff/sqlfluff/pull/2578) ## [0.9.4] - 2022-01-30 ## Highlights Major changes include: * dbt performance improvements * Fix `click` dependency error. * Better datepart versus identifier parsing. * Fix some Jinja errors. 
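To illustrate the datepart-versus-identifier distinction mentioned in the highlights above (a sketch only, using Snowflake-style `DATEDIFF`; the `orders` table and its columns are hypothetical), the same word can be a datepart keyword in one position and an ordinary column reference in another:

```sql
-- "month" is a datepart keyword inside the function call ...
SELECT datediff(month, created_at, closed_at) AS months_open
FROM orders;

-- ... but an ordinary column identifier here
SELECT month, count(*) AS order_count
FROM orders
GROUP BY month;
```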
* Various grammar fixes and improvements ## What’s Changed * Spark3: test cases for HAVING clause in SELECT statement [#2517](https://github.com/sqlfluff/sqlfluff/pull/2517) [@R7L208](https://github.com/R7L208) * Update click version requirement in setup.cfg to match that in requirements.txt [#2518](https://github.com/sqlfluff/sqlfluff/pull/2518) [@barrywhart](https://github.com/barrywhart) * Postgres: Implement DO Statements + Refactored Language Clause [#2511](https://github.com/sqlfluff/sqlfluff/pull/2511) [@PLBMR](https://github.com/PLBMR) * Spark3: Support for Grouping Sets, `CUBE` and `ROLLUP` in `GROUP BY` clause of `SELECT` statement [#2505](https://github.com/sqlfluff/sqlfluff/pull/2505) [@R7L208](https://github.com/R7L208) * Refactor date part functions [#2510](https://github.com/sqlfluff/sqlfluff/pull/2510) [@tunetheweb](https://github.com/tunetheweb) * Postgres: EXPLAIN ANALYSE allows British spelling [#2507](https://github.com/sqlfluff/sqlfluff/pull/2507) [@jpy-git](https://github.com/jpy-git) * "noqa": Add support for ignoring template (TMP) and parse (PRS) errors [#2509](https://github.com/sqlfluff/sqlfluff/pull/2509) [@barrywhart](https://github.com/barrywhart) * Freeze Black due to incompatibility between 22.1 and flake8-black [#2513](https://github.com/sqlfluff/sqlfluff/pull/2513) [@tunetheweb](https://github.com/tunetheweb) * Support NATURAL JOINS [#2506](https://github.com/sqlfluff/sqlfluff/pull/2506) [@tunetheweb](https://github.com/tunetheweb) * dbt Docker environment: Mount the test profiles.yml at ~/.dbt [#2502](https://github.com/sqlfluff/sqlfluff/pull/2502) [@barrywhart](https://github.com/barrywhart) * Add dbt_artifacts package to in the wild docs [#2504](https://github.com/sqlfluff/sqlfluff/pull/2504) [@NiallRees](https://github.com/NiallRees) * Spark3: Support `DISTRIBUTE BY` clause in `SELECT` statement [#2503](https://github.com/sqlfluff/sqlfluff/pull/2503) [@R7L208](https://github.com/R7L208) * dbt templater: For performance reasons, cache the database connection across models [#2498](https://github.com/sqlfluff/sqlfluff/pull/2498) [@barrywhart](https://github.com/barrywhart) * Bug fix: Defining and using Jinja macro in the same file causes runtime error [#2499](https://github.com/sqlfluff/sqlfluff/pull/2499) [@barrywhart](https://github.com/barrywhart) * Spark3: Support `CLUSTER BY` clause in `SELECT` statement [#2491](https://github.com/sqlfluff/sqlfluff/pull/2491) [@R7L208](https://github.com/R7L208) * Grammar: Adds support for COPY statement for Postgres dialect [#2481](https://github.com/sqlfluff/sqlfluff/pull/2481) [@derickl](https://github.com/derickl) * Add raiserror for T-SQL [#2490](https://github.com/sqlfluff/sqlfluff/pull/2490) [@fdw](https://github.com/fdw) * Enforce parentheses for function definitions in T-SQL [#2489](https://github.com/sqlfluff/sqlfluff/pull/2489) [@fdw](https://github.com/fdw) * Add guards to prevent rule crashes [#2488](https://github.com/sqlfluff/sqlfluff/pull/2488) [@barrywhart](https://github.com/barrywhart) ## New Contributors * [@PLBMR](https://github.com/PLBMR) made their first contribution in [#2511](https://github.com/sqlfluff/sqlfluff/pull/2511) ## [0.9.3] - 2022-01-26 ## Highlights Major changes include: * Add `ignore_words` option for rules L010, L014, L029, L030, L040 * Fix some issues in 0.9.2 preventing some queries from linting ## What’s Changed * Prevent L031 throwing exception on unparsable code [#2486](https://github.com/sqlfluff/sqlfluff/pull/2486) [@tunetheweb](https://github.com/tunetheweb) * Add linting of 
fixtures SQL for critical rules errors to tox [#2473](https://github.com/sqlfluff/sqlfluff/pull/2473) [@tunetheweb](https://github.com/tunetheweb) * Fix L039 for T-SQL comparison operator using space [#2485](https://github.com/sqlfluff/sqlfluff/pull/2485) [@tunetheweb](https://github.com/tunetheweb) * Fix bug in get_alias causing rule Critical errors for T-SQL [#2479](https://github.com/sqlfluff/sqlfluff/pull/2479) [@tunetheweb](https://github.com/tunetheweb) * Tweak GitHub templates [#2471](https://github.com/sqlfluff/sqlfluff/pull/2471) [@tunetheweb](https://github.com/tunetheweb) * Small speed improvement to L054 [#2476](https://github.com/sqlfluff/sqlfluff/pull/2476) [@tunetheweb](https://github.com/tunetheweb) * L003: Revisit recent change to improve speed [#2474](https://github.com/sqlfluff/sqlfluff/pull/2474) [@barrywhart](https://github.com/barrywhart) * Fix select_crawler issue with some Exasol statements [#2470](https://github.com/sqlfluff/sqlfluff/pull/2470) [@tunetheweb](https://github.com/tunetheweb) * Cleanup date logic by removing DatePartClause and using DatetimeUnitSegment instead [#2464](https://github.com/sqlfluff/sqlfluff/pull/2464) [@tunetheweb](https://github.com/tunetheweb) * Fix L044 exception when final statement has no SELECT [#2468](https://github.com/sqlfluff/sqlfluff/pull/2468) [@tunetheweb](https://github.com/tunetheweb) * Support T-SQL system variables (e.g. @@rowcount) [#2463](https://github.com/sqlfluff/sqlfluff/pull/2463) [@tunetheweb](https://github.com/tunetheweb) * Add base rule to developing rules page [#2462](https://github.com/sqlfluff/sqlfluff/pull/2462) [@tunetheweb](https://github.com/tunetheweb) * L003: Ignore indentation of lines that only exist in templated space [#2460](https://github.com/sqlfluff/sqlfluff/pull/2460) [@barrywhart](https://github.com/barrywhart) * Ignore words for various rules [#2459](https://github.com/sqlfluff/sqlfluff/pull/2459) [@tunetheweb](https://github.com/tunetheweb) * Support Foreign Key options for MySQL [#2461](https://github.com/sqlfluff/sqlfluff/pull/2461) [@tunetheweb](https://github.com/tunetheweb) * Exclude WINDOW clauses from L054 [#2455](https://github.com/sqlfluff/sqlfluff/pull/2455) [@tunetheweb](https://github.com/tunetheweb) * Fix bug with L026 for simple deletes [#2458](https://github.com/sqlfluff/sqlfluff/pull/2458) [@tunetheweb](https://github.com/tunetheweb) * Spark3: test cases for Common Table Expressions [#2454](https://github.com/sqlfluff/sqlfluff/pull/2454) [@R7L208](https://github.com/R7L208) * Fix T-SQL's IDENTITY_INSERT syntax [#2452](https://github.com/sqlfluff/sqlfluff/pull/2452) [@fdw](https://github.com/fdw) * T-SQL: Support stored procedures in insert statements [#2451](https://github.com/sqlfluff/sqlfluff/pull/2451) [@fdw](https://github.com/fdw) * Spark3: Support for `LOAD DATA` statements [#2450](https://github.com/sqlfluff/sqlfluff/pull/2450) [@R7L208](https://github.com/R7L208) ## [0.9.2] - 2022-01-24 ## Highlights We are pleased to include 110 improvements and fixes in this release, and welcome 7 new contributors to the code. 
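Two of the new rules listed in the highlights below (L060 and L061) lend themselves to a quick before/after sketch; the `orders` table and its columns are hypothetical:

```sql
-- Flagged: L060 prefers COALESCE over IFNULL/NVL, and L061 prefers != over <>
SELECT ifnull(status, 'unknown') AS order_status
FROM orders
WHERE status <> 'cancelled';

-- Preferred
SELECT coalesce(status, 'unknown') AS order_status
FROM orders
WHERE status != 'cancelled';
```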
Major changes include: * Initial Oracle support (note: SQL, but not PL/SQL) * Fix more dbt 1.0.0 connection issues * Improved configuration documentation * New rule (L059) to flag unnecessary quoted identifiers * New rule (L060) to prefer `COALESCE` instead of `IFNULL` or `NVL` * New rule (L061) to prefer `!=` over `<>` * Many rule fixes * Many dialect improvements ## What’s Changed * Add Postgres DROP PROCEDURE support [#2446](https://github.com/sqlfluff/sqlfluff/pull/2446) [@rpr-ableton](https://github.com/rpr-ableton) * MySQL Alter table ADD/DROP/RENAME INDEX support [#2443](https://github.com/sqlfluff/sqlfluff/pull/2443) [@tunetheweb](https://github.com/tunetheweb) * Add basic CREATE PROCEDURE support to Postgres [#2441](https://github.com/sqlfluff/sqlfluff/pull/2441) [@tunetheweb](https://github.com/tunetheweb) * Indent T-SQL DECLARE and EXEC statements [#2439](https://github.com/sqlfluff/sqlfluff/pull/2439) [@tunetheweb](https://github.com/tunetheweb) * Hive alternative types: INTEGER, DEC, NUMERIC [#2438](https://github.com/sqlfluff/sqlfluff/pull/2438) [@tunetheweb](https://github.com/tunetheweb) * Implement Snowflake Dateparts [#2437](https://github.com/sqlfluff/sqlfluff/pull/2437) [@tunetheweb](https://github.com/tunetheweb) * Fix rule L028 for T-SQL for params [#2442](https://github.com/sqlfluff/sqlfluff/pull/2442) [@tunetheweb](https://github.com/tunetheweb) * Support CREATE UNIQUE INDEX [#2440](https://github.com/sqlfluff/sqlfluff/pull/2440) [@tunetheweb](https://github.com/tunetheweb) * Make BigQuery typeless STRUCTs Expressions [#2435](https://github.com/sqlfluff/sqlfluff/pull/2435) [@tunetheweb](https://github.com/tunetheweb) * T-SQL support default params and no RETURN value [#2434](https://github.com/sqlfluff/sqlfluff/pull/2434) [@tunetheweb](https://github.com/tunetheweb) * "sqlfluff fix" should report any parse errors found [#2423](https://github.com/sqlfluff/sqlfluff/pull/2423) [@barrywhart](https://github.com/barrywhart) * Redshift VACUUM support [#2433](https://github.com/sqlfluff/sqlfluff/pull/2433) [@rpr-ableton](https://github.com/rpr-ableton) * Add Oracle PROMPT statement [#2413](https://github.com/sqlfluff/sqlfluff/pull/2413) [@r0fls](https://github.com/r0fls) * Spark3: Support for `INSERT OVERWRITE DIRECTORY` with Hive Format [#2389](https://github.com/sqlfluff/sqlfluff/pull/2389) [@R7L208](https://github.com/R7L208) * Exasol: Fix escaped identifiers [#2431](https://github.com/sqlfluff/sqlfluff/pull/2431) [@sti0](https://github.com/sti0) * Exasol: Fix `LOCAL.ALIAS` Syntax [#2430](https://github.com/sqlfluff/sqlfluff/pull/2430) [@sti0](https://github.com/sti0) * Exasol: Allow quoted identifier for various statements. 
[#2428](https://github.com/sqlfluff/sqlfluff/pull/2428) [@sti0](https://github.com/sti0) * Misc grammar improvements for Snowflake [#2421](https://github.com/sqlfluff/sqlfluff/pull/2421) [@chwiese](https://github.com/chwiese) * New rule L061 to use != over <> [#2409](https://github.com/sqlfluff/sqlfluff/pull/2409) [@sti0](https://github.com/sti0) * Correct TRANS to TRAN [#2425](https://github.com/sqlfluff/sqlfluff/pull/2425) [@fdw](https://github.com/fdw) * Remove the "heuristic" slicer, as it was replaced by JinjaTracer [#2422](https://github.com/sqlfluff/sqlfluff/pull/2422) [@barrywhart](https://github.com/barrywhart) * L060: More specific description [#2419](https://github.com/sqlfluff/sqlfluff/pull/2419) [@jpy-git](https://github.com/jpy-git) * Fix code formatting in Rule docs [#2418](https://github.com/sqlfluff/sqlfluff/pull/2418) [@tunetheweb](https://github.com/tunetheweb) * Allow UPDATE SET statements in RedShift [#2417](https://github.com/sqlfluff/sqlfluff/pull/2417) [@tunetheweb](https://github.com/tunetheweb) * Add Redshift cursor DECLARE, FETCH & CLOSE support [#2414](https://github.com/sqlfluff/sqlfluff/pull/2414) [@rpr-ableton](https://github.com/rpr-ableton) * Add Redshift ANALYZE COMPRESSION support [#2412](https://github.com/sqlfluff/sqlfluff/pull/2412) [@rpr-ableton](https://github.com/rpr-ableton) * ANSI Values statement fixes [#2404](https://github.com/sqlfluff/sqlfluff/pull/2404) [@jpy-git](https://github.com/jpy-git) * Exasol: Overhaul drop statements [#2407](https://github.com/sqlfluff/sqlfluff/pull/2407) [@sti0](https://github.com/sti0) * L044, L045: Handle Exasol VALUES clause [#2400](https://github.com/sqlfluff/sqlfluff/pull/2400) [@barrywhart](https://github.com/barrywhart) * L060: Use COALESCE instead of IFNULL or NVL. [#2405](https://github.com/sqlfluff/sqlfluff/pull/2405) [@jpy-git](https://github.com/jpy-git) * Postgres: Fix Values alias regression [#2401](https://github.com/sqlfluff/sqlfluff/pull/2401) [@jpy-git](https://github.com/jpy-git) * Align line length in Python code to 88 characters [#2264](https://github.com/sqlfluff/sqlfluff/pull/2264) [@chwiese](https://github.com/chwiese) * Jinja templater: Allow "load_macros_from_path" to be a comma-separated list of paths [#2387](https://github.com/sqlfluff/sqlfluff/pull/2387) [@barrywhart](https://github.com/barrywhart) * Add "TRANS" keyword for T-SQL [#2399](https://github.com/sqlfluff/sqlfluff/pull/2399) [@fdw](https://github.com/fdw) * Docstrings: Replace double backticks with single quote for lint results. 
[#2386](https://github.com/sqlfluff/sqlfluff/pull/2386) [@jpy-git](https://github.com/jpy-git) * Spark3: Support for `INSERT OVERWRITE DIRECTORY` statements [#2385](https://github.com/sqlfluff/sqlfluff/pull/2385) [@R7L208](https://github.com/R7L208) * Fix unnecessary white underline in doc site [#2383](https://github.com/sqlfluff/sqlfluff/pull/2383) [@tunetheweb](https://github.com/tunetheweb) * Rolls back some code cleanup that caused coverage report to show gaps [#2384](https://github.com/sqlfluff/sqlfluff/pull/2384) [@barrywhart](https://github.com/barrywhart) * Fix "connection already closed" issue with dbt 1.0 and dbt_utils [#2382](https://github.com/sqlfluff/sqlfluff/pull/2382) [@barrywhart](https://github.com/barrywhart) * Spark3: Support for `INSERT [TABLE]` data manipulation statements [#2290](https://github.com/sqlfluff/sqlfluff/pull/2290) [@R7L208](https://github.com/R7L208) * Comment out line in bug report template [#2378](https://github.com/sqlfluff/sqlfluff/pull/2378) [@jpy-git](https://github.com/jpy-git) * Postgres: EXPLAIN statement updates [#2374](https://github.com/sqlfluff/sqlfluff/pull/2374) [@jpy-git](https://github.com/jpy-git) * Make TABLE a non-reserved word in Postgres [#2377](https://github.com/sqlfluff/sqlfluff/pull/2377) [@tunetheweb](https://github.com/tunetheweb) * Snowflake COLUMN is not a reserved word [#2376](https://github.com/sqlfluff/sqlfluff/pull/2376) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Complete ASA Table Index Clause functionality [#2373](https://github.com/sqlfluff/sqlfluff/pull/2373) [@jpers36](https://github.com/jpers36) * Add support for Jinja import and include [#2355](https://github.com/sqlfluff/sqlfluff/pull/2355) [@barrywhart](https://github.com/barrywhart) * Add Redshift INTERVAL datatype support [#2366](https://github.com/sqlfluff/sqlfluff/pull/2366) [@rpr-ableton](https://github.com/rpr-ableton) * Whitespace concatenated string literals for MySQL, Postgres and Redshift [#2356](https://github.com/sqlfluff/sqlfluff/pull/2356) [@jpy-git](https://github.com/jpy-git) * Fix L026 false positive on "SELECT INTO" statement [#2371](https://github.com/sqlfluff/sqlfluff/pull/2371) [@barrywhart](https://github.com/barrywhart) * Exclude EMIT clauses from rule L013 [#2364](https://github.com/sqlfluff/sqlfluff/pull/2364) [@tunetheweb](https://github.com/tunetheweb) * Functional API: Segments.recursive_crawl [#2369](https://github.com/sqlfluff/sqlfluff/pull/2369) [@jpy-git](https://github.com/jpy-git) * Complete Redshift CREATE EXTERNAL TABLE support [#2354](https://github.com/sqlfluff/sqlfluff/pull/2354) [@rpr-ableton](https://github.com/rpr-ableton) * L041: Fix duplicate DISTINCT corruption [#2365](https://github.com/sqlfluff/sqlfluff/pull/2365) [@jpy-git](https://github.com/jpy-git) * Bigquery Create View with Options [#2359](https://github.com/sqlfluff/sqlfluff/pull/2359) [@tunetheweb](https://github.com/tunetheweb) * L026: Handle DML statements and multiple levels of nesting [#2336](https://github.com/sqlfluff/sqlfluff/pull/2336) [@barrywhart](https://github.com/barrywhart) * Postgres & MySQL: cleanup AliasExpressionSegment [#2353](https://github.com/sqlfluff/sqlfluff/pull/2353) [@jpy-git](https://github.com/jpy-git) * Redefine MySQL Interval segment [#2351](https://github.com/sqlfluff/sqlfluff/pull/2351) [@rpr-ableton](https://github.com/rpr-ableton) * Postgres: INSERT INTO table alias [#2349](https://github.com/sqlfluff/sqlfluff/pull/2349) [@jpy-git](https://github.com/jpy-git) * L043: Remove redundant CASE statement replacing 
NULLS with NULLS [#2346](https://github.com/sqlfluff/sqlfluff/pull/2346) [@jpy-git](https://github.com/jpy-git) * Add RedShift DATASHARE support [#2350](https://github.com/sqlfluff/sqlfluff/pull/2350) [@rpr-ableton](https://github.com/rpr-ableton) * Various documentation updates [#2347](https://github.com/sqlfluff/sqlfluff/pull/2347) [@tunetheweb](https://github.com/tunetheweb) * Snowflake ALTER TABLE: Drop multiple columns [#2348](https://github.com/sqlfluff/sqlfluff/pull/2348) [@jpy-git](https://github.com/jpy-git) * Configuration doc: add rule configuration section [#2291](https://github.com/sqlfluff/sqlfluff/pull/2291) [@juhoautio](https://github.com/juhoautio) * Redshift: create model, show model & data types [#2338](https://github.com/sqlfluff/sqlfluff/pull/2338) [@rpr-ableton](https://github.com/rpr-ableton) * L059: Unnecessary quoted identifier [#2341](https://github.com/sqlfluff/sqlfluff/pull/2341) [@jpy-git](https://github.com/jpy-git) * L043: Use simple replace to apply fixes [#2343](https://github.com/sqlfluff/sqlfluff/pull/2343) [@jpy-git](https://github.com/jpy-git) * T-SQL: Add functionality to PARTITION BY clause [#2335](https://github.com/sqlfluff/sqlfluff/pull/2335) [@jpers36](https://github.com/jpers36) * L039 casting operator postgres fix [#2334](https://github.com/sqlfluff/sqlfluff/pull/2334) [@jpy-git](https://github.com/jpy-git) * `AnySetOf` grammar [#2326](https://github.com/sqlfluff/sqlfluff/pull/2326) [@jpy-git](https://github.com/jpy-git) * Redshift: update CREATE TABLE AS match_grammar [#2333](https://github.com/sqlfluff/sqlfluff/pull/2333) [@rpr-ableton](https://github.com/rpr-ableton) * Redshift CREATE EXTERNAL TABLE: TABLE PROPERTIES [#2330](https://github.com/sqlfluff/sqlfluff/pull/2330) [@jpy-git](https://github.com/jpy-git) * Snowflake: Flush out `ALTER TABLE`'s `tableColumnAction` grammar [#2332](https://github.com/sqlfluff/sqlfluff/pull/2332) [@wong-codaio](https://github.com/wong-codaio) * Snowflake ALTER TABLE: Add clusteringAction [#2329](https://github.com/sqlfluff/sqlfluff/pull/2329) [@jpy-git](https://github.com/jpy-git) * Snowflake ALTER TABLE: Add searchOptimizationAction [#2328](https://github.com/sqlfluff/sqlfluff/pull/2328) [@jpy-git](https://github.com/jpy-git) * Fix numeric literal grammar for Postgres/MySQL/Exasol [#2324](https://github.com/sqlfluff/sqlfluff/pull/2324) [@jpy-git](https://github.com/jpy-git) * L039: Remove spaces between comparison operators (T-SQL) [#2325](https://github.com/sqlfluff/sqlfluff/pull/2325) [@jpy-git](https://github.com/jpy-git) * Enable setting a target of a dbt profile [#2236](https://github.com/sqlfluff/sqlfluff/pull/2236) [@yu-iskw](https://github.com/yu-iskw) * Snowflake: Add support for column rename [#2327](https://github.com/sqlfluff/sqlfluff/pull/2327) [@wong-codaio](https://github.com/wong-codaio) * Snowflake: Added `AlterTableStatement` specific for Snowflake [#2267](https://github.com/sqlfluff/sqlfluff/pull/2267) [@wong-codaio](https://github.com/wong-codaio) * Full REFERENCES grammar for CREATE TABLE statement [#2315](https://github.com/sqlfluff/sqlfluff/pull/2315) [@jpy-git](https://github.com/jpy-git) * Fix Spark numeric literals [#2317](https://github.com/sqlfluff/sqlfluff/pull/2317) [@jpy-git](https://github.com/jpy-git) * Change type of Snowflake stage paths to fix issues with L044 [#2320](https://github.com/sqlfluff/sqlfluff/pull/2320) [@chwiese](https://github.com/chwiese) * Add Bytes Quoted Literals to Spark dialect [#2312](https://github.com/sqlfluff/sqlfluff/pull/2312) 
[@jpy-git](https://github.com/jpy-git) * Fix L044 assertion failure with delete stmt & cte [#2321](https://github.com/sqlfluff/sqlfluff/pull/2321) [@barrywhart](https://github.com/barrywhart) * L003 should consider only *literal* leading whitespace (ignore templated) [#2304](https://github.com/sqlfluff/sqlfluff/pull/2304) [@barrywhart](https://github.com/barrywhart) * Redshift: update reserved keywords [#2318](https://github.com/sqlfluff/sqlfluff/pull/2318) [@rpr-ableton](https://github.com/rpr-ableton) * docs: Document how to run SQLFluff with local changes to test them [#2316](https://github.com/sqlfluff/sqlfluff/pull/2316) [@kayman-mk](https://github.com/kayman-mk) * Update redshift unreserved keywords [#2310](https://github.com/sqlfluff/sqlfluff/pull/2310) [@jpy-git](https://github.com/jpy-git) * Fix spark and hive quoted literals [#2311](https://github.com/sqlfluff/sqlfluff/pull/2311) [@jpy-git](https://github.com/jpy-git) * Oracle Dialect [#2293](https://github.com/sqlfluff/sqlfluff/pull/2293) [@r0fls](https://github.com/r0fls) * Redshift dialect: add COPY and UNLOAD statements [#2307](https://github.com/sqlfluff/sqlfluff/pull/2307) [@rpr-ableton](https://github.com/rpr-ableton) * L052: Fix case where no preceding segments and multiline [#2279](https://github.com/sqlfluff/sqlfluff/pull/2279) [@jpy-git](https://github.com/jpy-git) * Update rule L049 to handle EXEC assignments [#2308](https://github.com/sqlfluff/sqlfluff/pull/2308) [@tunetheweb](https://github.com/tunetheweb) * Remove DATE, DATETIME and TIME from BigQuery DatePart [#2283](https://github.com/sqlfluff/sqlfluff/pull/2283) [@tunetheweb](https://github.com/tunetheweb) * Fix #1292: nocolor and verbose can work in config files [#2300](https://github.com/sqlfluff/sqlfluff/pull/2300) [@cympfh](https://github.com/cympfh) * Allow pyproject.toml as extra_config_path [#2305](https://github.com/sqlfluff/sqlfluff/pull/2305) [@jpy-git](https://github.com/jpy-git) * L009: Handle adding newline after trailing templated code [#2298](https://github.com/sqlfluff/sqlfluff/pull/2298) [@barrywhart](https://github.com/barrywhart) * added missing "t" in doc for Rule_L020 [#2294](https://github.com/sqlfluff/sqlfluff/pull/2294) [@Xilorole](https://github.com/Xilorole) * docs: Document configuration keyword for rule L054 [#2288](https://github.com/sqlfluff/sqlfluff/pull/2288) [@tomasfarias](https://github.com/tomasfarias) * Update L009 to operate in raw, not templated space [#2285](https://github.com/sqlfluff/sqlfluff/pull/2285) [@barrywhart](https://github.com/barrywhart) * Redshift CREATE LIBRARY statements [#2277](https://github.com/sqlfluff/sqlfluff/pull/2277) [@rpr-ableton](https://github.com/rpr-ableton) * L025 with 'bigquery' dialect: Correctly interpret calling functions with a table as a parameter [#2278](https://github.com/sqlfluff/sqlfluff/pull/2278) [@barrywhart](https://github.com/barrywhart) * Spark3: Coverage for `REFRESH` auxiliary statements [#2282](https://github.com/sqlfluff/sqlfluff/pull/2282) [@R7L208](https://github.com/R7L208) * Spark3: Coverage for `USE DATABASE` statement. 
[#2276](https://github.com/sqlfluff/sqlfluff/pull/2276) [@R7L208](https://github.com/R7L208) * Fix link for editing 'In The Wild' page with new base branch, `main` [#2280](https://github.com/sqlfluff/sqlfluff/pull/2280) [@barnett](https://github.com/barnett) * Optionally allow additional configurable characters in L057 [#2274](https://github.com/sqlfluff/sqlfluff/pull/2274) [@tunetheweb](https://github.com/tunetheweb) * L025 should look at subqueries [#2273](https://github.com/sqlfluff/sqlfluff/pull/2273) [@barrywhart](https://github.com/barrywhart) * Add coverage for `TRUNCATE` statement in Spark3 dialect [#2272](https://github.com/sqlfluff/sqlfluff/pull/2272) [@R7L208](https://github.com/R7L208) * Upgrade `click` version to 8.0+ to support `click.shell_completion` [#2271](https://github.com/sqlfluff/sqlfluff/pull/2271) [@wong-codaio](https://github.com/wong-codaio) * Improve release checklist to make releases easier [#2263](https://github.com/sqlfluff/sqlfluff/pull/2263) [@tunetheweb](https://github.com/tunetheweb) ## New Contributors * [@barnett](https://github.com/barnett) made their first contribution in [#2280](https://github.com/sqlfluff/sqlfluff/pull/2280) * [@tomasfarias](https://github.com/tomasfarias) made their first contribution in [#2288](https://github.com/sqlfluff/sqlfluff/pull/2288) * [@Xilorole](https://github.com/Xilorole) made their first contribution in [#2294](https://github.com/sqlfluff/sqlfluff/pull/2294) * [@cympfh](https://github.com/cympfh) made their first contribution in [#2300](https://github.com/sqlfluff/sqlfluff/pull/2300) * [@r0fls](https://github.com/r0fls) made their first contribution in [#2293](https://github.com/sqlfluff/sqlfluff/pull/2293) * [@yu-iskw](https://github.com/yu-iskw) made their first contribution in [#2236](https://github.com/sqlfluff/sqlfluff/pull/2236) * [@fdw](https://github.com/fdw) made their first contribution in [#2399](https://github.com/sqlfluff/sqlfluff/pull/2399) ## [0.9.1] - 2022-01-08 ## Highlights * Fix dbt 1.0.0 connection issue * Fix some SQL corruption issues with templated code * New components to simplify creating rules * Remove support for Python 3.6 ## What’s Changed * Fix delimited identifier parsing for spark3 [#2111](https://github.com/sqlfluff/sqlfluff/pull/2111) [@mcannamela](https://github.com/mcannamela) * Stop numeric literal from splitting valid naked identifiers. 
[#2114](https://github.com/sqlfluff/sqlfluff/pull/2114) [@jpy-git](https://github.com/jpy-git) * Grammar: Add CREATE USER/GROUP statement to Redshift dialect [#2115](https://github.com/sqlfluff/sqlfluff/pull/2115) [@jpy-git](https://github.com/jpy-git) * Fix mypy type raise in L003 [#2127](https://github.com/sqlfluff/sqlfluff/pull/2127) [@barrywhart](https://github.com/barrywhart) * Add ability to parse multiple GO/semicolon delimiters [#2124](https://github.com/sqlfluff/sqlfluff/pull/2124) [@jpy-git](https://github.com/jpy-git) * Allowed array/struct values in `default` definition of `declare` [#2120](https://github.com/sqlfluff/sqlfluff/pull/2120) [@KulykDmytro](https://github.com/KulykDmytro) * Normalise input newlines [#2128](https://github.com/sqlfluff/sqlfluff/pull/2128) [@jpy-git](https://github.com/jpy-git) * Clean up all files using the pre-commit hook [#2123](https://github.com/sqlfluff/sqlfluff/pull/2123) [@kayman-mk](https://github.com/kayman-mk) * Refined LintFix API [#2133](https://github.com/sqlfluff/sqlfluff/pull/2133) [@jpy-git](https://github.com/jpy-git) * Hotfix for LintFix comparisons [#2138](https://github.com/sqlfluff/sqlfluff/pull/2138) [@jpy-git](https://github.com/jpy-git) * Lint spaces in qualified names [#2130](https://github.com/sqlfluff/sqlfluff/pull/2130) [@jpers36](https://github.com/jpers36) * Remove support for Python 3.6 (it's "end of life" December 23, 2021) [#2141](https://github.com/sqlfluff/sqlfluff/pull/2141) [@barrywhart](https://github.com/barrywhart) * Fully remove python3.6 references [#2142](https://github.com/sqlfluff/sqlfluff/pull/2142) [@jpy-git](https://github.com/jpy-git) * Fix L022 to not flag CTE column definitions [#2139](https://github.com/sqlfluff/sqlfluff/pull/2139) [@jpy-git](https://github.com/jpy-git) * docs: set `dbt_modules` to `dbt_packages` [#2143](https://github.com/sqlfluff/sqlfluff/pull/2143) [@ciklista](https://github.com/ciklista) * Hive: add INTERVAL syntax [#2144](https://github.com/sqlfluff/sqlfluff/pull/2144) [@juhoautio](https://github.com/juhoautio) * Fix mypy error on python 3.7 [#2147](https://github.com/sqlfluff/sqlfluff/pull/2147) [@juhoautio](https://github.com/juhoautio) * Update PR template to reference tox generate-fixture-yml command [#2148](https://github.com/sqlfluff/sqlfluff/pull/2148) [@jpy-git](https://github.com/jpy-git) * Update index.rst notable changes with 0.9.0 details [#2132](https://github.com/sqlfluff/sqlfluff/pull/2132) [@jpy-git](https://github.com/jpy-git) * Add ALTER USER and ALTER GROUP to redshift dialect [#2131](https://github.com/sqlfluff/sqlfluff/pull/2131) [@jpy-git](https://github.com/jpy-git) * Add complete DESCRIBE grammar to Snowflake dialect [#2149](https://github.com/sqlfluff/sqlfluff/pull/2149) [@jpy-git](https://github.com/jpy-git) * Fix bug with BigQuery UNPIVOT [#2156](https://github.com/sqlfluff/sqlfluff/pull/2156) [@tunetheweb](https://github.com/tunetheweb) * Make L057 compatible with BigQuery [#2151](https://github.com/sqlfluff/sqlfluff/pull/2151) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Proper Indentation of ELSE IF [#2157](https://github.com/sqlfluff/sqlfluff/pull/2157) [@jpers36](https://github.com/jpers36) * Linter Test Name Duplication [#2158](https://github.com/sqlfluff/sqlfluff/pull/2158) [@jpers36](https://github.com/jpers36) * Add struct support for `hive` and `redshift` (L026, L028) [#2154](https://github.com/sqlfluff/sqlfluff/pull/2154) [@KulykDmytro](https://github.com/KulykDmytro) * Postgres - Support functions prepended with _ and containing $ 
[#2159](https://github.com/sqlfluff/sqlfluff/pull/2159) [@jpy-git](https://github.com/jpy-git) * T-SQL: function parsing/linting [#2155](https://github.com/sqlfluff/sqlfluff/pull/2155) [@jpers36](https://github.com/jpers36) * T-SQL: Add THROW statement [#2163](https://github.com/sqlfluff/sqlfluff/pull/2163) [@jpers36](https://github.com/jpers36) * Add yamllint to project [#2162](https://github.com/sqlfluff/sqlfluff/pull/2162) [@tunetheweb](https://github.com/tunetheweb) * Fix outdated docstring in dialects_test [#2166](https://github.com/sqlfluff/sqlfluff/pull/2166) [@juhoautio](https://github.com/juhoautio) * Minor comment fixes [#2179](https://github.com/sqlfluff/sqlfluff/pull/2179) [@juhoautio](https://github.com/juhoautio) * L010 to apply to date_part (capitalization policy for time units) [#2167](https://github.com/sqlfluff/sqlfluff/pull/2167) [@juhoautio](https://github.com/juhoautio) * ALTER GROUP fix to accommodate quoted objects [#2188](https://github.com/sqlfluff/sqlfluff/pull/2188) [@tdstark](https://github.com/tdstark) * Lexer: add non-breaking spaces to whitespace [#2189](https://github.com/sqlfluff/sqlfluff/pull/2189) [@jpers36](https://github.com/jpers36) * Grammar: Add COMMENT statement to Snowflake [#2173](https://github.com/sqlfluff/sqlfluff/pull/2173) [@jpy-git](https://github.com/jpy-git) * Grammar: Add DISCARD statement to Postgres dialect [#2175](https://github.com/sqlfluff/sqlfluff/pull/2175) [@jpy-git](https://github.com/jpy-git) * Grammar: Add UNDROP statement to Snowflake dialect [#2177](https://github.com/sqlfluff/sqlfluff/pull/2177) [@jpy-git](https://github.com/jpy-git) * Grammar: Add UNSET statement to Snowflake dialect [#2181](https://github.com/sqlfluff/sqlfluff/pull/2181) [@jpy-git](https://github.com/jpy-git) * Grammar: Add RESET statement to Postgres dialect [#2182](https://github.com/sqlfluff/sqlfluff/pull/2182) [@jpy-git](https://github.com/jpy-git) * Grammar: Add LOAD statement to Postgres dialect [#2183](https://github.com/sqlfluff/sqlfluff/pull/2183) [@jpy-git](https://github.com/jpy-git) * Grammar: Fix TRUNCATE statement in Snowflake dialect [#2184](https://github.com/sqlfluff/sqlfluff/pull/2184) [@jpy-git](https://github.com/jpy-git) * Grammar: Add HELP statement to MySQL dialect [#2191](https://github.com/sqlfluff/sqlfluff/pull/2191) [@jpy-git](https://github.com/jpy-git) * Grammar: Add PURGE BINARY LOGS statement to MySQL dialect [#2193](https://github.com/sqlfluff/sqlfluff/pull/2193) [@jpy-git](https://github.com/jpy-git) * Grammar: Add RESET MASTER statement to MySQL dialect [#2194](https://github.com/sqlfluff/sqlfluff/pull/2194) [@jpy-git](https://github.com/jpy-git) * Grammar: Add RENAME TABLE statement to MySQL dialect [#2195](https://github.com/sqlfluff/sqlfluff/pull/2195) [@jpy-git](https://github.com/jpy-git) * Grammar: Tidy up transaction statements in Snowflake dialect [#2196](https://github.com/sqlfluff/sqlfluff/pull/2196) [@jpy-git](https://github.com/jpy-git) * Modifying Redshift USER/GROUP Statements To Use `ObjectReferenceSegment` [#2190](https://github.com/sqlfluff/sqlfluff/pull/2190) [@tdstark](https://github.com/tdstark) * Grammar: Fix TRUNCATE statement in Postgres dialect [#2185](https://github.com/sqlfluff/sqlfluff/pull/2185) [@jpy-git](https://github.com/jpy-git) * Grammar: Add LISTEN, NOTIFY, and UNLISTEN statements to Postgres dialect [#2174](https://github.com/sqlfluff/sqlfluff/pull/2174) [@jpy-git](https://github.com/jpy-git) * Grammar: Tidy up Snowflake/MySQL/HIVE USE statements 
[#2187](https://github.com/sqlfluff/sqlfluff/pull/2187) [@jpy-git](https://github.com/jpy-git) * Make Snowflake keywords unreserved: account, organization, pivot [#2172](https://github.com/sqlfluff/sqlfluff/pull/2172) [@jpy-git](https://github.com/jpy-git) * Grammar: Add SET sql_log_bin statement to MySQL dialect [#2192](https://github.com/sqlfluff/sqlfluff/pull/2192) [@jpy-git](https://github.com/jpy-git) * Grammar: Add CALL statement to Snowflake dialect [#2176](https://github.com/sqlfluff/sqlfluff/pull/2176) [@jpy-git](https://github.com/jpy-git) * L027 Fix self referring column alias edge case [#2171](https://github.com/sqlfluff/sqlfluff/pull/2171) [@jpy-git](https://github.com/jpy-git) * T-SQL: Remove dependency on ANSI keyword lists [#2170](https://github.com/sqlfluff/sqlfluff/pull/2170) [@jpers36](https://github.com/jpers36) * Grammar: Add Table Maintenance Statements to MySQL dialect [#2198](https://github.com/sqlfluff/sqlfluff/pull/2198) [@jpy-git](https://github.com/jpy-git) * Adding CREATE TABLE AS to Redshift [#2205](https://github.com/sqlfluff/sqlfluff/pull/2205) [@tdstark](https://github.com/tdstark) * T-SQL: Add support for ALTER TABLE ALTER COLUMN [#2208](https://github.com/sqlfluff/sqlfluff/pull/2208) [@jpers36](https://github.com/jpers36) * Remove oyaml in favour of pyyaml [#2210](https://github.com/sqlfluff/sqlfluff/pull/2210) [@jpy-git](https://github.com/jpy-git) * Support Spark `CREATE TABLE LIKE` syntax [#2207](https://github.com/sqlfluff/sqlfluff/pull/2207) [@R7L208](https://github.com/R7L208) * Add override for linguist to include SQL in language statistics [#2214](https://github.com/sqlfluff/sqlfluff/pull/2214) [@jpy-git](https://github.com/jpy-git) * Add type stubs for appdirs and colorama to improve mypy coverage [#2211](https://github.com/sqlfluff/sqlfluff/pull/2211) [@jpy-git](https://github.com/jpy-git) * Remove cached-property in favour of stdlib functools implementation [#2212](https://github.com/sqlfluff/sqlfluff/pull/2212) [@jpy-git](https://github.com/jpy-git) * Restructure CASE segment (extract WHEN and ELSE into their own segment types) [#2213](https://github.com/sqlfluff/sqlfluff/pull/2213) [@barrywhart](https://github.com/barrywhart) * Add types-regex package for type checking [#2216](https://github.com/sqlfluff/sqlfluff/pull/2216) [@jpy-git](https://github.com/jpy-git) * Snowflake: Split out `CREATE VIEW` into its own segment [#2217](https://github.com/sqlfluff/sqlfluff/pull/2217) [@wong-codaio](https://github.com/wong-codaio) * Grammar: Fix multi-character comparison operators [#2197](https://github.com/sqlfluff/sqlfluff/pull/2197) [@jpy-git](https://github.com/jpy-git) * Snowflake: Support TOP N select clause modifier [#2222](https://github.com/sqlfluff/sqlfluff/pull/2222) [@wong-codaio](https://github.com/wong-codaio) * Fix CLI arguments to allow for autocompletion [#2218](https://github.com/sqlfluff/sqlfluff/pull/2218) [@jpy-git](https://github.com/jpy-git) * Simplify rule creation by adding a functional API to RuleContext [#2126](https://github.com/sqlfluff/sqlfluff/pull/2126) [@barrywhart](https://github.com/barrywhart) * Simplify nested cases [#2223](https://github.com/sqlfluff/sqlfluff/pull/2223) [@barrywhart](https://github.com/barrywhart) * Reword lint message for L058 per review [#2226](https://github.com/sqlfluff/sqlfluff/pull/2226) [@barrywhart](https://github.com/barrywhart) * Update BaseRule.discard_unsafe_fixes() to avoid touching templated code [#2220](https://github.com/sqlfluff/sqlfluff/pull/2220) 
[@barrywhart](https://github.com/barrywhart) * Add L059 - Capitalization on Data Types [#2227](https://github.com/sqlfluff/sqlfluff/pull/2227) [@tdstark](https://github.com/tdstark) * T-SQL: Table valued functions [#2233](https://github.com/sqlfluff/sqlfluff/pull/2233) [@jpers36](https://github.com/jpers36) * Don't allow fixes to COPY code from templated regions [#2231](https://github.com/sqlfluff/sqlfluff/pull/2231) [@barrywhart](https://github.com/barrywhart) * Fix several small issues with rule docs [#2234](https://github.com/sqlfluff/sqlfluff/pull/2234) [@barrywhart](https://github.com/barrywhart) * postgres: Add datatypes [#2121](https://github.com/sqlfluff/sqlfluff/pull/2121) [@kayman-mk](https://github.com/kayman-mk) * Combine L059 and L010 [#2238](https://github.com/sqlfluff/sqlfluff/pull/2238) [@tdstark](https://github.com/tdstark) * Fix L044 assertion failure: "SELECT *" with no "FROM" clause [#2239](https://github.com/sqlfluff/sqlfluff/pull/2239) [@barrywhart](https://github.com/barrywhart) * Docs: Make Specific Rules docstring more user friendly [#2241](https://github.com/sqlfluff/sqlfluff/pull/2241) [@jpy-git](https://github.com/jpy-git) * Fix a bug handling Jinja "{% set %}" blocks with a templated block inside [#2240](https://github.com/sqlfluff/sqlfluff/pull/2240) [@barrywhart](https://github.com/barrywhart) * Redshift lint create external table statements [#2229](https://github.com/sqlfluff/sqlfluff/pull/2229) [@tinder-albertyue](https://github.com/tinder-albertyue) * Update tox.ini for best practices [#2243](https://github.com/sqlfluff/sqlfluff/pull/2243) [@jpy-git](https://github.com/jpy-git) * Docs: Make code blocks consistent [#2242](https://github.com/sqlfluff/sqlfluff/pull/2242) [@jpy-git](https://github.com/jpy-git) * Add support for nested Jinja macros [#2246](https://github.com/sqlfluff/sqlfluff/pull/2246) [@barrywhart](https://github.com/barrywhart) * Support `DROP` DDL statements for Spark3 [#2215](https://github.com/sqlfluff/sqlfluff/pull/2215) [@R7L208](https://github.com/R7L208) * Docker Compose environment for SQLFluff developers [#2254](https://github.com/sqlfluff/sqlfluff/pull/2254) [@barrywhart](https://github.com/barrywhart) * T-SQL: Add OFFSET unreserved keyword [#2258](https://github.com/sqlfluff/sqlfluff/pull/2258) [@jpers36](https://github.com/jpers36) * Fix connection issue in dbt 1.0.0 [#2230](https://github.com/sqlfluff/sqlfluff/pull/2230) [@NiallRees](https://github.com/NiallRees) * Redshift CREATE SCHEMA statements [#2252](https://github.com/sqlfluff/sqlfluff/pull/2252) [@rpr-ableton](https://github.com/rpr-ableton) * Enhance Snowflake COPY INTO [#2250](https://github.com/sqlfluff/sqlfluff/pull/2250) [@chwiese](https://github.com/chwiese) * Coverage for 'REPAIR' Statements for Hive & Spark3 dialect [#2256](https://github.com/sqlfluff/sqlfluff/pull/2256) [@R7L208](https://github.com/R7L208) ## New Contributors * [@mcannamela](https://github.com/mcannamela) made their first contribution in [#2111](https://github.com/sqlfluff/sqlfluff/pull/2111) * [@ciklista](https://github.com/ciklista) made their first contribution in [#2143](https://github.com/sqlfluff/sqlfluff/pull/2143) * [@juhoautio](https://github.com/juhoautio) made their first contribution in [#2144](https://github.com/sqlfluff/sqlfluff/pull/2144) * [@tinder-albertyue](https://github.com/tinder-albertyue) made their first contribution in [#2229](https://github.com/sqlfluff/sqlfluff/pull/2229) * [@rpr-ableton](https://github.com/rpr-ableton) made their first contribution in 
[#2252](https://github.com/sqlfluff/sqlfluff/pull/2252) ## [0.9.0] - 2021-12-13 ## What’s Changed This release brings about several great new additions including: - dbt 1.0.0 compatibility. - CLI and Simple API parameters to provide custom paths to config files. - Refinement to Simple API to return parse output in JSON format rather than as an internal SQLFluff object (**BREAKING CHANGE**). - An [Official SQLFluff Docker Image](https://hub.docker.com/r/sqlfluff/sqlfluff). - Grammar improvements across various dialects. - A new rule (L057) to check for non-alphanumeric values in identifiers. There have also been many bug fixes and improvements to the CI and development processes. ## 🚀 Enhancements * T-SQL: Reserved Keyword cleanup [#2100](https://github.com/sqlfluff/sqlfluff/pull/2100) [@jpers36](https://github.com/jpers36) * Add wiki links to CONTRIBUTING.md [#2106](https://github.com/sqlfluff/sqlfluff/pull/2106) [@tunetheweb](https://github.com/tunetheweb) * Add snowflake create stage and alter stage statements + RegexParser case fix [#2098](https://github.com/sqlfluff/sqlfluff/pull/2098) [@chwiese](https://github.com/chwiese) * Allow for more value types in ALTER TABLE ALTER COLUMN SET DEFAULT statement [#2101](https://github.com/sqlfluff/sqlfluff/pull/2101) [@derickl](https://github.com/derickl) * Grammar: Adds support for ALTER VIEW statement for Postgres dialect [#2096](https://github.com/sqlfluff/sqlfluff/pull/2096) [@derickl](https://github.com/derickl) * Add example for using JSON output of Simple API parse function [#2099](https://github.com/sqlfluff/sqlfluff/pull/2099) [@jpy-git](https://github.com/jpy-git) * Allow optional keywords in create table unique constraints [#2077](https://github.com/sqlfluff/sqlfluff/pull/2077) [@kayman-mk](https://github.com/kayman-mk) * Grammar: Adds support for ALTER FUNCTION statement for Postgres dialect [#2090](https://github.com/sqlfluff/sqlfluff/pull/2090) [@derickl](https://github.com/derickl) * Grammar: adds support for CREATE/ALTER/DROP DATABASE for Postgres dialect [#2081](https://github.com/sqlfluff/sqlfluff/pull/2081) [@derickl](https://github.com/derickl) * Update parse method of Simple API to output JSON parse tree [#2082](https://github.com/sqlfluff/sqlfluff/pull/2082) [@jpy-git](https://github.com/jpy-git) * T-SQL dialect: add parsing for MERGE statement [#2057](https://github.com/sqlfluff/sqlfluff/pull/2057) [@tkachenkomaria244](https://github.com/tkachenkomaria244) * Simple API config path [#2080](https://github.com/sqlfluff/sqlfluff/pull/2080) [@jpy-git](https://github.com/jpy-git) * dbt 1.0.0 compatibility [#2079](https://github.com/sqlfluff/sqlfluff/pull/2079) [@alanmcruickshank](https://github.com/alanmcruickshank) * Parse `on delete` and `on update` clause for create table constraints [#2076](https://github.com/sqlfluff/sqlfluff/pull/2076) [@kayman-mk](https://github.com/kayman-mk) * Pre-commit: Add hook for doc8 [#2074](https://github.com/sqlfluff/sqlfluff/pull/2074) [@jpy-git](https://github.com/jpy-git) * Grammar: Fix typo in Alter Table parser in Postgres dialect [#2072](https://github.com/sqlfluff/sqlfluff/pull/2072) [@derickl](https://github.com/derickl) * Grammar: Adds support for materialized views for postgres dialect [#2041](https://github.com/sqlfluff/sqlfluff/pull/2041) [@derickl](https://github.com/derickl) * Add basic pre-commit config [#2067](https://github.com/sqlfluff/sqlfluff/pull/2067) [@jpy-git](https://github.com/jpy-git) * CLI: Add --ignore-local-config flag 
[#2061](https://github.com/sqlfluff/sqlfluff/pull/2061) [@jpy-git](https://github.com/jpy-git) * T-SQL: INSERT INTO [#2054](https://github.com/sqlfluff/sqlfluff/pull/2054) [@jpers36](https://github.com/jpers36) * Add --disable-noqa option to CLI and config [#2043](https://github.com/sqlfluff/sqlfluff/pull/2043) [@jpy-git](https://github.com/jpy-git) * T-SQL: TRY/CATCH [#2044](https://github.com/sqlfluff/sqlfluff/pull/2044) [@jpers36](https://github.com/jpers36) * enabled arrays support in `declare` and `set` statements for `bigquery` dialect [#2038](https://github.com/sqlfluff/sqlfluff/pull/2038) [@KulykDmytro](https://github.com/KulykDmytro) * L008 refactor [#2004](https://github.com/sqlfluff/sqlfluff/pull/2004) [@jpy-git](https://github.com/jpy-git) * Support __init__.py for library_path [#1976](https://github.com/sqlfluff/sqlfluff/pull/1976) [@Tonkonozhenko](https://github.com/Tonkonozhenko) * L052: Redefine semi-colon newline to multiline newline [#2022](https://github.com/sqlfluff/sqlfluff/pull/2022) [@jpy-git](https://github.com/jpy-git) * Grammar: Remove hash inline comment from Postgres [#2035](https://github.com/sqlfluff/sqlfluff/pull/2035) [@jpy-git](https://github.com/jpy-git) * `noqa` enhancement: Enable glob rule matching for inline comments [#2002](https://github.com/sqlfluff/sqlfluff/pull/2002) [@jpy-git](https://github.com/jpy-git) * T-SQL (ASA): Allow for table identifier in DELETE clause [#2031](https://github.com/sqlfluff/sqlfluff/pull/2031) [@jpers36](https://github.com/jpers36) * T-SQL (ASA): Fix CTAS with WITH statement [#2028](https://github.com/sqlfluff/sqlfluff/pull/2028) [@jpers36](https://github.com/jpers36) * Grammar: Parse multiple grants [#2023](https://github.com/sqlfluff/sqlfluff/pull/2023) [@jpy-git](https://github.com/jpy-git) * Add tsql nested block comment support and add regex package dependency [#2027](https://github.com/sqlfluff/sqlfluff/pull/2027) [@jpy-git](https://github.com/jpy-git) * Grammar: Add complete Snowflake datetime units [#2026](https://github.com/sqlfluff/sqlfluff/pull/2026) [@jpy-git](https://github.com/jpy-git) * Grammar: Add DROP POLICY statement to postgres dialect [#2024](https://github.com/sqlfluff/sqlfluff/pull/2024) [@jpy-git](https://github.com/jpy-git) * Grammar: Add complete datetime units to postgres dialect [#2025](https://github.com/sqlfluff/sqlfluff/pull/2025) [@jpy-git](https://github.com/jpy-git) * Grammar: Postgres CREATE POLICY [#2021](https://github.com/sqlfluff/sqlfluff/pull/2021) [@jpy-git](https://github.com/jpy-git) * Speed up CI [#1957](https://github.com/sqlfluff/sqlfluff/pull/1957) [@pwildenhain](https://github.com/pwildenhain) * Add support for Snowflake create/alter SQL and js UDF [#1993](https://github.com/sqlfluff/sqlfluff/pull/1993) [@chwiese](https://github.com/chwiese) * Add encoding CLI argument [#1994](https://github.com/sqlfluff/sqlfluff/pull/1994) [@jpy-git](https://github.com/jpy-git) * T-SQL: Spaces allowed in comparison operators [#1965](https://github.com/sqlfluff/sqlfluff/pull/1965) [@jpers36](https://github.com/jpers36) * Add Snowflake schema options [#1950](https://github.com/sqlfluff/sqlfluff/pull/1950) [@chwiese](https://github.com/chwiese) * CLI/`.sqlfluff` enhancement: Rule globs [#1972](https://github.com/sqlfluff/sqlfluff/pull/1972) [@jpy-git](https://github.com/jpy-git) * Add config CLI argument to lint, fix, and parse [#1986](https://github.com/sqlfluff/sqlfluff/pull/1986) [@jpy-git](https://github.com/jpy-git) * Add type hints to simple API 
[#1951](https://github.com/sqlfluff/sqlfluff/pull/1951) [@jpy-git](https://github.com/jpy-git) * New rule to flag special characters in identifiers [#1958](https://github.com/sqlfluff/sqlfluff/pull/1958) [@jpers36](https://github.com/jpers36) * Allow column references in IN statement [#1971](https://github.com/sqlfluff/sqlfluff/pull/1971) [@tunetheweb](https://github.com/tunetheweb) * Remove config.ini in favor of setup.cfg [#1966](https://github.com/sqlfluff/sqlfluff/pull/1966) [@jpy-git](https://github.com/jpy-git) * Convert sqlfluff-templater-dbt setup.py to setup.cfg [#1963](https://github.com/sqlfluff/sqlfluff/pull/1963) [@jpy-git](https://github.com/jpy-git) * Official Docker image: Dockerfile and Github Actions workflow [#1945](https://github.com/sqlfluff/sqlfluff/pull/1945) [@jpy-git](https://github.com/jpy-git) * Move package metadata to setup.cfg [#1960](https://github.com/sqlfluff/sqlfluff/pull/1960) [@jpy-git](https://github.com/jpy-git) ## 🐛 Bug Fixes * Fix tsql block comment close [#2095](https://github.com/sqlfluff/sqlfluff/pull/2095) [@jpy-git](https://github.com/jpy-git) * Fix PlaceholderTemplater slice_type for templated code (substitutions) [#2085](https://github.com/sqlfluff/sqlfluff/pull/2085) [@barrywhart](https://github.com/barrywhart) * Exasol: Fix UDF script syntax [#2083](https://github.com/sqlfluff/sqlfluff/pull/2083) [@sti0](https://github.com/sti0) * Fix issues with placeholder templating docs [#2078](https://github.com/sqlfluff/sqlfluff/pull/2078) [@jpy-git](https://github.com/jpy-git) * Update dbt templater docs to clarify that the profiles_dir setting is optional [#2070](https://github.com/sqlfluff/sqlfluff/pull/2070) [@barrywhart](https://github.com/barrywhart) * Bug fix of L054 for Snowflake and Exasol [#2069](https://github.com/sqlfluff/sqlfluff/pull/2069) [@tunetheweb](https://github.com/tunetheweb) * Fix L043 issue when trying to autofix functions [#2059](https://github.com/sqlfluff/sqlfluff/pull/2059) [@jpy-git](https://github.com/jpy-git) * Add request for users dbt version in bug_report issue template [#2058](https://github.com/sqlfluff/sqlfluff/pull/2058) [@jpy-git](https://github.com/jpy-git) * Fix parameters for Snowflake create tasks statement [#2037](https://github.com/sqlfluff/sqlfluff/pull/2037) [@chwiese](https://github.com/chwiese) * Linguist: Include test/** in language statistics to better reflect use of SQL [#2034](https://github.com/sqlfluff/sqlfluff/pull/2034) [@jpy-git](https://github.com/jpy-git) * L044 should handle nested CTEs [#1991](https://github.com/sqlfluff/sqlfluff/pull/1991) [@barrywhart](https://github.com/barrywhart) * Add dbt adapter install advice to configuration documentation [#2011](https://github.com/sqlfluff/sqlfluff/pull/2011) [@jpy-git](https://github.com/jpy-git) * Update pre-commit dbt instructions to reference separate dbt package [#2005](https://github.com/sqlfluff/sqlfluff/pull/2005) [@jpy-git](https://github.com/jpy-git) * Fix config.get for iterable sections [#2020](https://github.com/sqlfluff/sqlfluff/pull/2020) [@jpy-git](https://github.com/jpy-git) * Fix inline comment interactions with L052 [#2019](https://github.com/sqlfluff/sqlfluff/pull/2019) [@jpy-git](https://github.com/jpy-git) * Make Snowflake tags DRY [#1992](https://github.com/sqlfluff/sqlfluff/pull/1992) [@chwiese](https://github.com/chwiese) * Rename whitelist/blacklist to allowlist/denylist [#1989](https://github.com/sqlfluff/sqlfluff/pull/1989) [@jpy-git](https://github.com/jpy-git) * Fix issue with inline ignores not respecting comment 
lines [#1985](https://github.com/sqlfluff/sqlfluff/pull/1985) [@jpy-git](https://github.com/jpy-git) * Fix L009 FileSegment child + new create_before/create_after edit types [#1979](https://github.com/sqlfluff/sqlfluff/pull/1979) [@jpy-git](https://github.com/jpy-git) * Adds extra check to L054 to avoid weird error messages [#1988](https://github.com/sqlfluff/sqlfluff/pull/1988) [@tunetheweb](https://github.com/tunetheweb) * BigQuery: Allow keywords in column reference components [#1987](https://github.com/sqlfluff/sqlfluff/pull/1987) [@tunetheweb](https://github.com/tunetheweb) * L027: Remove unnecessary crawl in get_select_statement_info [#1974](https://github.com/sqlfluff/sqlfluff/pull/1974) [@jpy-git](https://github.com/jpy-git) * Add __all__ attributes to __init__.py files to resolve F401 [#1949](https://github.com/sqlfluff/sqlfluff/pull/1949) [@jpy-git](https://github.com/jpy-git) * Fix incorrect comment on L055 [#1967](https://github.com/sqlfluff/sqlfluff/pull/1967) [@jpy-git](https://github.com/jpy-git) * Docs: fix docker hub link to public URL [#1964](https://github.com/sqlfluff/sqlfluff/pull/1964) [@kevinmarsh](https://github.com/kevinmarsh) * Fix issue releasing dbt package: tox commands run relative to repo root [#1962](https://github.com/sqlfluff/sqlfluff/pull/1962) [@jpy-git](https://github.com/jpy-git) ## [0.8.2] - 2021-11-22 ## What’s Changed One of the biggest new features in this release is the support for SQLAlchemy and other "placeholder" templating within SQL queries. Check out [the documentation on how to set it up](https://docs.sqlfluff.com/en/latest/configuration.html#placeholder-templating). This release also adds **seven** new rules. Get some help with your leading whitespace, semi-colon placement, inconsistent column references in `GROUP BY/ORDER BY`, and getting rid of `RIGHT JOIN`s, among other useful lints, with our new rules! See our [rules documentation](https://docs.sqlfluff.com/en/stable/rules.html) for more details. On top of those, we have made loads of grammar improvements across many dialects, improvements to the dbt templater (including fixes for issues where `sqlfluff fix` would corrupt the code :scream:), more fix routines, and lots more improvements. 
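To see one of the new rules in action, the sketch below runs the `RIGHT JOIN` rule (L055) through the simple Python API. This is an illustrative example rather than anything shipped with the release: it assumes sqlfluff 0.8.2+ is installed, that `lint` accepts a `rules` list (the matching `exclude_rules` option lands in this release, see #1850 below), and that violations come back as dicts with `code` and `description` keys.

```python
import sqlfluff

# A query using RIGHT JOIN, which the new rule L055 asks you to
# rewrite as a LEFT JOIN.
sql = "SELECT b.val FROM a RIGHT JOIN b ON a.id = b.id\n"

# Restrict the run to L055 so other violations don't drown out
# the one we care about.
violations = sqlfluff.lint(sql, dialect="ansi", rules=["L055"])
for violation in violations:
    print(violation["code"], violation["description"])
```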
## 🚀 Enhancements * [many dialects] Implement generic placeholder templating [#1887](https://github.com/sqlfluff/sqlfluff/pull/1887) [@jacopofar](https://github.com/jacopofar) * [many dialects] Add support for SQLAlchemy templating [#1878](https://github.com/sqlfluff/sqlfluff/pull/1878) [@jacopofar](https://github.com/jacopofar) * Add DROP PROCEDURE statement to T-SQL [#1921](https://github.com/sqlfluff/sqlfluff/pull/1921) [@jpy-git](https://github.com/jpy-git) * T-SQL dialect: fix index/tables creation options [#1955](https://github.com/sqlfluff/sqlfluff/pull/1955) [@tkachenkomaria244](https://github.com/tkachenkomaria244) * Add DROP TYPE statement to ANSI dialect [#1919](https://github.com/sqlfluff/sqlfluff/pull/1919) [@jpy-git](https://github.com/jpy-git) * Add INSERT INTO statements to Redshift Dialect [#1896](https://github.com/sqlfluff/sqlfluff/pull/1896) [@tdstark](https://github.com/tdstark) * Added TABLESAMPLE support to Bigquery [#1897](https://github.com/sqlfluff/sqlfluff/pull/1897) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add [LEFT] ANTI and [LEFT] SEMI joins to the Spark3 dialect [#1942](https://github.com/sqlfluff/sqlfluff/pull/1942) [@jpy-git](https://github.com/jpy-git) * Parse UPDATE/INSERT within WITH clause [#1889](https://github.com/sqlfluff/sqlfluff/pull/1889) [@jpy-git](https://github.com/jpy-git) * Add OVERRIDING SYSTEM/USER VALUE to insert statement in postgres dialect [#1869](https://github.com/sqlfluff/sqlfluff/pull/1869) [@jpy-git](https://github.com/jpy-git) * Add support for DROP SCHEMA [IF EXISTS] name [ CASCADE | RESTRICT ] [#1865](https://github.com/sqlfluff/sqlfluff/pull/1865) [@gimmyxd](https://github.com/gimmyxd) * Add CREATE TABLE Statement To Redshift [#1855](https://github.com/sqlfluff/sqlfluff/pull/1855) [@tdstark](https://github.com/tdstark) * Add DROP TYPE statement in postgres dialect [#1870](https://github.com/sqlfluff/sqlfluff/pull/1870) [@jpy-git](https://github.com/jpy-git) * Add SEQUENCE NAME to postgres sequence options [#1866](https://github.com/sqlfluff/sqlfluff/pull/1866) [@jpy-git](https://github.com/jpy-git) * Added SET Statement to Postgres [#1877](https://github.com/sqlfluff/sqlfluff/pull/1877) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Allow use of quoted identifiers to ALTER TABLE OWNER TO [#1856](https://github.com/sqlfluff/sqlfluff/pull/1856) [@markpolyak](https://github.com/markpolyak) * Updates to COPY INTO grammar in Snowflake [#1884](https://github.com/sqlfluff/sqlfluff/pull/1884) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres & T-SQL: Drop Function [#1924](https://github.com/sqlfluff/sqlfluff/pull/1924) [@WittierDinosaur](https://github.com/WittierDinosaur) * Add Expressions to SET syntax [#1852](https://github.com/sqlfluff/sqlfluff/pull/1852) [@tunetheweb](https://github.com/tunetheweb) * Update DbtTemplater to use JinjaTracer [#1788](https://github.com/sqlfluff/sqlfluff/pull/1788) [@barrywhart](https://github.com/barrywhart) * L043 refinement: Add autofix for common use of CASE to fill NULL values. 
[#1923](https://github.com/sqlfluff/sqlfluff/pull/1923) [@jpy-git](https://github.com/jpy-git) * New Rule L050: No leading whitespace [#1840](https://github.com/sqlfluff/sqlfluff/pull/1840) [@jpy-git](https://github.com/jpy-git) * L050: updating to target jinja templates [#1885](https://github.com/sqlfluff/sqlfluff/pull/1885) [@jpy-git](https://github.com/jpy-git) * New rule L051 to forbid lone JOIN [#1879](https://github.com/sqlfluff/sqlfluff/pull/1879) [@jpy-git](https://github.com/jpy-git) * New Rule L052: Semi colon alignment [#1902](https://github.com/sqlfluff/sqlfluff/pull/1902) [@jpy-git](https://github.com/jpy-git) * New Rule L053: Remove outer brackets from top-level statements. [#1916](https://github.com/sqlfluff/sqlfluff/pull/1916) [@jpy-git](https://github.com/jpy-git) * New Rule L054: Inconsistent column references in GROUP BY/ORDER BY clauses. [#1917](https://github.com/sqlfluff/sqlfluff/pull/1917) [@jpy-git](https://github.com/jpy-git) * New Rule L055: Use LEFT JOIN instead of RIGHT JOIN. [#1931](https://github.com/sqlfluff/sqlfluff/pull/1931) [@jpy-git](https://github.com/jpy-git) * New Rule L056: 'SP_' prefix should not be used for user-defined stored procedures [#1930](https://github.com/sqlfluff/sqlfluff/pull/1930) [@jpy-git](https://github.com/jpy-git) * Tsql partition by multiple columns [#1906](https://github.com/sqlfluff/sqlfluff/pull/1906) [@jpers36](https://github.com/jpers36) * Added bare functions to values clause [#1876](https://github.com/sqlfluff/sqlfluff/pull/1876) [@WittierDinosaur](https://github.com/WittierDinosaur) * Remove unnecessary context section, from code and the docs [#1905](https://github.com/sqlfluff/sqlfluff/pull/1905) [@jacopofar](https://github.com/jacopofar) * L036 docstring refinements [#1903](https://github.com/sqlfluff/sqlfluff/pull/1903) [@jpy-git](https://github.com/jpy-git) * Add `exclude_rules` option for the Simple API [#1850](https://github.com/sqlfluff/sqlfluff/pull/1850) [@tunetheweb](https://github.com/tunetheweb) * Tox improvements: Streamline development/testing environments. 
[#1860](https://github.com/sqlfluff/sqlfluff/pull/1860) [@jpy-git](https://github.com/jpy-git) * Add Tox publish commands [#1853](https://github.com/sqlfluff/sqlfluff/pull/1853) [@jpy-git](https://github.com/jpy-git) * Documentation: Change inheritance dialect example to Redshift [#1900](https://github.com/sqlfluff/sqlfluff/pull/1900) [@chwiese](https://github.com/chwiese) * Remove failing requires.io badge [#1898](https://github.com/sqlfluff/sqlfluff/pull/1898) [@jpy-git](https://github.com/jpy-git) * [Snowflake] Allow naked AUTOINCREMENT [#1883](https://github.com/sqlfluff/sqlfluff/pull/1883) [@gordonhart](https://github.com/gordonhart) * Add support for curly brackets in SnowSQL ampersand variables [#1901](https://github.com/sqlfluff/sqlfluff/pull/1901) [@chwiese](https://github.com/chwiese) * Add short form help option (-h) [#1947](https://github.com/sqlfluff/sqlfluff/pull/1947) [@jpy-git](https://github.com/jpy-git) * Remove plaintext API key from benchmark utility [#1863](https://github.com/sqlfluff/sqlfluff/pull/1863) [@jpy-git](https://github.com/jpy-git) * Add `skip_install` to static analysis sections of tox.ini [#1851](https://github.com/sqlfluff/sqlfluff/pull/1851) [@jpy-git](https://github.com/jpy-git) * Move typing_extensions from `requirements_dev.txt` to `requirements.txt` [#1956](https://github.com/sqlfluff/sqlfluff/pull/1956) [@jpy-git](https://github.com/jpy-git) ## 🐛 Bug Fixes * Fix bug where "sqlfluff fix" deletes dbt "{% snapshot %}" line [#1907](https://github.com/sqlfluff/sqlfluff/pull/1907) [@barrywhart](https://github.com/barrywhart) * Fix subquery bug in L026 [#1948](https://github.com/sqlfluff/sqlfluff/pull/1948) [@jpy-git](https://github.com/jpy-git) * Fix bug where L041 was confused by L016's placement of newlines in the parse tree [#1904](https://github.com/sqlfluff/sqlfluff/pull/1904) [@barrywhart](https://github.com/barrywhart) * Fix progressbar artifacts within linter errors [#1873](https://github.com/sqlfluff/sqlfluff/pull/1873) [@adam-tokarski](https://github.com/adam-tokarski) * Correct Snowflake warehouse sizes [#1872](https://github.com/sqlfluff/sqlfluff/pull/1872) [@jpy-git](https://github.com/jpy-git) * Fixed Delimited() logic, added T-SQL grammar [#1894](https://github.com/sqlfluff/sqlfluff/pull/1894) [@WittierDinosaur](https://github.com/WittierDinosaur) * L036 refinement - FROM clause interaction [#1893](https://github.com/sqlfluff/sqlfluff/pull/1893) [@jpy-git](https://github.com/jpy-git) * Add missing chardet install in setup.py [#1928](https://github.com/sqlfluff/sqlfluff/pull/1928) [@jpy-git](https://github.com/jpy-git) * Fix misplaced TableAliasInfo in L031 documentation [#1946](https://github.com/sqlfluff/sqlfluff/pull/1946) [@jpy-git](https://github.com/jpy-git) * Fix broken link to external SQL style guide [#1918](https://github.com/sqlfluff/sqlfluff/pull/1918) [@kevinmarsh](https://github.com/kevinmarsh) ## [0.8.1] - 2021-11-07 ## What’s Changed Fixes missing dependency issue with 0.8.0 for `tqdm`, plus add a test to ensure this does not happen again. ## 🐛 Bug Fixes * Fix: add tqdm to setup.py installation requirements [#1842](https://github.com/sqlfluff/sqlfluff/pull/1842) [@skykasko](https://github.com/skykasko) * Add test to ensure pip install works [#1843](https://github.com/sqlfluff/sqlfluff/pull/1843) [@tunetheweb](https://github.com/tunetheweb) ## [0.8.0] - 2021-11-07 ## What’s Changed This release brings an improvement to the performance of the parser, a rebuild of the Jinja Templater, and a progress bar for the CLI. 
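One headline change, the updated L009 logic that allows only a single trailing newline (#1838 below), is easy to see through the simple Python API's `fix` function. A minimal sketch, assuming `sqlfluff.fix` accepts a `rules` list to scope the fix:

```python
import sqlfluff

# Under the updated L009, repeated trailing newlines should be
# collapsed to a single one.
messy = "SELECT 1 FROM tbl\n\n\n"
fixed = sqlfluff.fix(messy, dialect="ansi", rules=["L009"])
print(repr(fixed))  # expected: 'SELECT 1 FROM tbl\n'
```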
Lots of dialect improvements have also been made. The full list of changes is below: ## 🚀 Enhancements * Updated L009 logic to only allow a single trailing newline. [#1838](https://github.com/sqlfluff/sqlfluff/pull/1838) [@jpy-git](https://github.com/jpy-git) * Progressbar utility [#1609](https://github.com/sqlfluff/sqlfluff/pull/1609) [@adam-tokarski](https://github.com/adam-tokarski) * Teradata dialect: Add support for SEL form of SELECT [#1776](https://github.com/sqlfluff/sqlfluff/pull/1776) [@samlader](https://github.com/samlader) * Added trigger support in ANSI - and extended it in Postgres [#1818](https://github.com/sqlfluff/sqlfluff/pull/1818) [@WittierDinosaur](https://github.com/WittierDinosaur) * Exasol: Make references more strict [#1829](https://github.com/sqlfluff/sqlfluff/pull/1829) [@sti0](https://github.com/sti0) * Hive: INSERT statement support [#1828](https://github.com/sqlfluff/sqlfluff/pull/1828) [@mifercre](https://github.com/mifercre) * ANSI: Add TABLESAMPLE support [#1811](https://github.com/sqlfluff/sqlfluff/pull/1811) [@CrossNox](https://github.com/CrossNox) * T-SQL: Support trailing commas in CREATE TABLE [#1817](https://github.com/sqlfluff/sqlfluff/pull/1817) [@tommydb](https://github.com/tommydb) * Spark3: Add CREATE VIEW support [#1813](https://github.com/sqlfluff/sqlfluff/pull/1813) [@DipeshCS](https://github.com/DipeshCS) * BigQuery: Support PIVOT and UNPIVOT [#1794](https://github.com/sqlfluff/sqlfluff/pull/1794) [@tunetheweb](https://github.com/tunetheweb) * L029: Optionally check quoted identifiers in addition to naked identifiers [#1775](https://github.com/sqlfluff/sqlfluff/pull/1775) [@jpers36](https://github.com/jpers36) * Add sysdate to Redshift as a bare function [#1789](https://github.com/sqlfluff/sqlfluff/pull/1789) [@tdstark](https://github.com/tdstark) * Robust Jinja raw/template mapping [#1678](https://github.com/sqlfluff/sqlfluff/pull/1678) [@barrywhart](https://github.com/barrywhart) * Add CREATE TABLE AS to Postgres and Redshift [#1785](https://github.com/sqlfluff/sqlfluff/pull/1785) [@tdstark](https://github.com/tdstark) * Improve Parser Performance By Caching Values [#1744](https://github.com/sqlfluff/sqlfluff/pull/1744) [@WittierDinosaur](https://github.com/WittierDinosaur) * templater-dbt: Change dbt dependency to dbt-core [#1786](https://github.com/sqlfluff/sqlfluff/pull/1786) [@amardeep](https://github.com/amardeep) * T-SQL: Create Schema definition [#1773](https://github.com/sqlfluff/sqlfluff/pull/1773) [@jpers36](https://github.com/jpers36) * T-SQL: allow optional brackets for column default constraints [#1760](https://github.com/sqlfluff/sqlfluff/pull/1760) [@nevado](https://github.com/nevado) * Postgres: Support parameters and identifiers prepended with _ and containing $ [#1765](https://github.com/sqlfluff/sqlfluff/pull/1765) [@WittierDinosaur](https://github.com/WittierDinosaur) * Postgres: Added support for double precision [#1764](https://github.com/sqlfluff/sqlfluff/pull/1764) [@WittierDinosaur](https://github.com/WittierDinosaur) * "sqlfluff fix": Write to a temporary .sql file first [#1763](https://github.com/sqlfluff/sqlfluff/pull/1763) [@barrywhart](https://github.com/barrywhart) * Update older dbt dependency [#1756](https://github.com/sqlfluff/sqlfluff/pull/1756) [@alanmcruickshank](https://github.com/alanmcruickshank) * T-SQL: add IDENTITY column constraint [#1757](https://github.com/sqlfluff/sqlfluff/pull/1757) [@nevado](https://github.com/nevado) * Update CI to run under Python 3.10 
[#1739](https://github.com/sqlfluff/sqlfluff/pull/1739) [@rooterkyberian](https://github.com/rooterkyberian) * MySQL: Add drop index support [#1738](https://github.com/sqlfluff/sqlfluff/pull/1738) [@fatelei](https://github.com/fatelei) * Snowflake dialect improvements [#1737](https://github.com/sqlfluff/sqlfluff/pull/1737) [@tunetheweb](https://github.com/tunetheweb) * Add missing test case [#1735](https://github.com/sqlfluff/sqlfluff/pull/1735) [@tunetheweb](https://github.com/tunetheweb) ## 🐛 Bug Fixes * Fix: Add missing init file to sqlfluff.core.templaters.slicers [#1826](https://github.com/sqlfluff/sqlfluff/pull/1826) [@CrossNox](https://github.com/CrossNox) * Hive: Fix order of CREATE TEMPORARY EXTERNAL TABLE [#1825](https://github.com/sqlfluff/sqlfluff/pull/1825) [@mifercre](https://github.com/mifercre) * T-SQL: add AS keyword as optional in PIVOT-UNPIVOT [#1807](https://github.com/sqlfluff/sqlfluff/pull/1807) [@tkachenkomaria244](https://github.com/tkachenkomaria244) * Prevent L019 plus L034 corrupting SQL [#1803](https://github.com/sqlfluff/sqlfluff/pull/1803) [@barrywhart](https://github.com/barrywhart) * L028 fix - Allow SELECT column alias in WHERE clauses for certain dialects [#1796](https://github.com/sqlfluff/sqlfluff/pull/1796) [@tunetheweb](https://github.com/tunetheweb) * Comment out instructions in GitHub templates [#1792](https://github.com/sqlfluff/sqlfluff/pull/1792) [@tunetheweb](https://github.com/tunetheweb) * Fix internal error in L016 when template/whitespace-only line too long [#1795](https://github.com/sqlfluff/sqlfluff/pull/1795) [@barrywhart](https://github.com/barrywhart) * Fix L049 to allow = NULL in SET clauses [#1791](https://github.com/sqlfluff/sqlfluff/pull/1791) [@tunetheweb](https://github.com/tunetheweb) * Hive: Fix bug in CREATE TABLE WITH syntax [#1790](https://github.com/sqlfluff/sqlfluff/pull/1790) [@iajoiner](https://github.com/iajoiner) * Fixed encoding error when linting to file [#1787](https://github.com/sqlfluff/sqlfluff/pull/1787) [@WittierDinosaur](https://github.com/WittierDinosaur) * Fix L012 documentation [#1782](https://github.com/sqlfluff/sqlfluff/pull/1782) [@jpers36](https://github.com/jpers36) * T-SQL: fix quote alias [#1766](https://github.com/sqlfluff/sqlfluff/pull/1766) [@jpers36](https://github.com/jpers36) * Fix incorrect indentation issue [#1733](https://github.com/sqlfluff/sqlfluff/pull/1733) [@tunetheweb](https://github.com/tunetheweb) * T-SQL: Fix OVER functionality for functions [#1731](https://github.com/sqlfluff/sqlfluff/pull/1731) [@jpers36](https://github.com/jpers36) ## [0.7.1] - 2021-10-22 ## What’s Changed Highlights of this release include a lot of T-SQL dialect improvements (shout out to @jpers36 for most of these!). We also added Spark3 as a new dialect thanks to @R7L208. The complete list of changes is shown below. 
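Before the list, a quick taste of the new dialect: once upgraded, Spark SQL can be linted by passing the dialect label to the simple Python API. A hedged sketch, assuming the dialect is registered under the label `spark3` (the name used when it was introduced in #1706 below):

```python
import sqlfluff

# Lint a Spark-flavoured query against the newly added dialect.
sql = "SELECT sku, count(*) AS n_sales FROM sales GROUP BY sku\n"
violations = sqlfluff.lint(sql, dialect="spark3")
print(violations)
```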
## 🚀 Enhancements * T-SQL: Add rank functions [#1725](https://github.com/sqlfluff/sqlfluff/pull/1725) [@jpers36](https://github.com/jpers36) * Spark3 Dialect Support [#1706](https://github.com/sqlfluff/sqlfluff/pull/1706) [@R7L208](https://github.com/R7L208) * Postgres Array Support [#1722](https://github.com/sqlfluff/sqlfluff/pull/1722) [@WittierDinosaur](https://github.com/WittierDinosaur) * Hive: Add LEFT SEMI JOIN support [#1718](https://github.com/sqlfluff/sqlfluff/pull/1718) [@fatelei](https://github.com/fatelei) * MySQL: Change and drop column in alter table [#1670](https://github.com/sqlfluff/sqlfluff/pull/1670) [@MontealegreLuis](https://github.com/MontealegreLuis) * Added type hints to some rule files [#1616](https://github.com/sqlfluff/sqlfluff/pull/1616) [@ttomasz](https://github.com/ttomasz) * Added Redshift to README [#1720](https://github.com/sqlfluff/sqlfluff/pull/1720) [@WittierDinosaur](https://github.com/WittierDinosaur) * Exasol: Fix create table statement [#1700](https://github.com/sqlfluff/sqlfluff/pull/1700) [@sti0](https://github.com/sti0) * T-SQL: Add optional delimiter to SET [#1717](https://github.com/sqlfluff/sqlfluff/pull/1717) [@jpers36](https://github.com/jpers36) * T-SQL: Escaped quotes [#1715](https://github.com/sqlfluff/sqlfluff/pull/1715) [@jpers36](https://github.com/jpers36) * T-SQL: SELECT INTO [#1714](https://github.com/sqlfluff/sqlfluff/pull/1714) [@jpers36](https://github.com/jpers36) * Postgres: Added support for psql variables [#1709](https://github.com/sqlfluff/sqlfluff/pull/1709) [@WittierDinosaur](https://github.com/WittierDinosaur) * T-SQL: split location clause out from index clause [#1711](https://github.com/sqlfluff/sqlfluff/pull/1711) [@jpers36](https://github.com/jpers36) * T-SQL: Override ANSI HAVING [#1707](https://github.com/sqlfluff/sqlfluff/pull/1707) [@jpers36](https://github.com/jpers36) * T-SQL: Add UPDATE STATISTICS [#1703](https://github.com/sqlfluff/sqlfluff/pull/1703) [@jpers36](https://github.com/jpers36) * T-SQL: CTAS Option Clause [#1705](https://github.com/sqlfluff/sqlfluff/pull/1705) [@jpers36](https://github.com/jpers36) * T-SQL: DECLARE has optional AS [#1704](https://github.com/sqlfluff/sqlfluff/pull/1704) [@jpers36](https://github.com/jpers36) * T-SQL: DROP STATISTICS and INDEX [#1698](https://github.com/sqlfluff/sqlfluff/pull/1698) [@jpers36](https://github.com/jpers36) * T-SQL: CTAS select can be optionally bracketed [#1697](https://github.com/sqlfluff/sqlfluff/pull/1697) [@jpers36](https://github.com/jpers36) * Exasol: Make function_script_terminator more strict [#1696](https://github.com/sqlfluff/sqlfluff/pull/1696) [@sti0](https://github.com/sti0) * T-SQL distribution index location [#1695](https://github.com/sqlfluff/sqlfluff/pull/1695) [@jpers36](https://github.com/jpers36) * T-SQL: allow for non-alphanumeric initial characters in delimited identifiers [#1693](https://github.com/sqlfluff/sqlfluff/pull/1693) [@jpers36](https://github.com/jpers36) * T-SQL: allow for semi-colon after BEGIN in a BEGIN/END block [#1694](https://github.com/sqlfluff/sqlfluff/pull/1694) [@jpers36](https://github.com/jpers36) * Exasol: Fix adapter script syntax [#1692](https://github.com/sqlfluff/sqlfluff/pull/1692) [@sti0](https://github.com/sti0) * T-SQL: Basic EXECUTE functionality [#1691](https://github.com/sqlfluff/sqlfluff/pull/1691) [@jpers36](https://github.com/jpers36) * T-SQL: Add #, @ to valid identifier characters [#1690](https://github.com/sqlfluff/sqlfluff/pull/1690) [@jpers36](https://github.com/jpers36) * T-SQL - add 
support for Filegroups in table create [#1689](https://github.com/sqlfluff/sqlfluff/pull/1689) [@nevado](https://github.com/nevado) * Exclude Exasol scripts from rule L003 [#1684](https://github.com/sqlfluff/sqlfluff/pull/1684) [@tunetheweb](https://github.com/tunetheweb) * Added PostGIS keyword data types to Postgres [#1686](https://github.com/sqlfluff/sqlfluff/pull/1686) [@WittierDinosaur](https://github.com/WittierDinosaur) * Indent LIMIT values if on separate line [#1683](https://github.com/sqlfluff/sqlfluff/pull/1683) [@tunetheweb](https://github.com/tunetheweb) * Postgres: Added support for SELECT INTO statements [#1676](https://github.com/sqlfluff/sqlfluff/pull/1676) [@WittierDinosaur](https://github.com/WittierDinosaur) * Allow :: casting of CASE statements [#1657](https://github.com/sqlfluff/sqlfluff/pull/1657) [@tunetheweb](https://github.com/tunetheweb) * Add more keywords to Redshift and BigQuery to avoid errors [#1671](https://github.com/sqlfluff/sqlfluff/pull/1671) [@tunetheweb](https://github.com/tunetheweb) * T-SQL begin end delimiter [#1664](https://github.com/sqlfluff/sqlfluff/pull/1664) [@jpers36](https://github.com/jpers36) * Teradata: Added date as bare function for [#1663](https://github.com/sqlfluff/sqlfluff/pull/1663) [@anzelpwj](https://github.com/anzelpwj) * T-SQL: CREATE STATISTICS [#1662](https://github.com/sqlfluff/sqlfluff/pull/1662) [@jpers36](https://github.com/jpers36) * T-SQL table and query hints [#1661](https://github.com/sqlfluff/sqlfluff/pull/1661) [@jpers36](https://github.com/jpers36) * T-SQL: Allow spaces in qualified names [#1654](https://github.com/sqlfluff/sqlfluff/pull/1654) [@jpers36](https://github.com/jpers36) ## 🐛 Bug Fixes * EXASOL: Fix typo in alter_table_statement [#1726](https://github.com/sqlfluff/sqlfluff/pull/1726) [@sti0](https://github.com/sti0) * Fix markdown links in production.rst [#1721](https://github.com/sqlfluff/sqlfluff/pull/1721) [@asottile](https://github.com/asottile) * Correct contributing testing information [#1702](https://github.com/sqlfluff/sqlfluff/pull/1702) [@adam-tokarski](https://github.com/adam-tokarski) * More ORDER BY clarifications [#1681](https://github.com/sqlfluff/sqlfluff/pull/1681) [@tunetheweb](https://github.com/tunetheweb) * Fix T-SQL L025 linter exception [#1677](https://github.com/sqlfluff/sqlfluff/pull/1677) [@tunetheweb](https://github.com/tunetheweb) * Improve Jinja whitespace handling in rules [#1647](https://github.com/sqlfluff/sqlfluff/pull/1647) [@barrywhart](https://github.com/barrywhart) ## [0.7.0] - 2021-10-14 **BREAKING CHANGE** This release extracts the dbt templater to a separately installable plugin [sqlfluff-templater-dbt](https://pypi.org/project/sqlfluff-templater-dbt/). For users who take advantage of the dbt templater, see the [updated docs on how to migrate](https://docs.sqlfluff.com/en/latest/configuration.html#installation-configuration). It also adds the `redshift` dialect and removes the `exasol_fs` dialect which has been merged into the `exasol` dialect. 
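A quick way to confirm the dialect changes after upgrading is to inspect the registered dialects from Python. A hedged sketch, assuming the simple API exposes `list_dialects()` (as current versions do) and that the returned tuples carry a `label` field:

```python
import sqlfluff

# After 0.7.0, "redshift" should be registered, and the old
# "exasol_fs" label gone (its grammar merged into "exasol").
labels = [dialect.label for dialect in sqlfluff.list_dialects()]
assert "redshift" in labels
assert "exasol" in labels
assert "exasol_fs" not in labels
```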
## What’s Changed

* src/sqlfluff/core/linter: Improve ignore file processing [#1650](https://github.com/sqlfluff/sqlfluff/pull/1650) [@CyberShadow](https://github.com/CyberShadow)
* Misc documentation updates [#1644](https://github.com/sqlfluff/sqlfluff/pull/1644) [@tunetheweb](https://github.com/tunetheweb)
* Segregate dbt plugin tests [#1610](https://github.com/sqlfluff/sqlfluff/pull/1610) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Add initial Redshift support [#1641](https://github.com/sqlfluff/sqlfluff/pull/1641) [@tunetheweb](https://github.com/tunetheweb)
* Update docs for dbt templater, improve error messages when not installed. [#1583](https://github.com/sqlfluff/sqlfluff/pull/1583) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Make templaters pluggable and move the dbt templater into a plugin [#1264](https://github.com/sqlfluff/sqlfluff/pull/1264) [@alanmcruickshank](https://github.com/alanmcruickshank)

## 🚀 Enhancements

* T-SQL: CTAS delimiter [#1652](https://github.com/sqlfluff/sqlfluff/pull/1652) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow for multiple variables DECLAREd in the same statement [#1651](https://github.com/sqlfluff/sqlfluff/pull/1651) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow DECLARE/SET statements to parse using ExpressionStatement [#1649](https://github.com/sqlfluff/sqlfluff/pull/1649) [@jpers36](https://github.com/jpers36)
* T-SQL PRINT statement parsing [#1648](https://github.com/sqlfluff/sqlfluff/pull/1648) [@jpers36](https://github.com/jpers36)
* Better date function for tsql [#1636](https://github.com/sqlfluff/sqlfluff/pull/1636) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Allow for multiple statements in a procedure [#1637](https://github.com/sqlfluff/sqlfluff/pull/1637) [@jpers36](https://github.com/jpers36)
* T-SQL: Allow for !>, !< operators [#1640](https://github.com/sqlfluff/sqlfluff/pull/1640) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix GROUP BY delimiter [#1635](https://github.com/sqlfluff/sqlfluff/pull/1635) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix DROP delimiter [#1633](https://github.com/sqlfluff/sqlfluff/pull/1633) [@jpers36](https://github.com/jpers36)
* T-SQL: +RENAME statement for Azure Synapse Analytics [#1631](https://github.com/sqlfluff/sqlfluff/pull/1631) [@jpers36](https://github.com/jpers36)
* T-SQL: Fix CASTing variables [#1627](https://github.com/sqlfluff/sqlfluff/pull/1627) [@jpers36](https://github.com/jpers36)
* Snowflake: Add implementation for CREATE TASK statement [#1597](https://github.com/sqlfluff/sqlfluff/pull/1597) [#1603](https://github.com/sqlfluff/sqlfluff/pull/1603) [@JoeHut](https://github.com/JoeHut)
* Allow global config for rule testcases [#1580](https://github.com/sqlfluff/sqlfluff/pull/1580) [@sti0](https://github.com/sti0)
* Snowflake dollar sign literals [#1591](https://github.com/sqlfluff/sqlfluff/pull/1591) [@myschkyna](https://github.com/myschkyna)
* Rename test/fixtures/parser directory to test/fixtures/dialects [#1585](https://github.com/sqlfluff/sqlfluff/pull/1585) [@tunetheweb](https://github.com/tunetheweb)
* Rename keyword files [#1584](https://github.com/sqlfluff/sqlfluff/pull/1584) [@tunetheweb](https://github.com/tunetheweb)
* Add some more unreserved keywords to BigQuery [#1588](https://github.com/sqlfluff/sqlfluff/pull/1588) [@tunetheweb](https://github.com/tunetheweb)
* Increase minimum runs before coverage report is issued [#1596](https://github.com/sqlfluff/sqlfluff/pull/1596) [@tunetheweb](https://github.com/tunetheweb)
* Snowflake: Support CURRENT_TIMESTAMP as a column default value [#1578](https://github.com/sqlfluff/sqlfluff/pull/1578) [@wong-codaio](https://github.com/wong-codaio)
* T-SQL temp tables [#1574](https://github.com/sqlfluff/sqlfluff/pull/1574) [@jpers36](https://github.com/jpers36)

## 🐛 Bug Fixes

* Fix NoneType exception in L031 [#1643](https://github.com/sqlfluff/sqlfluff/pull/1643) [@tunetheweb](https://github.com/tunetheweb)
* Stop rule L048 complaining if literal is followed by a semicolon [#1638](https://github.com/sqlfluff/sqlfluff/pull/1638) [@tunetheweb](https://github.com/tunetheweb)
* L031 desc updated to cover both 'from' and 'join' [#1625](https://github.com/sqlfluff/sqlfluff/pull/1625) [@nevado](https://github.com/nevado)
* Snowflake auto increments fixes [#1620](https://github.com/sqlfluff/sqlfluff/pull/1620) [@myschkyna](https://github.com/myschkyna)
* Fix DECLARE Delimitation [#1615](https://github.com/sqlfluff/sqlfluff/pull/1615) [@jpers36](https://github.com/jpers36)
* Snowflake drop column fixes [#1618](https://github.com/sqlfluff/sqlfluff/pull/1618) [@myschkyna](https://github.com/myschkyna)
* T-SQL: fix statement delimitation [#1612](https://github.com/sqlfluff/sqlfluff/pull/1612) [@jpers36](https://github.com/jpers36)
* Snowflake: Fixed data type casting not working in `SET` statement [#1604](https://github.com/sqlfluff/sqlfluff/pull/1604) [@wong-codaio](https://github.com/wong-codaio)
* Postgres dialect: Fix parse error for "on delete", "on update" clauses in column constraints [#1586](https://github.com/sqlfluff/sqlfluff/pull/1586) [@samlader](https://github.com/samlader)
* Fix AttributeError: 'NoneType' object has no attribute 'get_child' error with rule L031 [#1595](https://github.com/sqlfluff/sqlfluff/pull/1595) [@barrywhart](https://github.com/barrywhart)
* Fix zero length templated file bug. [#1577](https://github.com/sqlfluff/sqlfluff/pull/1577) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Fully remove exasol_fs dialect and bump version [#1573](https://github.com/sqlfluff/sqlfluff/pull/1573) [@alanmcruickshank](https://github.com/alanmcruickshank)

## [0.6.9] - 2021-10-08

Another dbt bugfix from 0.6.7 and 0.6.8, plus a host of dialect and syntax improvements.
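One of the enhancements below adds a rule warning on `= NULL` and `<> NULL` comparisons, which never match anything in SQL. A minimal sketch of how that surfaces through the simple Python API (the query is illustrative; `IS NULL` is the form the rule steers towards):

```python
import sqlfluff

# "WHERE b = NULL" is always false in SQL, so the new rule flags the
# comparison and points towards "WHERE b IS NULL" instead.
violations = sqlfluff.lint("SELECT a FROM tbl WHERE b = NULL\n")
for violation in violations:
    print(violation["code"], violation["description"])
```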
## 🚀 Enhancements

* Correct and expand Snowflake CREATE TABLE syntax [#1567](https://github.com/sqlfluff/sqlfluff/pull/1567) [@tunetheweb](https://github.com/tunetheweb)
* Support brackets in Postgres Meta commands [#1548](https://github.com/sqlfluff/sqlfluff/pull/1548) [@tunetheweb](https://github.com/tunetheweb)
* Added type hints to rule files [#1515](https://github.com/sqlfluff/sqlfluff/pull/1515) [@ttomasz](https://github.com/ttomasz)
* Update Rule L028 to handle T-SQL PIVOT columns [#1545](https://github.com/sqlfluff/sqlfluff/pull/1545) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL IF/ELSE [#1564](https://github.com/sqlfluff/sqlfluff/pull/1564) [@jpers36](https://github.com/jpers36)
* Enums for format types and colors added [#1558](https://github.com/sqlfluff/sqlfluff/pull/1558) [@adam-tokarski](https://github.com/adam-tokarski)
* Add dbt 0.21.0 to the test suite [#1566](https://github.com/sqlfluff/sqlfluff/pull/1566) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Merge EXASOL_FS dialect into EXASOL dialect [#1498](https://github.com/sqlfluff/sqlfluff/pull/1498) [@sti0](https://github.com/sti0)
* T-SQL - BEGIN/END blocks [#1553](https://github.com/sqlfluff/sqlfluff/pull/1553) [@jpers36](https://github.com/jpers36)
* Small refactor with type hints and string formatting [#1525](https://github.com/sqlfluff/sqlfluff/pull/1525) [@adam-tokarski](https://github.com/adam-tokarski)
* Add Github Preview Image [#1557](https://github.com/sqlfluff/sqlfluff/pull/1557) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Support SETOF in Postgres [#1522](https://github.com/sqlfluff/sqlfluff/pull/1522) [@tunetheweb](https://github.com/tunetheweb)
* Support Double Precision in ANSI [#1524](https://github.com/sqlfluff/sqlfluff/pull/1524) [@tunetheweb](https://github.com/tunetheweb)
* Support LATERAL joins in Postgres [#1519](https://github.com/sqlfluff/sqlfluff/pull/1519) [@adam-tokarski](https://github.com/adam-tokarski)
* Add a rule to warn on "= NULL" or "<> NULL" comparisons [#1527](https://github.com/sqlfluff/sqlfluff/pull/1527) [@barrywhart](https://github.com/barrywhart)
* Support Group and Groups as table names [#1546](https://github.com/sqlfluff/sqlfluff/pull/1546) [@tunetheweb](https://github.com/tunetheweb)
* Support more complex IN (...) expressions [#1550](https://github.com/sqlfluff/sqlfluff/pull/1550) [@tunetheweb](https://github.com/tunetheweb)
* Support CROSS APPLY and OUTER APPLY and TOP in T-SQL [#1551](https://github.com/sqlfluff/sqlfluff/pull/1551) [@tunetheweb](https://github.com/tunetheweb)
* Add support for WITHOUT ROWID to SQLite [#1531](https://github.com/sqlfluff/sqlfluff/pull/1531) [@tunetheweb](https://github.com/tunetheweb)
* Postgres: add `CONCURRENTLY` and `FINALIZE` keywords to `DETACH PARTITION` [#1529](https://github.com/sqlfluff/sqlfluff/pull/1529) [@kevinmarsh](https://github.com/kevinmarsh)
* Better support of MySQL CREATE TABLE TIMESTAMP/DATESTAMP [#1530](https://github.com/sqlfluff/sqlfluff/pull/1530) [@tunetheweb](https://github.com/tunetheweb)
* "Found unparsable section" instead of stack trace when multiple semicolons provided [#1517](https://github.com/sqlfluff/sqlfluff/pull/1517) [@adam-tokarski](https://github.com/adam-tokarski)

## 🐛 Bug Fixes

* Fix test coverage [#1569](https://github.com/sqlfluff/sqlfluff/pull/1569) [@tunetheweb](https://github.com/tunetheweb)
* Remove lint_templated_tokens as it no longer does anything [#1570](https://github.com/sqlfluff/sqlfluff/pull/1570) [@tunetheweb](https://github.com/tunetheweb)
* Fix broken block comments in exasol [#1565](https://github.com/sqlfluff/sqlfluff/pull/1565) [@sti0](https://github.com/sti0)
* Rethink sequence_files in dbt templater. [#1563](https://github.com/sqlfluff/sqlfluff/pull/1563) [@alanmcruickshank](https://github.com/alanmcruickshank)
* T-SQL: fix STRING_AGG() WITHIN GROUP clause [#1559](https://github.com/sqlfluff/sqlfluff/pull/1559) [@jpers36](https://github.com/jpers36)
* fix spelling: occurence>occurrence [#1507](https://github.com/sqlfluff/sqlfluff/pull/1507) [@jpers36](https://github.com/jpers36)

## [0.6.8] - 2021-10-05

Fixed a dbt bug introduced in 0.6.7 - apologies!

## What’s Changed

* SQLFluff can't find dbt models [#1513](https://github.com/sqlfluff/sqlfluff/pull/1513) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Support for unicode literals [#1511](https://github.com/sqlfluff/sqlfluff/pull/1511) [@adam-tokarski](https://github.com/adam-tokarski)

## [0.6.7] - 2021-10-04

Lots of fixes to our rules (particularly when running `sqlfluff fix`, and particularly for Jinja and dbt templates). We also have good improvements to Exasol, Snowflake, and T-SQL dialects amongst others. Plus we added Hive and SQLite as supported dialects!
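With Hive and SQLite joining the dialect list, either can now be selected by name. A minimal sketch via the simple Python API (assuming `sqlfluff.parse` accepts the same `dialect` keyword as `sqlfluff.lint`; the statement is illustrative):

```python
import sqlfluff

# sqlfluff.parse raises on unparsable input, so reaching the print
# statement means the dialect understood the SQL.
for dialect in ("sqlite", "hive"):
    sqlfluff.parse("SELECT 1\n", dialect=dialect)
    print(f"parsed cleanly under {dialect}")
```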
## What’s Changed

* Snowflake better WAREHOUSE and CREATE (EXTERNAL) TABLES support [#1508](https://github.com/sqlfluff/sqlfluff/pull/1508) [@tunetheweb](https://github.com/tunetheweb)
* Exasol: Fix typo in `REORGANIZE` statement [#1509](https://github.com/sqlfluff/sqlfluff/pull/1509) [@sti0](https://github.com/sti0)
* Fix bug that can prevent linting ephemeral dbt models [#1496](https://github.com/sqlfluff/sqlfluff/pull/1496) [@barrywhart](https://github.com/barrywhart)
* Disable rules L026 and L028 for BigQuery by default, with option to re-enable [#1504](https://github.com/sqlfluff/sqlfluff/pull/1504) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery keywords [#1506](https://github.com/sqlfluff/sqlfluff/pull/1506) [@tunetheweb](https://github.com/tunetheweb)
* Inline --noqa not always honoured by "sqlfluff fix" [#1502](https://github.com/sqlfluff/sqlfluff/pull/1502) [@barrywhart](https://github.com/barrywhart)
* Snowflake - fix parsing of UNPIVOT [#1505](https://github.com/sqlfluff/sqlfluff/pull/1505) [@michael-the1](https://github.com/michael-the1)
* Better parsing of DATEADD function [#1486](https://github.com/sqlfluff/sqlfluff/pull/1486) [@jpers36](https://github.com/jpers36)
* Fix handling of ISNULL and NOTNULL keywords [#1483](https://github.com/sqlfluff/sqlfluff/pull/1483) [@leamingrad](https://github.com/leamingrad)
* Improved test case names [#1501](https://github.com/sqlfluff/sqlfluff/pull/1501) [@ttomasz](https://github.com/ttomasz)
* Exasol: Fix CREATE TABLE in-/outline constraint / Adjusted DISTRIBUTE/PARTITION clause [#1491](https://github.com/sqlfluff/sqlfluff/pull/1491) [@sti0](https://github.com/sti0)
* Add support for SnowSQL variables [#1497](https://github.com/sqlfluff/sqlfluff/pull/1497) [@samlader](https://github.com/samlader)
* Ignore erroneous newline segments in L016 (e.g. Jinja for loops) [#1494](https://github.com/sqlfluff/sqlfluff/pull/1494) [@tunetheweb](https://github.com/tunetheweb)
* Indentation error on Jinja templated test case [#1444](https://github.com/sqlfluff/sqlfluff/pull/1444) [@barrywhart](https://github.com/barrywhart)
* Improve EXASOL dialect [#1484](https://github.com/sqlfluff/sqlfluff/pull/1484) [@sti0](https://github.com/sti0)
* T-SQL dialect - +support for CONVERT() special function [#1489](https://github.com/sqlfluff/sqlfluff/pull/1489) [@jpers36](https://github.com/jpers36)
* Allow Postgres column references to use `AT TIME ZONE` [#1485](https://github.com/sqlfluff/sqlfluff/pull/1485) [@leamingrad](https://github.com/leamingrad)
* T-SQL dialect - provide alternate ASA PR incorporating ASA into T-SQL [#1478](https://github.com/sqlfluff/sqlfluff/pull/1478) [@jpers36](https://github.com/jpers36)
* Modest parser performance improvement [#1475](https://github.com/sqlfluff/sqlfluff/pull/1475) [@NathanHowell](https://github.com/NathanHowell)
* Disable rule L033 for dialects that do not support it (e.g. Exasol, Postgres) [#1482](https://github.com/sqlfluff/sqlfluff/pull/1482) [@tunetheweb](https://github.com/tunetheweb)
* Adding a new BaseFileSegment class for FileSegments to inherit from [#1473](https://github.com/sqlfluff/sqlfluff/pull/1473) [@sti0](https://github.com/sti0)
* EXASOL_FS: Fix adapter script type [#1480](https://github.com/sqlfluff/sqlfluff/pull/1480) [@sti0](https://github.com/sti0)
* Dialect/tsql update - added pivot / unpivot, view support, sequence support on table creation [#1469](https://github.com/sqlfluff/sqlfluff/pull/1469) [@ericmuijs](https://github.com/ericmuijs)
* Correct typo in SQLFluff name [#1470](https://github.com/sqlfluff/sqlfluff/pull/1470) [@tunetheweb](https://github.com/tunetheweb)
* Stop L008 from adding spaces for simple SELECTs [#1461](https://github.com/sqlfluff/sqlfluff/pull/1461) [@CyberShadow](https://github.com/CyberShadow)
* Add SQLite dialect [#1453](https://github.com/sqlfluff/sqlfluff/pull/1453) [@tunetheweb](https://github.com/tunetheweb)
* Fix Windows Clause for Exasol [#1463](https://github.com/sqlfluff/sqlfluff/pull/1463) [@tunetheweb](https://github.com/tunetheweb)
* Add CHECK constraint syntax to ANSI SQL [#1451](https://github.com/sqlfluff/sqlfluff/pull/1451) [@tunetheweb](https://github.com/tunetheweb)
* Move Exasol test statements fixtures from Python to SQL files [#1449](https://github.com/sqlfluff/sqlfluff/pull/1449) [@tunetheweb](https://github.com/tunetheweb)
* fix spelling of "preceding" [#1455](https://github.com/sqlfluff/sqlfluff/pull/1455) [@jpers36](https://github.com/jpers36)
* Add NORMALIZE to Teradata dialect [#1448](https://github.com/sqlfluff/sqlfluff/pull/1448) [@tunetheweb](https://github.com/tunetheweb)
* Add @ and $ symbols to Exasol to avoid lexing errors [#1447](https://github.com/sqlfluff/sqlfluff/pull/1447) [@tunetheweb](https://github.com/tunetheweb)
* Stop fix adding then removing whitespace [#1443](https://github.com/sqlfluff/sqlfluff/pull/1443) [@barrywhart](https://github.com/barrywhart)
* Stop exception in L016 for long Jinja comments [#1440](https://github.com/sqlfluff/sqlfluff/pull/1440) [@tunetheweb](https://github.com/tunetheweb)
* Fix some issues where the SQL file is corrupted by lint "fixes" in or near Jinja loops [#1431](https://github.com/sqlfluff/sqlfluff/pull/1431) [@barrywhart](https://github.com/barrywhart)
* T-SQL: Remove Limit and NamedWindow segments as not supported in T-SQL [#1420](https://github.com/sqlfluff/sqlfluff/pull/1420) [@jpers36](https://github.com/jpers36)
* Fix runtime error (IndexError) when linting file with jinja "if" [#1430](https://github.com/sqlfluff/sqlfluff/pull/1430) [@barrywhart](https://github.com/barrywhart)
* Add Hive dialect (#985) [@satish-ravi](https://github.com/satish-ravi)
* Further fix for L036 [#1428](https://github.com/sqlfluff/sqlfluff/pull/1428) [@tunetheweb](https://github.com/tunetheweb)
* Add default parameter to dbt "var" macro stub [#1426](https://github.com/sqlfluff/sqlfluff/pull/1426) [@CyberShadow](https://github.com/CyberShadow)

## [0.6.6] - 2021-09-20

Fixed some of our autofix rules where running `fix` sometimes made unintended changes. Added config to rules L011 and L012 to allow preferring implicit aliasing. Also further improved our Postgres support and documentation.
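On the aliasing change: a minimal sketch of the default (explicit) behaviour via the simple Python API, assuming the `rules` argument documented for `sqlfluff.fix`; the query and the expected output shown in the comment are illustrative.

```python
import sqlfluff

# By default L011 still enforces explicit aliasing, so fix() rewrites
# an implicit table alias to use AS. The new config option (set in
# .sqlfluff rather than here) flips that preference to implicit.
fixed = sqlfluff.fix("SELECT foo FROM my_table t\n", rules=["L011"])
print(fixed)  # expected: SELECT foo FROM my_table AS t
```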
### What’s Changed

* Rule L036 bug fixes [#1427](https://github.com/sqlfluff/sqlfluff/pull/1427) [@tunetheweb](https://github.com/tunetheweb)
* Added support for psql meta commands to Postgres [#1423](https://github.com/sqlfluff/sqlfluff/pull/1423) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Remaining line endings [#1415](https://github.com/sqlfluff/sqlfluff/pull/1415) [@tunetheweb](https://github.com/tunetheweb)
* T-SQL: Remove match possibilities for segments with no T-SQL equivalent [#1416](https://github.com/sqlfluff/sqlfluff/pull/1416) [@jpers36](https://github.com/jpers36)
* Fix generate error on test file with just a comment [#1413](https://github.com/sqlfluff/sqlfluff/pull/1413) [@tunetheweb](https://github.com/tunetheweb)
* Misc fixes to workflow files [#1412](https://github.com/sqlfluff/sqlfluff/pull/1412) [@tunetheweb](https://github.com/tunetheweb)
* Added support for escape character strings to Postgres [#1409](https://github.com/sqlfluff/sqlfluff/pull/1409) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Issue 845: L016 should compute line length prior to template expansion [#1411](https://github.com/sqlfluff/sqlfluff/pull/1411) [@barrywhart](https://github.com/barrywhart)
* Add .editorconfig config and enforce style rules [#1410](https://github.com/sqlfluff/sqlfluff/pull/1410) [@tunetheweb](https://github.com/tunetheweb)
* Allow optional enforcing of implicit aliasing of tables (L011) and columns (L012) [#1402](https://github.com/sqlfluff/sqlfluff/pull/1402) [@tunetheweb](https://github.com/tunetheweb)
* Better error messages on error [#1407](https://github.com/sqlfluff/sqlfluff/pull/1407) [@tunetheweb](https://github.com/tunetheweb)
* Add README on how to generate docs [#1403](https://github.com/sqlfluff/sqlfluff/pull/1403) [@tunetheweb](https://github.com/tunetheweb)
* Fix extra underscores in case rules (L010 and L014) [#1396](https://github.com/sqlfluff/sqlfluff/pull/1396) [@tunetheweb](https://github.com/tunetheweb)
* Remove unused deps in tox test docbuild [#1406](https://github.com/sqlfluff/sqlfluff/pull/1406) [@zhongjiajie](https://github.com/zhongjiajie)
* Prevent CodeCov commenting on coverage differences too early [#1404](https://github.com/sqlfluff/sqlfluff/pull/1404) [@tunetheweb](https://github.com/tunetheweb)
* Fix "sqlfluff fix compatible" rules indenting too much in documentation [#1405](https://github.com/sqlfluff/sqlfluff/pull/1405) [@tunetheweb](https://github.com/tunetheweb)
* Fix documentation SQL highlight error [#1393](https://github.com/sqlfluff/sqlfluff/pull/1393) [@zhongjiajie](https://github.com/zhongjiajie)
* Support TIMESTAMPTZ in TIME ZONE queries for Postgres [#1398](https://github.com/sqlfluff/sqlfluff/pull/1398) [@tunetheweb](https://github.com/tunetheweb)
* Improve datatypes: CHARACTER VARYING for ANSI, and Postgres and also TIMESTAMP AT TIME ZONE for Postgres [#1378](https://github.com/sqlfluff/sqlfluff/pull/1378) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve rules L003 and L019 by processing multi-line fixes in one pass. [#1391](https://github.com/sqlfluff/sqlfluff/pull/1391) [@barrywhart](https://github.com/barrywhart)
* Correct codecov badge for Docs website [#1390](https://github.com/sqlfluff/sqlfluff/pull/1390) [@tunetheweb](https://github.com/tunetheweb)
* Change fix to use non-zero exit code if unfixable [#1389](https://github.com/sqlfluff/sqlfluff/pull/1389) [@tunetheweb](https://github.com/tunetheweb)
* Bugfix, frame clauses in window functions were not working [#1381](https://github.com/sqlfluff/sqlfluff/pull/1381) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Handle template and unfixable errors when fixing stdin [#1385](https://github.com/sqlfluff/sqlfluff/pull/1385) [@nolanbconaway](https://github.com/nolanbconaway)
* CREATE, ALTER, DROP SEQUENCE support, with Postgres extensions [#1380](https://github.com/sqlfluff/sqlfluff/pull/1380) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Postgres analyze [#1377](https://github.com/sqlfluff/sqlfluff/pull/1377) [@WittierDinosaur](https://github.com/WittierDinosaur)
* L016: "sqlfluff fix" adds too many newlines [#1382](https://github.com/sqlfluff/sqlfluff/pull/1382) [@barrywhart](https://github.com/barrywhart)
* L003 fix mixes hanging and clean indents [#1383](https://github.com/sqlfluff/sqlfluff/pull/1383) [@barrywhart](https://github.com/barrywhart)
* L034 should not fix inside "INSERT" or "CREATE TABLE AS SELECT" [#1384](https://github.com/sqlfluff/sqlfluff/pull/1384) [@barrywhart](https://github.com/barrywhart)

## [0.6.5] - 2021-09-10

### What’s Changed

This release includes initial support of Transact-SQL (T-SQL), much better Postgres and Snowflake support, improvements to our documentation, 100% coverage for Python code (with a small number of accepted exceptions), along with numerous other bug fixes and improvements.

Many thanks to all the [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors) helping to improve SQLFluff!
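A quick sketch of pointing the linter at the new T-SQL dialect from Python (the `tsql` dialect label follows the naming convention of the other dialects listed by `sqlfluff dialects`; the query is illustrative):

```python
import sqlfluff

# Lint a simple statement under the new T-SQL dialect. Any rule
# violations come back with their codes; an empty list means the
# statement parsed and linted cleanly.
violations = sqlfluff.lint("SELECT name FROM dbo.my_table\n", dialect="tsql")
print([v["code"] for v in violations])
```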
### Complete list of changes

* Simplify rule L030 and fix recursion bug ([#1376](https://github.com/sqlfluff/sqlfluff/pull/1376)) ([@tunetheweb](https://github.com/tunetheweb))
* Move from CircleCI to GitHub Actions for Continuous Integration ([#1361](https://github.com/sqlfluff/sqlfluff/pull/1361)) ([@tunetheweb](https://github.com/tunetheweb))
* Postgres enhance create index ([#1375](https://github.com/sqlfluff/sqlfluff/pull/1375)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Initial support for Transact-SQL (T-SQL) dialect ([#1313](https://github.com/sqlfluff/sqlfluff/pull/1313)) ([@ericmuijs](https://github.com/ericmuijs))
* Handle initial whitespace lines in rule L001 ([#1372](https://github.com/sqlfluff/sqlfluff/pull/1372)) ([@tunetheweb](https://github.com/tunetheweb))
* Postgres Improved DEFAULT column constraint support ([#1373](https://github.com/sqlfluff/sqlfluff/pull/1373)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Minor grammar, spelling, and readability fixes ([#1370](https://github.com/sqlfluff/sqlfluff/pull/1370)) ([@Fdawgs](https://github.com/Fdawgs))
* Issues 854, 1321: Handle Jinja leading whitespace-only lines ([#1364](https://github.com/sqlfluff/sqlfluff/pull/1364)) ([@barrywhart](https://github.com/barrywhart))
* Enhanced the Postgres grammar for create table ([#1369](https://github.com/sqlfluff/sqlfluff/pull/1369)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Added ability to Grant and Revoke Grant to multiple users ([#1367](https://github.com/sqlfluff/sqlfluff/pull/1367)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Add BigQuery Parameter Lexing and Parsing ([#1363](https://github.com/sqlfluff/sqlfluff/pull/1363)) ([@rileyrunnoe](https://github.com/rileyrunnoe))
* Rule L030 bugfix ([#1360](https://github.com/sqlfluff/sqlfluff/pull/1360)) ([@WittierDinosaur](https://github.com/WittierDinosaur))
* Add Postgres dialect for COMMENT ON ([#1358](https://github.com/sqlfluff/sqlfluff/pull/1358)) ([@miketheman](https://github.com/miketheman))
* Allow ORDER BY and LIMIT after QUALIFY in BigQuery ([#1362](https://github.com/sqlfluff/sqlfluff/pull/1362)) ([@tunetheweb](https://github.com/tunetheweb))
* Correct CircleCI badge reference ([#1359](https://github.com/sqlfluff/sqlfluff/pull/1359)) [@miketheman](https://github.com/miketheman)
* Minor grammar corrections to documentation ([#1355](https://github.com/sqlfluff/sqlfluff/pull/1355)) [@miketheman](https://github.com/miketheman)
* Pytest coverage exceptions to get us to 100% coverage! ([#1346](https://github.com/sqlfluff/sqlfluff/pull/1346)) [@tunetheweb](https://github.com/tunetheweb)
* Greatly improved Snowflake syntax support ([#1353](https://github.com/sqlfluff/sqlfluff/pull/1353)) [@tunetheweb](https://github.com/tunetheweb)
* Postgres keyword support ([#1347](https://github.com/sqlfluff/sqlfluff/pull/1347)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Added full support for postgres's ALTER DEFAULT PRIVILEGES. ([#1350](https://github.com/sqlfluff/sqlfluff/pull/1350)) [@creste](https://github.com/creste)
* Show all LintResult in Rule_L020 ([#1348](https://github.com/sqlfluff/sqlfluff/pull/1348)) [@zhongjiajie](https://github.com/zhongjiajie)
* Enhance error message L010 based on configuration ([#1351](https://github.com/sqlfluff/sqlfluff/pull/1351)) [@zhongjiajie](https://github.com/zhongjiajie)
* Remove unused variable insert_str ([#1352](https://github.com/sqlfluff/sqlfluff/pull/1352)) [@zhongjiajie](https://github.com/zhongjiajie)
* Pytest coverage exceptions for Core code - part 1 ([#1343](https://github.com/sqlfluff/sqlfluff/pull/1343)) [@tunetheweb](https://github.com/tunetheweb)
* BigQuery: Allow Qualify Clause for UnorderedSelectStatements ([#1341](https://github.com/sqlfluff/sqlfluff/pull/1341)) [@tunetheweb](https://github.com/tunetheweb)
* Postgres "ALTER TABLE" enhancement, and timestamp bug fix ([#1338](https://github.com/sqlfluff/sqlfluff/pull/1338)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve pytest coverage for non-core code ([#1319](https://github.com/sqlfluff/sqlfluff/pull/1319)) [@tunetheweb](https://github.com/tunetheweb)
* Support additional GRANTs in Postgres ([#1339](https://github.com/sqlfluff/sqlfluff/pull/1339)) [@creste](https://github.com/creste)
* Allow optional alias for BigQuery WITH OFFSET ([#1330](https://github.com/sqlfluff/sqlfluff/pull/1330)) [@tunetheweb](https://github.com/tunetheweb)
* Improve function support in Postgres dialect ([#1336](https://github.com/sqlfluff/sqlfluff/pull/1336)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Using github star instead of watch in docs ([#1337](https://github.com/sqlfluff/sqlfluff/pull/1337)) [@zhongjiajie](https://github.com/zhongjiajie)
* Add unittest for rules docstring ([#1335](https://github.com/sqlfluff/sqlfluff/pull/1335)) [@zhongjiajie](https://github.com/zhongjiajie)
* Bugfix PR, fixes issue [#1333](https://github.com/sqlfluff/sqlfluff/issues/1333), wherein test___main___help() defaults to your default Python installation ([#1334](https://github.com/sqlfluff/sqlfluff/pull/1334)) [@WittierDinosaur](https://github.com/WittierDinosaur)
* Improve wording of L007 now the before/after is configurable ([#1325](https://github.com/sqlfluff/sqlfluff/pull/1325)) [@tunetheweb](https://github.com/tunetheweb)
* Fix a couple of small issues with CI jobs ([#1326](https://github.com/sqlfluff/sqlfluff/pull/1326)) [@tunetheweb](https://github.com/tunetheweb)
* Add updated sqlfluff graphics and source. ([#1315](https://github.com/sqlfluff/sqlfluff/pull/1315)) [@alanmcruickshank](https://github.com/alanmcruickshank)
* Issue 1277: Enforce that YML test files are computer generated and not edited ([#1279](https://github.com/sqlfluff/sqlfluff/pull/1279)) [@barrywhart](https://github.com/barrywhart)
* Fix typo in README ([#1320](https://github.com/sqlfluff/sqlfluff/pull/1320)) [@tunetheweb](https://github.com/tunetheweb)
* Fix link in README ([#1316](https://github.com/sqlfluff/sqlfluff/pull/1316)) [@jmks](https://github.com/jmks)
* Update documentation to make the project more discoverable ([#1311](https://github.com/sqlfluff/sqlfluff/pull/1311)) [@tunetheweb](https://github.com/tunetheweb)
* Show latest version number on unsupported Python error message ([#1307](https://github.com/sqlfluff/sqlfluff/pull/1307)) [@zhongjiajie](https://github.com/zhongjiajie)
* Fix typo in github PR template ([#1308](https://github.com/sqlfluff/sqlfluff/pull/1308)) [@zhongjiajie](https://github.com/zhongjiajie)

## [0.6.4] - 2021-08-20

### Added

* Added support for empty WINDOWS specifications ([#1293](https://github.com/sqlfluff/sqlfluff/pull/1293)) [@matthieucan](https://github.com/matthieucan)
* Added auto release drafter ([#1287](https://github.com/sqlfluff/sqlfluff/pull/1287)) [@tunetheweb](https://github.com/tunetheweb)

### Changed

* Fix typo in the In The Wild page ([#1285](https://github.com/sqlfluff/sqlfluff/pull/1285)) [@tunetheweb](https://github.com/tunetheweb)
* Fix spacing issue for BigQuery UNNEST statement for rules L003 and L025 ([#1303](https://github.com/sqlfluff/sqlfluff/pull/1303)) [@tunetheweb](https://github.com/tunetheweb)
* Update GitHub templates ([#1297](https://github.com/sqlfluff/sqlfluff/pull/1297)) [@tunetheweb](https://github.com/tunetheweb)
* Allow BigQuery UDF with triple quoted bodies to pass rule L048 ([#1300](https://github.com/sqlfluff/sqlfluff/pull/1300)) [@tunetheweb](https://github.com/tunetheweb)
* Add Parameterless Functions and more function names support to BigQuery ([#1299](https://github.com/sqlfluff/sqlfluff/pull/1299)) [@tunetheweb](https://github.com/tunetheweb)
* Add release drafter ([#1295](https://github.com/sqlfluff/sqlfluff/pull/1295)) [@tunetheweb](https://github.com/tunetheweb)
* Support empty OVER() clause in Window Specification ([#1294](https://github.com/sqlfluff/sqlfluff/pull/1294)) [@tunetheweb](https://github.com/tunetheweb)

## [0.6.3] - 2021-08-16

### Added

- Support for primary index name, collect stats improvement, COMMENT statement for teradata dialect [#1232](https://github.com/sqlfluff/sqlfluff/issues/1232)
- Support config for L007 to prefer end of line operators [#1261](https://github.com/sqlfluff/sqlfluff/issues/1261)
- Support for DETERMINISTIC user defined functions in BigQuery dialect [#1251](https://github.com/sqlfluff/sqlfluff/issues/1251)
- Support more identifiers in BigQuery dialect [#1253](https://github.com/sqlfluff/sqlfluff/issues/1253)
- Support function member field references in BigQuery dialect [#1255](https://github.com/sqlfluff/sqlfluff/issues/1255)
- Support alternative indentation for USING and ON clauses [#1250](https://github.com/sqlfluff/sqlfluff/issues/1250)
- Support COUNT(0) preference over COUNT(*) or COUNT(1) [#1260](https://github.com/sqlfluff/sqlfluff/issues/1260)
- Support for BigQuery "CREATE table OPTIONS ( description = 'desc' )" [#1205](https://github.com/sqlfluff/sqlfluff/issues/1205)
- Support wildcard member field references in BigQuery dialect [#1269](https://github.com/sqlfluff/sqlfluff/issues/1269)
- Support ARRAYS of STRUCTs in BigQuery dialect [#1271](https://github.com/sqlfluff/sqlfluff/issues/1271)
- Support fields of field references in BigQuery dialect [#1276](https://github.com/sqlfluff/sqlfluff/issues/1276)
- Support OFFSET and ORDINAL clauses of Array Functions in BigQuery dialect [#1171](https://github.com/sqlfluff/sqlfluff/issues/1171)
- Added check for generated YML files [#1277](https://github.com/sqlfluff/sqlfluff/issues/1277)
- Support QUALIFY to BigQuery dialect [#1242](https://github.com/sqlfluff/sqlfluff/issues/1242)

### Changed

- Fix comma removed by L019 [#939](https://github.com/sqlfluff/sqlfluff/issues/939)
- Update L019 (leading/trailing comma rule) so it doesn't run on unparsable code.
- The `--nocolor` command-line option should suppress emoji output [#1246](https://github.com/sqlfluff/sqlfluff/issues/1246)
- Added HTTP Archive to the [In The Wild page](https://docs.sqlfluff.com/en/stable/inthewild.html)

## [0.6.2] - 2021-07-22

### Added

- Support for looping statements (loop, while, repeat) and supporting statements to mysql dialect [#1180](https://github.com/sqlfluff/sqlfluff/issues/1180)

### Changed

- Added dbt 0.20.* to the default test suite.
- Updated manifest loading in dbt 0.20.* to use the new `ManifestLoader` [#1220](https://github.com/sqlfluff/sqlfluff/pull/1220)
- Handle newlines in rule list configuration in .sqlfluff [#1215](https://github.com/sqlfluff/sqlfluff/issues/1215)
- Fix looping interaction between L008 and L030 [#1207](https://github.com/sqlfluff/sqlfluff/issues/1207)

## [0.6.1] - 2021-07-16

### Added

- Linting output now supports GitHub Actions [#1190](https://github.com/sqlfluff/sqlfluff/issues/1190)
- Support for QUALIFY syntax specific to teradata dialect [#1184](https://github.com/sqlfluff/sqlfluff/issues/1184)
- Support for TRUNCATE statement [#1194](https://github.com/sqlfluff/sqlfluff/pull/1194)
- Support for prepared statement syntaxes specific to mysql dialect [#1147](https://github.com/sqlfluff/sqlfluff/issues/1147)
- Support for GET DIAGNOSTICS statement syntax specific to mysql dialect [#1148](https://github.com/sqlfluff/sqlfluff/issues/1148)
- Support for cursor syntax specific to mysql dialect [#1145](https://github.com/sqlfluff/sqlfluff/issues/1145)
- Support sequential shorthand casts [#1178](https://github.com/sqlfluff/sqlfluff/pull/1178)
- Support for select statement syntax specific to mysql dialect [#1175](https://github.com/sqlfluff/sqlfluff/issues/1175)
- Support for the CALL statement for the mysql dialect [#1144](https://github.com/sqlfluff/sqlfluff/issues/1144)
- Support for OVERLAPS predicate [#1091](https://github.com/sqlfluff/sqlfluff/issues/1091)
- Support for the CREATE/DROP PROCEDURE statement for the mysql dialect [#901](https://github.com/sqlfluff/sqlfluff/issues/901)
- Specific allowed/required syntaxes for CREATE/DROP FUNCTION within the mysql dialect [#901](https://github.com/sqlfluff/sqlfluff/issues/901)
- Support for DECLARE statement for the mysql dialect [#1140](https://github.com/sqlfluff/sqlfluff/issues/1140)
- Support for the IF-THEN-ELSEIF-ELSE syntax for the mysql dialect [#1140](https://github.com/sqlfluff/sqlfluff/issues/1140)
- Support for the DEFINER syntax for the mysql dialect [#1131](https://github.com/sqlfluff/sqlfluff/issues/1131)
- Preserve existing file encoding in the "fix" command. Partially addresses [#654](https://github.com/sqlfluff/sqlfluff/issues/654)
- Support for DECLARE and SET variable syntax for the BigQuery dialect [#1127](https://github.com/sqlfluff/sqlfluff/issues/1127)
- Support for ALTER TASK statement on Snowflake [#1211](https://github.com/sqlfluff/sqlfluff/pull/1211)

### Changed

- Fix runtime error in diff-cover plugin caused by new diff-cover release 6.1.0 [#1195](https://github.com/sqlfluff/sqlfluff/pull/1195)
- Resolved an issue with the snowflake dialect where backslash escaped single quoted strings led to fatal lexing errors [#1200](https://github.com/sqlfluff/sqlfluff/pull/1200)

### Contributors

- [@dflss](https://github.com/dflss) ([#1154](https://github.com/sqlfluff/sqlfluff/pull/1154))
- [@barrywhart](https://github.com/barrywhart) ([#1177](https://github.com/sqlfluff/sqlfluff/pull/1177), [#1195](https://github.com/sqlfluff/sqlfluff/pull/1195))
- [@niallrees](https://github.com/niallrees) ([#1178](https://github.com/sqlfluff/sqlfluff/pull/1178))
- [@barnabyshearer](https://github.com/barnabyshearer) ([#1194](https://github.com/sqlfluff/sqlfluff/pull/1194))
- [@silverbullettruck2001](https://github.com/silverbullettruck2001) ([#1141](https://github.com/sqlfluff/sqlfluff/pull/1141), [#1159](https://github.com/sqlfluff/sqlfluff/pull/1159), [#1161](https://github.com/sqlfluff/sqlfluff/pull/1161), [#1176](https://github.com/sqlfluff/sqlfluff/pull/1176), [#1179](https://github.com/sqlfluff/sqlfluff/pull/1179), [#1181](https://github.com/sqlfluff/sqlfluff/pull/1181), [#1193](https://github.com/sqlfluff/sqlfluff/pull/1193), [#1203](https://github.com/sqlfluff/sqlfluff/pull/1203))

## [0.6.0] - 2021-06-06

### Added

- Respect XDG base dirs on Mac OS ([#889](https://github.com/sqlfluff/sqlfluff/issues/889)).
- Added support for additional delimiters by creating a new DelimiterSegment in the ANSI dialect which defaults to the semicolon, but allows it to be more intuitive when overridden in a specific child dialect (mysql) ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added support for the DELIMITER statement in the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added support for function as a default column value [#849](https://github.com/sqlfluff/sqlfluff/issues/849).
- Add an `--include-metas` option for parse output to show the meta segments in the parse tree.
- Allow CREATE FUNCTION syntax without arguments [@bolajiwahab](https://github.com/bolajiwahab) [#1063](https://github.com/sqlfluff/sqlfluff/pull/1063).
- Added support for the CREATE/DROP PROCEDURE statement for the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Added specific allowed/required syntaxes for CREATE/DROP FUNCTION within the mysql dialect ([#901](https://github.com/sqlfluff/sqlfluff/issues/901))
- Now possible to run sqlfluff commands outside the project root when using the dbt templater.

### Changed

- Renamed --parallel CLI argument to --processes to be more accurate.
- L034 now ignores select statements which contain macros.
- L034 now ignores select statements part of a set expression, most commonly a union.
- Fix bug [#1082](https://github.com/sqlfluff/sqlfluff/issues/1082), adding support for BigQuery `select as struct '1' as bb, 2 as aa` syntax.
- Rationalisation of the placement of meta segments within templated queries to support more reliable indentation. This includes prioritising _longer_ invariant sections first and then dropping any shorter ones which then are relatively out of place.
- Additional logging within the lexer and templater engines.
- Allow meta segments to parse within `Delimited` grammars which otherwise don't allow gaps. This is facilitated through an optional argument to `trim_non_code_segments`.
- Fix bug [#1079](https://github.com/sqlfluff/sqlfluff/issues/1079), addressing issues with L025 and L026 with BigQuery column references involving `STRUCT`.
- [#1080](https://github.com/sqlfluff/sqlfluff/issues/1080) Add SET SCHEMA and DROP SCHEMA support to ANSI dialect.

### Contributors

- [@bolajiwahab](https://github.com/bolajiwahab) ([#1063](https://github.com/sqlfluff/sqlfluff/pull/1063))
- [@silverbullettruck2001](https://github.com/silverbullettruck2001) ([#1126](https://github.com/sqlfluff/sqlfluff/pull/1126), [#1099](https://github.com/sqlfluff/sqlfluff/pull/1099), [#1141](https://github.com/sqlfluff/sqlfluff/pull/1141))

## [0.6.0a2] - 2021-05-27

### Changed

- Better exception handling for the simple parsing API (`sqlfluff.parse`) which now raises an exception which holds all potential parsing issues and prints nicely with more than one issue.
- Fix bug [#1037](https://github.com/sqlfluff/sqlfluff/issues/1037), in which fix logging had been sent to stdout when reading data from stdin.
- Add a little bit of fun on CLI exit 🎉!
- Disabled models in the dbt templater are now skipped entirely rather than returning an untemplated file.
- Add a changelog check to SQLFluff continuous integration.
- Fix bug [#1083](https://github.com/sqlfluff/sqlfluff/issues/1083), adding support for BigQuery named function arguments, used with functions such as [ST_GEOGFROMGEOJSON()](https://cloud.google.com/bigquery/docs/reference/standard-sql/geography_functions#st_geogfromgeojson)
- Update documentation links to sqlfluff-online.

## [0.6.0a1] - 2021-05-15

### Added

- Lint and fix parallelism using `--parallel` CLI argument
- Fix [#1051](https://github.com/sqlfluff/sqlfluff/issues/1051), adding support for bitwise operators `&`, `|`, `^`, `<<`, `>>`

## [0.5.6] - 2021-05-14

- Bugfix release for an issue in `L016` introduced in `0.5.4`.
- Fix for `L016` issue where `DISTINCT` keywords were mangled during fixing [#1024](https://github.com/sqlfluff/sqlfluff/issues/1024).

## [0.5.5] - 2021-05-13

- Bugfix release for an off-by-one error introduced in L016 as part of `0.5.4`.

## [0.5.4] - 2021-05-12

### Added

- Parsing of Postgres dollar quoted literals.
- Parsing of Postgres filter grammar.
- Parsing of "ALTER DEFAULT PRIVILEGES" Postgres statement.
- Parsing of Postgres non-explicit role granting and function execution.
- Early failure on fatal dbt templater errors.

### Changed

- Big rewrite of the lexer, segments and position markers for simplicity and to support future parallelism work.
- Fix to L036 which previously mangled whitespace.

## [0.5.3] - 2021-05-04

### Added

- [`L009`](https://docs.sqlfluff.com/en/stable/rules.html#sqlfluff.core.rules.Rule_L009) can now be enforced when `templater = dbt`.
- Parsing of `EXPLAIN`, `USE` statements.
- Parsing of `ALTER TABLE x RENAME TO y` syntax.
- Parsing of `ALTER SESSION` in snowflake.
- Parsing of numeric literals with exponents.
- Added rule codes to diff_cover output.

### Changed

- Fix `templater = dbt` L009 bug [#861](https://github.com/sqlfluff/sqlfluff/issues/861) where:
  - `sqlfluff lint` would incorrectly always return `L009 | Files must end with a trailing newline.`
  - `sqlfluff fix` would remove trailing newlines when `exclude_rules = L009`.
- Fix bug with BigQuery comparison operators.
- Fix recursion bug with L045.
- Fix tuple index bug with L016.
- Fix mangled `COALESCE` bug with L043.
- Fix Jinja templating error with _UnboundLocalError_.
- Improve array parsing.
- Simplify bracket parsing.
- Speed up L010 by caching the capitalisation policy.
- Output of `sqlfluff dialects` is now sorted.
- Handle disabled `dbt` models.

## [0.5.2] - 2021-04-11

### Changed

- Fix false positive in L045 when CTE used in WHERE clause ([#944](https://github.com/sqlfluff/sqlfluff/issues/944))
- Logging and readout now includes more detail and a notification of dbt compilation.
- Fix bug in L048 which flagged adjoining commas as failures.
- Fix bug in L019 with inline comments.
- Fix bug in L036 with multiple newlines.
- Skip disabled dbt models. ([#931](https://github.com/sqlfluff/sqlfluff/issues/931)).
- Support "USE" statement in ANSI ([#902](https://github.com/sqlfluff/sqlfluff/issues/902)).
- Parse explain statement ([#893](https://github.com/sqlfluff/sqlfluff/issues/893)).

## [0.5.1] - 2021-04-09

### Changed

- Parsing improvements around optional brackets.
- Better parsing of set operators (like `UNION`) and how they interact with `ORDER BY` clauses.
- Support for comparison operators like `~`.
- Fix parsing of snowflake `SAMPLE` syntax.
- Fix recursion issues in L044.
- `SPACE` keyword now has no special meaning in the postgres dialect.

## [0.5.0] - 2021-04-05

### Added

- `pascal` (PascalCase) `capitalisation_policy` option for L014 (unquoted identifiers)
- `only_aliases` configuration option for L014 (unquoted identifiers)
- Dialects now have more advanced dependency options to allow less repetition between related dialects. The methods `get_segment` and `get_grammar` can be used on unexpanded grammars to access elements of the parent grammars. The `copy` method on grammars can be used to copy with alterations.
- Rule L046 to lint whitespace within jinja tags.
- Enable and Disable syntax for [ignoring violations from ranges of lines](https://docs.sqlfluff.com/en/latest/configuration.html#ignoring-line-ranges).

### Changed

- Renamed the BaseCrawler class to BaseRule. This is the base class for all rules. This is a breaking change for any custom rules that have been added via plugins or by forking the SQLFluff repo.
- Renamed `sqlfluff.rules()` to `sqlfluff.list_rules()` and `sqlfluff.dialects()` to `sqlfluff.list_dialects()` due to naming conflicts with the now separate `sqlfluff.dialects` module.
- Extracted dialect definitions from the `sqlfluff.core` module so that each dialect is better isolated from each other. This also allows more focused testing and the potential for dialect plugins in future. Dialects are now only imported as needed at runtime. All dialects should now be accessed using the selector methods in `sqlfluff.core.dialects` rather than importing from `sqlfluff.dialects` directly.
- Add support for `ALTER USER` commands in Snowflake dialect.
- Added describe statement to ANSI dialect
- Renamed `capitalisation_policy` to `extended_capitalisation_policy` for L014 to reflect the fact that it now accepts more options (`pascal`) than regular `capitalisation_policy` still used by L010 and others.
- Replaced `only_aliases` config with `unquoted_identifiers_policy` and added it to rule L014 in addition to L029.
- Parse structure of `FROM` clauses to better represent nested joins and table functions.
- Parse structure of expressions to avoid unnecessary nesting and overly recursive method calls.

## [0.4.1] - 2021-02-25

### Added

- Initial architecture for rule plugins to allow custom rules. This initial release should be considered _beta_ until the release of 0.5.0.
- Add tests for dbt 0.19.0.
- General increased parsing coverage.
- Added some missing Postgres syntax elements.
- Added some basic introspection API elements to output what dialects and rules are available for use within the API.

### Changed

- Fix several Snowflake parsing bugs.
- Refactor from clause to handle flattens after joins.
- Fix .get_table_references() in Snowflake dialect.
- Macros defined within the .sqlfluff config will take precedence over the macros defined in the path that is defined with config value `sqlfluff:templater:jinja:load_macros_from_path`.
- Fix Snowflake indent parsing.
- Fixed incorrect parsing of syntax-like elements in comments.
- Altered parsing of `NULL` keywords, so parse as Literals where appropriate.
- Fixed bug in expression parsing leading to recursion errors.

## [0.4.0] - 2021-02-14

### Added

- Public API to enable people to import `sqlfluff` as a python module and call `parse`, `lint` and `fix` within their own projects. See [the docs](https://docs.sqlfluff.com/en/latest/api.html) for more information. ([#501](https://github.com/sqlfluff/sqlfluff/pull/501))
- The ability to use `dbt` as a templating engine directly allowing richer and more accurate linting around `dbt` macros (and packages related to `dbt`). For more info see [the docs](https://docs.sqlfluff.com/en/latest/configuration.html#dbt-project-configuration). ([#508](https://github.com/sqlfluff/sqlfluff/pull/508))
- Support for modulo (`%`) operator. ([#447](https://github.com/sqlfluff/sqlfluff/pull/447))
- A limit in the internal fix routines to catch any infinite loops. ([#494](https://github.com/sqlfluff/sqlfluff/pull/494))
- Added the `.is_type()` method on segments to more intelligently deal with type matching in rules when inheritance is at play.
- Added the ability for the user to add their own rules when interacting with the `Linter` directly using `user_rules`.
- Added L034 'Fields should be stated before aggregates / window functions' per [dbt coding conventions](https://github.com/fishtown-analytics/corp/blob/master/dbt_coding_conventions.md#sql-style-guide). ([#495](https://github.com/sqlfluff/sqlfluff/pull/495))
- Templating tags, such as `{{ variables }}`, `{# comments #}` and `{% loops %}` (in jinja) now have placeholders in the parsed structure. Rule L003 (indentation), also now respects these placeholders so that their indentation is linted accordingly. For loop or block tags, they also generate an `Indent` and `Dedent` tag accordingly (which can be enabled or disabled) with a configuration value so that indentation around these functions can be linted accordingly. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))
- MyPy type linting into a large proportion of the core library. ([#526](https://github.com/sqlfluff/sqlfluff/pull/526), [#580](https://github.com/sqlfluff/sqlfluff/pull/580))
- Config values specific to a file can now be defined using a comment line starting with `-- sqlfluff:`. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))
- Added documentation for `--noqa:` use in rules. ([#552](https://github.com/sqlfluff/sqlfluff/pull/552))
- Added `pre-commit` hooks for `lint` and `fix`. ([#576](https://github.com/sqlfluff/sqlfluff/pull/576))
- Added a fix routine for Rule L019 (comma placement). ([#575](https://github.com/sqlfluff/sqlfluff/pull/575))
- Added Rule L031 to enforce "avoid using alias in the `FROM`/`JOIN` clauses" from the `dbt` coding conventions. ([#473](https://github.com/sqlfluff/sqlfluff/pull/473), [#479](https://github.com/sqlfluff/sqlfluff/pull/479))
- Added Rule L032 to enforce "do not use `USING`" from the `dbt` coding conventions. ([#487](https://github.com/sqlfluff/sqlfluff/pull/487))
- Added Rule L033 to enforce "prefer `UNION ALL` to `UNION *`" from the `dbt` coding conventions. ([#489](https://github.com/sqlfluff/sqlfluff/pull/489))
- Added Rule L034 to enforce "fields should be stated before aggregate/window functions" from the `dbt` coding conventions. ([#495](https://github.com/sqlfluff/sqlfluff/pull/495))
- Added Rule L038 to forbid (or require) trailing commas in select clauses. ([#362](https://github.com/sqlfluff/sqlfluff/pull/752))
- Added Rule L039 to lint unnecessary whitespace between elements. ([#502](https://github.com/sqlfluff/sqlfluff/pull/753))
- Added a fix routine for L015. ([#732](https://github.com/sqlfluff/sqlfluff/pull/732))
- Added a fix routine for L025. ([#404](https://github.com/sqlfluff/sqlfluff/pull/741))
- Adopted the `black` coding style. ([#485](https://github.com/sqlfluff/sqlfluff/pull/485))
- Added validation and documentation for rule configuration options. ([#462](https://github.com/sqlfluff/sqlfluff/pull/462))
- Added documentation for which rules are fixable. ([#594](https://github.com/sqlfluff/sqlfluff/pull/594))
- Added `EPOCH` keyword for postgres dialect. ([#522](https://github.com/sqlfluff/sqlfluff/pull/522))
- Added column index identifier in snowflake dialect. ([#458](https://github.com/sqlfluff/sqlfluff/pull/458))
- Added `USE` statement to the snowflake dialect. ([#537](https://github.com/sqlfluff/sqlfluff/pull/537))
- Added `CODE_OF_CONDUCT` to the project. ([#471](https://github.com/sqlfluff/sqlfluff/pull/471))
- Added `ISNULL` and `NOTNULL` keywords to ansi dialect. ([#441](https://github.com/sqlfluff/sqlfluff/pull/441))
- Added support for python 3.9. ([#482](https://github.com/sqlfluff/sqlfluff/pull/482))
- Added `requirements_dev.txt` for local testing/linting. ([#500](https://github.com/sqlfluff/sqlfluff/pull/500))
- Added CLI option `--disregard-sqlfluffignores` to allow direct linting of files in the `.sqlfluffignore`. ([#486](https://github.com/sqlfluff/sqlfluff/pull/486))
- Added `dbt` `incremental` macro. ([#363](https://github.com/sqlfluff/sqlfluff/pull/363))
- Added links to cockroachlabs expression grammars in ansi dialect. ([#592](https://github.com/sqlfluff/sqlfluff/pull/592))
- Added favicon to the docs website. ([#589](https://github.com/sqlfluff/sqlfluff/pull/589))
- Added `CREATE FUNCTION` syntax for postgres and for bigquery. ([#325](https://github.com/sqlfluff/sqlfluff/pull/325))
- Added `CREATE INDEX` and `DROP INDEX` for mysql. ([#740](https://github.com/sqlfluff/sqlfluff/pull/748))
- Added `IGNORE NULLS`, `RESPECT NULLS`, `GENERATE_DATE_ARRAY` and `GENERATE_TIMESTAMP_ARRAY` for bigquery. ([#667](https://github.com/sqlfluff/sqlfluff/pull/727), [#527](https://github.com/sqlfluff/sqlfluff/pull/726))
- Added `CREATE` and `CREATE ... CLONE` for snowflake. ([#539](https://github.com/sqlfluff/sqlfluff/pull/670))
- Added support for EXASOL. ([#684](https://github.com/sqlfluff/sqlfluff/pull/684))

### Changed

- Fixed parsing of semi-structured objects in the snowflake dialect with whitespace gaps. [#634](https://github.com/sqlfluff/sqlfluff/issues/635)
- Handle internal errors elegantly, reporting the stacktrace and the error-surfacing file. [#632](https://github.com/sqlfluff/sqlfluff/pull/632)
- Improve message for when an automatic fix is not available for L004. [#633](https://github.com/sqlfluff/sqlfluff/issues/633)
- Linting errors raised on templated sections are now ignored by default, with a configuration value added to show them. ([#713](https://github.com/sqlfluff/sqlfluff/pull/745))
- Big refactor of logging internally. `Linter` is now decoupled from logging so that it can be imported directly by subprojects without needing to worry about weird output or without the log handling getting in the way of your project. ([#460](https://github.com/sqlfluff/sqlfluff/pull/460))
- Linting errors in the final file are now reported with their position in the source file rather than in the templated file. This means when using sqlfluff as a pluggable library within an IDE, the references match the file which is being edited. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))
- Created new Github Organisation (https://github.com/sqlfluff) and migrated from https://github.com/alanmcruickshank/sqlfluff to https://github.com/sqlfluff/sqlfluff. ([#444](https://github.com/sqlfluff/sqlfluff/issues/444))
- Changed the handling of `*` and `a.b.*` expressions to have their own expressions. Any dependencies on this structure downstream will be broken. This also fixes the linting of both kinds of expressions with regard to L013 and L025. ([#454](https://github.com/sqlfluff/sqlfluff/pull/454))
- Refactor of L022 to handle poorly formatted CTEs better. ([#494](https://github.com/sqlfluff/sqlfluff/pull/494))
- Restriction of L017 to only fix when it would delete whitespace or newlines. ([#598](https://github.com/sqlfluff/sqlfluff/pull/756))
- Added a configuration value to L016 to optionally ignore lines containing only comments. ([#299](https://github.com/sqlfluff/sqlfluff/pull/751))
- Internally added an `EphemeralSegment` to aid with parsing efficiency without altering the end structure of the query. ([#491](https://github.com/sqlfluff/sqlfluff/pull/491))
- Split `ObjectReference` into `ColumnReference` and `TableReference` for more useful API access to the underlying structure. ([#504](https://github.com/sqlfluff/sqlfluff/pull/504))
- `KeywordSegment` and the new `SymbolSegment` both now inherit from `_ProtoKeywordSegment` which allows symbols to match in a very similar way to keywords without later appearing with the `type` of `keyword`. ([#504](https://github.com/sqlfluff/sqlfluff/pull/504))
- Introduced the `Parser` class to parse a lexed query rather than relying on users to instantiate a `FileSegment` directly. As a result the `FileSegment` has been moved from the core parser directly into the dialects. Users can refer to it via the `get_root_segment()` method of a dialect. ([#510](https://github.com/sqlfluff/sqlfluff/pull/510))
- Several performance improvements through removing unused functionality, sensible caching and optimising loops within functions. ([#526](https://github.com/sqlfluff/sqlfluff/pull/526))
- Split up rule tests into separate `yml` files. ([#553](https://github.com/sqlfluff/sqlfluff/pull/553))
- Allow escaped quotes in strings. ([#557](https://github.com/sqlfluff/sqlfluff/pull/557))
- Fixed `ESCAPE` parsing in `LIKE` clause. ([#566](https://github.com/sqlfluff/sqlfluff/pull/566))
- Fixed parsing of complex `BETWEEN` statements. ([#498](https://github.com/sqlfluff/sqlfluff/pull/498))
- Fixed BigQuery `EXCEPT` clause parsing. ([#472](https://github.com/sqlfluff/sqlfluff/pull/472))
- Fixed Rule L022 to respect leading comma configuration. ([#455](https://github.com/sqlfluff/sqlfluff/pull/455))
- Improved instructions on adding a virtual environment in the `README`. ([#457](https://github.com/sqlfluff/sqlfluff/pull/457))
- Improved documentation for passing CLI defaults in `.sqlfluff`. ([#452](https://github.com/sqlfluff/sqlfluff/pull/452))
- Fix bug with templated blocks + `capitalisation_policy = lower`. ([#477](https://github.com/sqlfluff/sqlfluff/pull/477))
- Fix array accessors in snowflake dialect. ([#442](https://github.com/sqlfluff/sqlfluff/pull/442))
- Color `logging` warnings red. ([#497](https://github.com/sqlfluff/sqlfluff/pull/497))
- Allow whitespace before a shorthand cast. ([#544](https://github.com/sqlfluff/sqlfluff/pull/544))
- Silenced warnings when fixing from stdin. ([#522](https://github.com/sqlfluff/sqlfluff/pull/522))
- Allow an underscore as the first char in a semi structured element key. ([#596](https://github.com/sqlfluff/sqlfluff/pull/596))
- Fix PostFunctionGrammar in the Snowflake dialect which was causing strange behaviour in L012. ([#619](https://github.com/sqlfluff/sqlfluff/pull/619/files))
- `Bracketed` segment now obtains its brackets directly from the dialect using a set named `bracket_pairs`. This now enables better configuration of brackets between dialects. ([#325](https://github.com/sqlfluff/sqlfluff/pull/325))

### Removed

- Dropped support for python 3.5. ([#482](https://github.com/sqlfluff/sqlfluff/pull/482))
- From the CLI, the `--no-safety` option has been removed, the default is now that all enabled rules will be fixed. ([#583](https://github.com/sqlfluff/sqlfluff/pull/583))
- Removed `BaseSegment.grammar`, `BaseSegment._match_grammar()` and `BaseSegment._parse_grammar()` instead preferring references directly to `BaseSegment.match_grammar` and `BaseSegment.parse_grammar`. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509))
- Removed `EmptySegmentGrammar` and replaced with better non-code handling in the `FileSegment` itself. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509))
- Remove the `ContainsOnly` grammar as it remained only as an anti-pattern. ([#509](https://github.com/sqlfluff/sqlfluff/pull/509))
- Removed the `expected_string()` functionality from grammars and segments ([#509](https://github.com/sqlfluff/sqlfluff/pull/509)) as it was poorly supported.
- Removed `BaseSegment.as_optional()` as now this functionality happens mostly in grammars (including `Ref`). ([#509](https://github.com/sqlfluff/sqlfluff/pull/509))
- Removed `ColumnExpressionSegment` in favour of `ColumnReference`. ([#512](https://github.com/sqlfluff/sqlfluff/pull/512))
- Removed the `LambdaSegment` feature, instead replacing with an internal to the grammar module called `NonCodeMatcher`. ([#512](https://github.com/sqlfluff/sqlfluff/pull/512))
- Case sensitivity as a feature for segment matching has been removed as not required for existing dialects. ([#517](https://github.com/sqlfluff/sqlfluff/pull/517))
- Dependency on `difflib` or `cdifflib`, by relying on source mapping instead to apply fixes. ([#541](https://github.com/sqlfluff/sqlfluff/pull/541))

## [0.3.6] - 2020-09-24

### Added

- `sqlfluff dialects` command to get a readout of available dialects [+ associated docs].
- More helpful error messages when trying to run in Python2.
- Window functions now parse with `IGNORE`/`RESPECT` `NULLS`.
- Parsing of `current_timestamp` and similar functions. Thanks [@dmateusp](https://github.com/dmateusp).
- Snowflake `QUALIFY` clause.

### Changed

- Respect user config directories. Thanks [@sethwoodworth](https://github.com/sethwoodworth).
- Fix incorrect reporting of L013 with `*`. Thanks [@dmateusp](https://github.com/dmateusp).
- Fix incorrect reporting of L027 with column aliases. Thanks [@pwildenhain](https://github.com/pwildenhain).
- Simplification of application of fixes and correction of a case where fixes could be depleted. Thanks [@NiallRees](https://github.com/NiallRees).
- Fix functions with a similar structure to `SUBSTRING`.
- Refactor BigQuery `REPLACE` and `EXCEPT` clauses.
- Bigquery date parts corrected.
- Snowflake array accessors.
- Postgres `NOTNULL` and `ISNULL`.
- Bugfix in snowflake for keywords used in semistructured queries.
- Nested `WITH` statements now parse.
- Performance improvements in the `fix` command.
- Numeric literals starting with a decimal now parse.
- Refactor the jinja templater.

## [0.3.5] - 2020-08-03

### Added

- Patterns and Anti-patterns in documentation. Thanks [@flpezet](https://github.com/flpezet).
- Functions in `GROUP BY`. Thanks [@flpezet](https://github.com/flpezet).

### Changed

- Deep bugfixes in the parser to handle simple matching better for a few edge cases. Also added some logging deeper in the parser.
- Added in the `SelectableGrammar` and some related segments to make it easier to refer to _select-like_ things in other grammars.
- Fixes to `CASE` statement parsing. Thanks [@azhard](https://github.com/azhard).
- Fix to snowflake `SAMPLE` implementation. Thanks [@rkm3](https://github.com/rkm3).
- Numerous docs fixes. Thanks [@SimonStJG](https://github.com/SimonStJG), [@flpezet](https://github.com/flpezet), [@s-pace](https://github.com/s-pace), [@nolanbconaway](https://github.com/nolanbconaway).

## [0.3.4] - 2020-05-13

### Changed

- Implementation of the bigquery `CREATE MODEL` syntax. Thanks [@barrywhart](https://github.com/barrywhart).
- Bugfixes for:
  - Edge cases for L006
  - False alarms on L025
  - `ORDER BY x NULLS FIRST|LAST`
  - `FOR` keyword in bigquery `SYSTEM_TIME` syntax.

## [0.3.3] - 2020-05-11

### Added

- Added the `--nofail` option to `parse` and `lint` commands to assist rollout.
- Added the `--version` option to complement the `version` option already available on the cli.
- Parsing for `ALTER TABLE`.
- Warning for unset dialects when getting parsing errors.
- Configurable line lengths for output.

## [0.3.2] - 2020-05-08

### Added

- Support for the Teradata dialect. Thanks [@Katzmann1983](https://github.com/Katzmann1983)!
- A much more detailed getting started guide in the docs.
- For the `parse` command, added the `--profiler` and `--bench` options to help debugging performance issues.
- Support for the `do` command in the jinja templater.
- Proper parsing of the concatenate operator (`||`).
- Proper indent handling of closing brackets.
- Logging and benchmarking of parse performance as part of the CI pipeline.
- Parsing of object references with defaults like `my_db..my_table`.
- Support for the `INTERVAL '4 days'` style interval expression.
- Configurable trailing or leading comma linting.
- Configurable indentation for `JOIN` clauses. - Rules now have their own logging interface to improve debugging ability. - Snowflake and Postgres dialects. - Support for a `.sqlfluffignore` file to ignore certain paths. - More generic interfaces for managing keywords in dialects, including `set` interfaces for managing and creating keywords and the `Ref.keyword()` method to refer to them, and the ability to refer directly to keyword names in most grammars using strings directly. Includes `SegmentGenerator` objects to bind dialect objects at runtime from sets. Thanks [@Katzmann1983](https://github.com/Katzmann1983)! - Rule `L029` for using unreserved keywords as variable names. - The jinja templater now allows macros loaded from files, and the hydration of variables ending in `_path` in the config files. - JSON operators and the `DISTINCT ON ()` syntax for the Postgres dialect. ### Changed - Refactor of whitespace and non-code handling so that segments are less greedy and default to not holding whitespace on ends. This allows more consistent linting rule application. - Change config file reading to _case-sensitive_ to support case sensitivity in jinja templating. - Non-string values (including lists) now function in the python and jinja templating libraries. - Validation of the match results of grammars has been reduced. In production cases the validation will still be done, but only on _parse_ and not on _match_. - At low verbosities, python level logging is also reduced. - Some matcher rules in the parser can now be classified as _simple_, which allows them to shortcut some of the matching routines. - Yaml output now double quotes values with newlines or tab characters. - Better handling of hanging and closing indents when linting rule L003. - More capable handling of multi-line comments so that indentation and line length parsing works. This involves some deep changes to the lexer. - Getting violations from the linter now automatically takes ignore rules and filters into account. - Several bugfixes, including catching potential infinite regress during fixing of files, if one fix would re-introduce a problem with another. - Behaviour of the `Bracketed` grammar has been changed to treat its content as a `Sequence` rather than a `OneOf`. - Move to `SandboxedEnvironment` rather than `Environment` for jinja templating for security. - Improve reporting of templating issues, especially for the jinja templater, so that missing variables are rendered as blanks, but still reported as templating violations. ## [0.3.1] - 2020-02-17 ### Added - Support for `a.b.*` on top of `a.*` in select target expressions. ## [0.3.0] - 2020-02-15 ### Changed - Deprecated python 2.7 and python 3.4 which are now both past their maintenance horizon. The 0.2.x branch will remain available for continued development for these versions. - Rule L003 is now significantly smarter in linting indentation with support for hanging indents and comparison to the most recent line which doesn't have an error. The old (simpler) functionality of directly checking whether an indent was a multiple of a preset value has been removed. - Fixed the "inconsistent" bug in L010. Thanks [@nolanbconaway](https://github.com/nolanbconaway). - Updated logging of parsing and lexing errors to have more useful error codes. - Changed parsing of expressions to favour functions over identifiers to [fix the expression bug](https://github.com/sqlfluff/sqlfluff/issues/96). 
- Moved where the `SELECT` keyword is parsed within a select statement, so that it belongs as part of the newly renamed `select_clause` (renamed from the previous `select_target_group`). - Clarified handling of the `type` and `name` properties of the BaseSegment class and its children. `name` should be specific to a particular kind of segment, and `type` should express a wider group. Handling of the `newline`, `whitespace` and `comma` segments has been updated so that we use the `type` property for most use cases rather than `name`. ### Added - _Meta segments_ for indicating where things can be present in the parsed tree. This is mostly illustrated using the `Indent` and `Dedent` segments used for indicating the position of theoretical indents in the structure. Several helper functions have been added across the codebase to handle this increase in the kinds of segments which might be encountered by various grammars. - Rule L016 has been added to lint long lines. In the `fix` phase of this rule, there is enough logic to try and reconstruct a sensible place for line breaks and re-flow the query. This will likely need further work and may still encounter places where it doesn't fix all errors, but should be able to deal with the majority of simple cases. - BigQuery dialect, initially just for appropriate quoting. - Added parsing of DDL statements such as `COMMIT`, `DROP`, `GRANT`, `REVOKE` and `ROLLBACK`. Thanks [@barrywhart](https://github.com/barrywhart). - `--format` option to the `parse` command that allows a yaml output. This is mostly to make test writing easier in the development process but might also be useful for other things. - Parsing of set operations like `UNION`. - Support for the `diff-cover` tool. Thanks [@barrywhart](https://github.com/barrywhart). - Enabled the `fix` command while using `stdin`. Thanks [@nolanbconaway](https://github.com/nolanbconaway). - Rule to detect incorrect use of `DISTINCT`. Thanks [@barrywhart](https://github.com/barrywhart). - Security fixes from DeepCover. Thanks [@sanketsaurav](https://github.com/sanketsaurav). - Automatic fix testing, to help support the newer, more complicated rules. - Interval literals. - Support for the `source` macro from dbt. Thanks [@Dandandan](https://github.com/Dandandan). - Support for functions with spaces between the function name and the brackets, and a linting rule `L017` to catch this. - Efficiency cache for faster pruning of the parse tree. - Parsing of array notation as used in BigQuery and Postgres. - Enable the `ignore` parameter on linting and fixing commands to ignore particular kinds of violations. ## [0.2.4] - 2019-12-06 ### Added - A `--code-only` option to the `parse` command to spit out a simplified output with only the code elements. - Rules can now optionally override the description of the violation and pass that back via the `LintingResult`. ### Changed - Bugfix, correct missing files in `setup.py` `install_requires` section. - Better parsing of the _not equal_ operator. - Added more exclusions to identifier reserved words to fix cross joins. - At verbosity levels 2 or above, the root config is printed and then any diffs to that for specific files are also printed. - Linting and parsing of directories now reports files in alphabetical order. Thanks [@barrywhart](https://github.com/barrywhart). - Better python 2.7 stability. Thanks [@barrywhart](https://github.com/barrywhart). - Fixed parsing of `IN`/`NOT IN` and `IS`/`IS NOT`. 
## [0.2.3] - 2019-12-02 ### Changed - Bugfix, default config not included. ## [0.2.2] - 2019-12-02 ### Changed - Tweak rule L005 to report more sensibly with newlines. - Rework testing of rules to be more modular. - Fix a config file bug if no root config file was present for some values. Thanks [@barrywhart](https://github.com/barrywhart). - Lexing rules are now part of the dialect rather than a global, so that they can be overridden by other dialects when we get to that stage. ## [0.2.0] - 2019-12-01 ### Added - Templating support (jinja2, python or raw). - Variables + Macros. - The `fix` command is also sensitive to fixing over templates and will skip certain fixes if it feels that it's conflicted. - Config file support, including specifying context for the templater. - Documentation via Sphinx and readthedocs. - Including a guide on the role of SQL in the real world. Assisted by [@barrywhart](https://github.com/barrywhart). - Documentation LINTING (given we're a linting project) introduced in CI. - Reimplemented L006 & L007, which lint whitespace around operators. - Ability to configure rule behaviour directly from the config file. - Implemented L010 to lint capitalisation of keywords. - Allow casting in the parser using the `::` operator. - Implemented `GROUP BY` and `LIMIT`. - Added `ORDER BY` using indexes and expressions. - Added parsing of `CASE` statements. - Support for window/aggregate functions. - Added linting and parsing of alias expressions. ### Changed - Fixed a bug which could cause potential infinite recursion in configuration. - Changed how negative literals are handled, so that they're now a compound segment rather than being identified at the lexing stage. This is to allow the parser to resolve the potential ambiguity. - Restructure of rule definitions to be more streamlined and also enable autodocumentation. This includes a more complete `RuleSet` class which now holds the filtering code. - Corrected logging in fix mode not to duplicate the reporting of errors. - Now allows insert statements with a nested `with` clause. - Fixed verbose logging during parsing. - Allow the `Bracketed` grammar to optionally match empty brackets using the optional keyword. 
- Rather than passing around multiple variables during parsing and matching, there is now a `ParseContext` object which contains things like the dialect and various depths. This simplifies the parsing and matching code significantly. - Bracket referencing is now done from the dialect directly, rather than in individual Grammars (except the `Bracketed` grammar, which still implements it directly). This takes out some originally duplicated code. - Corrected the parsing of ordering keywords in an `ORDER BY` clause. ### Removed - Removed the `bracket_sensitive_forward_match` method from the `BaseGrammar`. It was ugly and not flexible enough. It's been replaced by a suite of methods as described above. ## [0.1.3] - 2019-10-30 ### Changed - Tweak to the L001 rule so that it doesn't crash the whole thing. ## [0.1.2] - 2019-10-30 ### Changed - Fixed the errors raised by the lexer. ## [0.1.1] - 2019-10-30 ### Changed - Fixed which modules from sqlfluff are installed in the setup.py. This affects the `version` command. ## [0.1.0] - 2019-10-29 ### Changed - _Big Rewrite - some loss in functionality might be apparent compared to pre-0.1.0. Please submit any major problems as issues on GitHub_ - Changed unicode handling for better escape codes in python 2. Thanks [@mrshu](https://github.com/mrshu). - BIG rewrite of the parser, completely new architecture. This introduces breaking changes and some loss of functionality while we catch up. - In particular, matches now return partial matches to speed up parsing. - The `Delimited` matcher has had a significant re-write, with a major speedup, and has broken the dependency on `Sequence`. - Rewrite of `StartsWith` and `Sequence` to use partial matches properly. - Different treatment of numeric literals. - Both `Bracketed` and `Delimited` respect bracket counting. - MASSIVE rewrite of `Bracketed`. - Grammars now have timers. - Joins properly parsing. - Rewrite of logging to selectively output commands at different levels of verbosity. This uses the `verbosity_logger` method. - Added a command line `sqlfluff parse` option which runs just the parsing step of the process to better understand how a file is being parsed. This also has options to configure how deep we recurse. - Complete re-write of the rules section, implementing new `crawlers` which implement the linting rules. Now with inbuilt fixers in them. - Old rules removed and re-implemented so we now have parity with the old rule sets. - Moved to using `Ref` mostly within the core grammar so that we can have recursion. - Used recursion to do a first implementation of arithmetic parsing. Including a test for it. - Moved the main grammar into a separate dialect and renamed source and test files accordingly. - Moved to file-based tests for the ansi dialect to make it easier to test using the tool directly. - As part of file tests - expected outcomes are now encoded in yaml to make it easier to write new tests. - Vastly improved readability and debugging potential of the \_match logging. - Added support for Windows line endings in the lexer. ## [0.0.7] - 2018-11-19 ### Added - Added `sqlfluff fix` as a command to implement auto-fixing of linting errors. For now only `L001` is implemented as a rule that can fix things. - Added a `rules` command to introspect the available rules. - Updated the cli table function to use the `textwrap` library and also deal a lot better with longer values. - Added a `--rules` argument to most of the commands to allow rule users to focus their search on a specific subset of rules. 
### Changed - Refactor the cli tests to use the click CliRunner. Much faster ## [0.0.6] - 2018-11-15 ### Added - Number matching ### Changed - Fixed operator parsing and linting (including allowing the exception of `(*)`) ## [0.0.5] - 2018-11-15 ### Added - Much better documentation including the DOCS.md ### Changed - Fixed comma parsing and linting ## [0.0.4] - 2018-11-14 ### Added - Added operator regexes - Added a priority for matchers to resolve some ambiguity - Added tests for operator regexes - Added ability to initialise the memory in rules ## [0.0.3] - 2018-11-14 ### Added - Refactor of rules to allow rules with memory - Adding comma linting rules (correcting the single character matchers) - Adding mixed indentation linting rules - Integration with CircleCI, CodeCov and lots of badges ### Changed - Changed import of version information to fix bug with importing config.ini - Added basic violations/file reporting for some verbosities - Refactor of rules to simplify definition - Refactor of color cli output to make it more reusable ## [0.0.2] - 2018-11-09 ### Added - Longer project description - Proper exit codes - colorama for colored output ### Changed - Significant CLI changes - Much improved output from CLI ## [0.0.1] - 2018-11-07 ### Added - Initial Commit! - VERY ALPHA - Restructure into [package layout](https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure) - Adding Tox and Pytest so that they work sqlfluff-2.3.5/CODE_OF_CONDUCT.md000066400000000000000000000121641451700765000161530ustar00rootroot00000000000000 # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement via email to `admins`@`sqlfluff.com`. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. 
sqlfluff-2.3.5/CONTRIBUTING.md000066400000000000000000000475101451700765000156100ustar00rootroot00000000000000# SQLFluff - Contributing :star2: **First** - thanks for being interested in improving SQLFluff! :smiley: :star2: **Second** - please read and familiarise yourself with both the content of this guide and also our [code of conduct](CODE_OF_CONDUCT.md). :star2: **Third** - the best way to get started contributing is to use the tool in anger and then to submit bugs and features through GitHub. In particular, in helping to develop the parser, examples of queries that do not parse as expected are especially helpful. :star2: **Fourth** - making sure that our documentation is up-to-date and useful for new users is really important. If you are a new user, you are in precisely the best position to do this. Familiarise yourself with the tool (as per step 3 above) and familiarise yourself with the current documentation (live version at [docs.sqlfluff.com](https://docs.sqlfluff.com) and the source can be found in the [docs](./docs/) folder of the repo). Pull requests with documentation improvements are always welcome. Keep in mind that there are linting checks in place for good formatting, so keep an eye on the tests whenever submitting a PR. We also have a [GitHub wiki](https://github.com/sqlfluff/sqlfluff/wiki) for longer tutorials. We welcome [contributions, suggestions or requests](https://github.com/sqlfluff/sqlfluff/issues/2104) for the wiki. :star2: **Fifth** - if you are so inclined - pull requests on the core codebase are always welcome. Dialect additions are often a good entry point for new contributors, and we have [a wiki page](https://github.com/sqlfluff/sqlfluff/wiki/Contributing-Dialect-Changes) to help you through your first contribution. Bear in mind that all the tests should pass, and test coverage should not decrease unduly as part of the changes which you make. You may find it useful to familiarise yourself with the [architectural principles here](https://docs.sqlfluff.com/en/stable/internals.html#architecture) and with the [current documentation here](https://docs.sqlfluff.com). ## How The Community Works SQLFluff is maintained by a community of volunteers, which means we have a few processes in place to allow everyone to contribute at a level that suits them and at a time that suits them. These are not meant to be a way of restricting development, but a way of allowing the community to agree what to focus on and then effectively direct its focus toward that. Anyone can pipe up in these discussions, and the more we hear from users the more we can build a tool that is useful for the community. - Large features for consideration will be organised into _Major Releases_. These will usually include significant changes in functionality or backwards-incompatible changes. As some of these features may require significant coordination, discussion or development work, there is a process for each major release to work out what features will fit into that release. - Each major release will have its own GitHub issue. For example, the link to the issue for [0.6.0 is here](https://github.com/sqlfluff/sqlfluff/issues/922). - Features or issues are organised into a _shortlist_. During the initial discussion for the release, each feature is vetted for enough clarity that someone in the community can pick it up. Issues where we cannot reach clarity will be pushed to the next release. 
Getting this clarity is important before development work progresses so that we know that larger changes are a) in line with the aims of the project and b) effectively pre-approved changes, so that there are not any surprises when it comes to merging. - Once we reach the deadline for closing the roadmap for a release, the focus of development work should be on those features. - Small features and bug fixes (assuming no backward compatibility issues) do not need to go through the same vetting process and can be picked up and merged at any time. ### Maintainers A small group of people volunteer their time to maintain the project and share the responsibility for responding to issues and reviewing any proposed changes via pull requests. Each one of them will be trying to follow the process above and keep development work on the project moving. That means for smaller changes and improvements they may review changes as individuals and merge them into the project in a very lightweight way. For larger changes, especially if not already part of the current major release process, the expectation is that they will involve other members of the maintainer community or the project admins before merging them in, or before giving the green light to major structural project changes. ## Nerdy Details ### Developing and Running SQLFluff Locally #### Requirements The simplest way to set up a development environment is to use `tox`. First ensure that you have tox installed: ```shell python3.8 -m pip install -U tox ``` **IMPORTANT:** `tox` must be installed with a minimum of Python 3.8 as the `mypy` checks are incompatible with 3.7. Those using newer versions of Python may replace `python3.8` as necessary (the test suite runs primarily under 3.12 for example). Note: Unfortunately tox does not currently support setting just a minimum Python version (though this may be coming in tox 4!). #### Creating a virtual environment A virtual environment can then be created and activated by running (check the [requirements](#requirements) before running this): ```shell tox -e dbt021-py38 --devenv .venv source .venv/bin/activate ``` (The `dbt021-py38` environment is a good default choice. However any version can be installed by replacing `dbt021-py38` with `py`, `py37`, `py39`, `dbt020-py38`, etc. `py` defaults to the python version that was used to install tox. However, to be able to run all tests including the dbt templater, choose one of the dbt environments.) Windows users should call `.venv\Scripts\activate` rather than `source .venv/bin/activate`. This virtual environment will already have the package installed in editable mode for you, as well as `requirements_dev.txt` and `plugins/sqlfluff-plugin-example`. Additionally if a dbt virtual environment was specified, you will also have `dbt-core`, `dbt-postgres`, and `plugins/sqlfluff-templater-dbt` available. ### Wiki We have a [GitHub wiki](https://github.com/sqlfluff/sqlfluff/wiki) with some more long-form tutorials for contributors, particularly those new to SQLFluff or contributing to open source. We welcome [contributions, suggestions or requests](https://github.com/sqlfluff/sqlfluff/issues/2104) for the wiki. ### Developing plugins If you're working on plugins (like the dbt templater), you'll also need to install those plugins in editable mode. This works the same way as the main project but you'll need to do each one explicitly, e.g. ```shell pip install -e plugins/sqlfluff-templater-dbt/. 
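# Each plugin needs its own explicit editable install; for example, the bundled
# example plugin mentioned above can be installed the same way:
pip install -e plugins/sqlfluff-plugin-example/.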
``` > NOTE: For packages intended to be installed like this, the source code must be directly > within a subdirectory with the name of the package and not in a subdirectory such as > src. This is due to a restriction in the implementation of setup.py in editable mode. ### Testing To test locally, SQLFluff uses `tox` (check the [requirements](#requirements)!). The test suite can be run via: ```shell tox ``` This will build and test for several Python versions, and also lint the project. Practically, on a day-to-day basis, you might only want to lint and test for one Python version, so you can always specify a particular environment. For example, if you are developing in Python 3.8 you might call... ```shell tox -e generate-fixture-yml,py38,linting,mypy ``` ...or if you also want to see the coverage reporting... ```shell tox -e generate-fixture-yml,cov-init,py38,cov-report,linting,mypy ``` > NB: The `cov-init` task clears the previous test results, the `py38` environment > generates the results for tests in that Python version and the `cov-report` > environment reports those results out to you (excluding dbt). `tox` accepts `posargs` to allow you to refine your test run, which is much faster while working on an issue, before running full tests at the end. For example, you can run specific tests by making use of the `-k` option in `pytest`: ``` tox -e py38 -- -k AL02 test ``` Alternatively, you can also run tests from a specific directory or file only: ``` tox -e py38 -- test/cli tox -e py38 -- test/cli/commands_test.py ``` You can also manually test your updated code against a SQL file via: ```shell sqlfluff parse test.sql ``` (ensure your virtual environment is activated first). #### How to use and understand the test suite When developing for SQLFluff, you may not need (or wish) to run the whole test suite, depending on what you are working on. Here are a couple of scenarios for development, and which parts of the test suite you may find most useful. 1. For dialect improvements (i.e. changes to anything in [src/sqlfluff/dialects](./src/sqlfluff/dialects)) you should not need to continuously run the full core test suite. Running either `tox -e generate-fixture-yml` (if using tox), or setting up a python virtualenv and running `test/generate_parse_fixture_yml.py` directly will usually be sufficient. Both of these options accept arguments to restrict runs to specific dialects to further improve iteration speed. e.g. - `tox -e generate-fixture-yml -- -d mysql` will run just the mysql tests. - `python test/generate_parse_fixture_yml.py -d mysql` will do the same. 2. Developing for the dbt templater should only require running the dbt test suite (see below). 3. When developing rules and rule plugins, there are a couple of scenarios. - When developing a new rule or working with a more isolated rule, you should only need to run the tests for that rule. These are usually what are called the _yaml tests_. This refers to a body of example sql statements and potential fixes defined in a large set of yaml files found in [test/fixtures/rules/std_rule_cases](./test/fixtures/rules/std_rule_cases). The easiest way to run these is by calling that part of the suite directly and filtering to just that rule. For example: - `tox -e py39 -- test/rules/yaml_test_cases_test.py -k AL01` - `pytest test/rules/yaml_test_cases_test.py -k AL01` - When developing on some more complicated rules, or ones known to have interactions with other rules, there are a set of rule fixing tests which apply a set combination of those rules. 
These are best run via the `autofix` tests. For example: - `tox -e py39 -- test/rules/std_fix_auto_test.py` - `pytest test/rules/std_fix_auto_test.py` - Potentially even the full rules suite `tox -e py39 -- test/rules` - A small number of core rules are also used in making sure that inner parts of SQLFluff are functioning. This isn't great isolation but does mean that occasionally you may find side effects of your changes in the wider test suite. These can usually be caught by running the full `tox -e py39` suite as a final check (or using the test suite on GitHub when posting your PR). 4. When developing the internals of SQLFluff (i.e. anything not already mentioned above), the test suite typically mirrors the structure of the internal submodules of sqlfluff: - When working with the CLI, the `sqlfluff.cli` module has a test suite called via `tox -e py39 -- test/cli`. - When working with the templaters (i.e. `sqlfluff.core.templaters`), the corresponding test suite is found via `tox -e py39 -- test/core/templaters`. - This rough guidance may, however, not apply for all of the internals. For example, changes to the internals of the parsing module (`sqlfluff.core.parser`) are very likely to have knock-on implications across the rest of the test suite and it may be necessary to run the whole thing. In these situations, however, you can usually work slowly outward, for example: 1. If your change is to the `AnyOf()` grammar, first running `tox -e py39 -- test/core/parser/grammar_test.py` would be wise. 2. ...followed by `tox -e py39 -- test/core/parser` once the above is passing. 3. ...and then `tox -e py39 -- test/core`. 4. ...and finally the full suite `tox -e py39`. #### dbt templater tests The dbt templater tests require a locally running Postgres instance. See the required connection parameters in `plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles.yml`. We recommend using https://postgresapp.com/. To run the dbt-related tests you will have to explicitly include these tests: ```shell tox -e cov-init,dbt018-py38,cov-report-dbt -- plugins/sqlfluff-templater-dbt ``` For more information on adding and running test cases see the [Parser Test README](test/fixtures/dialects/README.md) and the [Rules Test README](test/fixtures/rules/std_rule_cases/README.md). #### Running dbt templater tests in Docker Compose NOTE: If you prefer, you can develop and debug the dbt templater using a Docker Compose environment. It's a simple two-container configuration: * `app`: Hosts the SQLFluff development environment. The host's source directory is mounted into the container, so you can iterate on code changes without having to constantly rebuild and restart the container. * `postgres`: Hosts a transient Postgres database instance. Steps to use the Docker Compose environment: * Install Docker on your machine. * Run `plugins/sqlfluff-templater-dbt/docker/startup` to create the containers. * Run `plugins/sqlfluff-templater-dbt/docker/shell` to start a bash session in the `app` container. 
Inside the container, run: ``` py.test -v plugins/sqlfluff-templater-dbt/test/ ``` ### Pre-Commit Config For development convenience we also provide a `.pre-commit-config.yaml` file to allow the user to install a selection of pre-commit hooks by running (check the [requirements](#requirements) before running this): ``` tox -e pre-commit -- install ``` These hooks can help the user identify and fix potential linting/typing violations prior to committing their code and therefore reduce having to deal with these sorts of issues during code review. ### Documentation Website Documentation is built using Sphinx with some pages being built based on the source code. See the [Documentation Website README.md](./docs/README.md) file for more information on how to build and test this. ### Building Package New versions of SQLFluff will be published to PyPI automatically via [GitHub Actions](.github/workflows/publish-release-to-pypi.yaml) whenever a new release is published to GitHub. #### Release checklist: The [release page](https://github.com/sqlfluff/sqlfluff/releases) shows maintainers all merges since the last release. Once we have a long enough list, we should prepare a release. A release PR can be created by maintainers via the ["Create release pull request" GitHub Action](https://github.com/sqlfluff/sqlfluff/actions/workflows/create-release-pull-request.yaml). As further PRs are merged, we may need to rerun the release script (or alternatively just manually update the branch). This can only be rerun locally (the GitHub Action will exit with an error if the branch already exists, to prevent overwriting it). Check out the release branch created by the GitHub Action locally and run the script. It will preserve any `Highlights` you have added and update the other sections with new contributions. It can be run as follows (you will need a [GitHub Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) with "repo" permission): ```shell source .venv/bin/activate export GITHUB_REPOSITORY_OWNER=sqlfluff export GITHUB_TOKEN=gho_xxxxxxxx # Change to your token with "repo" permissions. python util.py release 2.0.3 # Change to your release number ``` Below is the old list of release steps, but many are automated by the process described above. - [ ] Change the version in `setup.cfg` and `plugins/sqlfluff-templater-dbt/setup.cfg` - [ ] Update the stable_version in the `[sqlfluff_docs]` section of `setup.cfg` - [ ] Copy the draft releases from https://github.com/sqlfluff/sqlfluff/releases to [CHANGELOG.md](CHANGELOG.md). These draft release notes have been created by a GitHub Action on each PR merge. - [ ] If you start to create a new draft in GitHub and hit "Auto Generate Release Notes", then it will basically recreate these notes (though in a slightly different format), but also add a nice "First contributors" section, so you can copy that "First contributors" section too and then abandon that new draft ([an issue](https://github.com/release-drafter/release-drafter/issues/1001) has been raised to ask for this in the Release Drafter GitHub Action). - [ ] Add markdown links to PRs, as annoyingly GitHub doesn't do this automatically when displaying Markdown files, like it does for comments. 
You can use regex in most code editors to replace `\(#([0-9]*)\) @([^ ]*)$` with `[#$1](https://github.com/sqlfluff/sqlfluff/pull/$1) [@$2](https://github.com/$2)`, or if using the GitHub generated release notes then you can replace `by @([^ ]*) in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$` with `[#$2](https://github.com/sqlfluff/sqlfluff/pull/$2) [@$1](https://github.com/$1)`. - [ ] For the new contributors section, you can replace `\* @([^ ]*) made their first contribution in https://github.com/sqlfluff/sqlfluff/pull/([0-9]*)$` with `* [@$1](https://github.com/$1) made their first contribution in [#$2](https://github.com/sqlfluff/sqlfluff/pull/$2)` to do this automatically. - [ ] Check each issue title is clear, and if not, edit the issue title (which will automatically update the Release notes on the next PR merged, as the Draft one is recreated in full each time). We also don't use [conventional commit PR titles](https://www.conventionalcommits.org/en/v1.0.0/) (e.g. `feat`), so make them more readable as plain English. Make the same edits locally in [CHANGELOG.md](CHANGELOG.md). - [ ] Add a comment at the top to highlight the main things in this release. - [ ] If this is a non-patch release then update the `Notable changes` section in `index.rst` with a brief summary of the new features added that made this a non-patch release. - [ ] View the CHANGELOG in this branch on GitHub to ensure you didn't miss any link conversions or other markup errors. - [ ] Open a draft PR with those changes a few days in advance to give contributors notice. Tag those with open PRs in the PR in GitHub to give them time to merge their work before the new release. - [ ] Comment in the #contributing Slack channel about the release candidate. - [ ] Update the draft PR as more changes get merged. - [ ] Get another contributor to approve the PR. - [ ] Merge the PR when it looks like we've got all we're gonna get for this release. - [ ] Go to the [releases page](https://github.com/sqlfluff/sqlfluff/releases), edit the release to be the same as [CHANGELOG.md](CHANGELOG.md) (remember to remove your release PR, which doesn't need to go in this). Add a version tag and a title and click "Publish release". - [ ] Announce the release in the #general channel, with shout-outs to those who contributed many or big items. - [ ] Announce the release on Twitter (@tunetheweb can do this, or let him know your Twitter handle if you want access to tweet on SQLFluff's behalf). :warning: **Before creating a new release, ensure that [setup.cfg](setup.cfg) is up-to-date with a new version** :warning:. If this is not done, PyPI will reject the package. Also, ensure you have used that version as a part of the tag and have described the changes accordingly. #### Releasing Manually If for some reason the package needs to be submitted to PyPI manually, we use `twine`. You will need to be an admin to submit this to PyPI, and you will need a properly formatted `.pypirc` file. If you have managed all that then you can run: ```shell tox -e publish-dist ``` ... and the most recent version will be uploaded to PyPI. sqlfluff-2.3.5/Dockerfile000066400000000000000000000022631451700765000153450ustar00rootroot00000000000000FROM python:3.9-slim-bullseye # Set separate working directory for easier debugging. WORKDIR /app # Create virtual environment. ENV VIRTUAL_ENV /app/.venv RUN python -m venv $VIRTUAL_ENV ENV PATH $VIRTUAL_ENV/bin:$PATH RUN pip install --no-cache-dir --upgrade pip setuptools wheel # Install requirements separately # to take advantage of layer caching. # N.B. 
we extract the requirements from setup.cfg COPY setup.cfg . RUN python -c "import configparser; c = configparser.ConfigParser(); c.read('setup.cfg'); print(c['options']['install_requires'])" > requirements.txt RUN pip install --no-cache-dir --upgrade -r requirements.txt # Copy minimal set of SQLFluff package files. COPY MANIFEST.in . COPY README.md . COPY setup.py . COPY src ./src # Install sqlfluff package. RUN pip install --no-cache-dir --no-dependencies . # Switch to non-root user. USER 5000 # Switch to new working directory as default bind mount location. # User can bind mount to /sql and not have to specify the full file path in the command: # i.e. docker run --rm -it -v $PWD:/sql sqlfluff/sqlfluff:latest lint test.sql WORKDIR /sql # Set SQLFluff command as entry point for image. ENTRYPOINT ["sqlfluff"] CMD ["--help"] sqlfluff-2.3.5/LICENSE.md000066400000000000000000000020611451700765000147530ustar00rootroot00000000000000MIT License Copyright (c) 2018 Alan Cruickshank Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
sqlfluff-2.3.5/MANIFEST.in000066400000000000000000000000521451700765000151030ustar00rootroot00000000000000include README.md LICENSE.md CHANGELOG.md sqlfluff-2.3.5/README.md000066400000000000000000000202121451700765000146240ustar00rootroot00000000000000![SQLFluff](https://raw.githubusercontent.com/sqlfluff/sqlfluff/main/images/sqlfluff-wide.png) # The SQL Linter for Humans [![PyPi Version](https://img.shields.io/pypi/v/sqlfluff.svg?style=flat-square&logo=PyPi)](https://pypi.org/project/sqlfluff/) [![PyPi License](https://img.shields.io/pypi/l/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Python Versions](https://img.shields.io/pypi/pyversions/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Status](https://img.shields.io/pypi/status/sqlfluff.svg?style=flat-square)](https://pypi.org/project/sqlfluff/) [![PyPi Downloads](https://img.shields.io/pypi/dm/sqlfluff?style=flat-square)](https://pypi.org/project/sqlfluff/) [![Coveralls](https://img.shields.io/coverallsCoverage/github/sqlfluff/sqlfluff?logo=coveralls&style=flat-square)](https://coveralls.io/github/sqlfluff/sqlfluff?branch=main) [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sqlfluff/sqlfluff/.github/workflows/ci-tests.yml?logo=github&style=flat-square)](https://github.com/sqlfluff/sqlfluff/actions/workflows/ci-tests.yml?query=branch%3Amain) [![ReadTheDocs](https://img.shields.io/readthedocs/sqlfluff?style=flat-square&logo=Read%20the%20Docs)](https://sqlfluff.readthedocs.io) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg?style=flat-square)](https://github.com/psf/black) [![Docker Pulls](https://img.shields.io/docker/pulls/sqlfluff/sqlfluff?logo=docker&style=flat-square)](https://hub.docker.com/r/sqlfluff/sqlfluff) **SQLFluff** is a dialect-flexible and configurable SQL linter. Designed with ELT applications in mind, **SQLFluff** also works with Jinja templating and dbt. **SQLFluff** will auto-fix most linting errors, allowing you to focus your time on what matters. ## Dialects Supported Although SQL is reasonably consistent in its implementations, there are several different dialects available with variations of syntax and grammar. **SQLFluff** currently supports the following SQL dialects (though perhaps not in full): - ANSI SQL - this is the base version and on occasion may not strictly follow the ANSI/ISO SQL definition - [Athena](https://aws.amazon.com/athena/) - [BigQuery](https://cloud.google.com/bigquery/) - [ClickHouse](https://clickhouse.com/) - [Databricks](https://databricks.com/) (note: this extends the `sparksql` dialect with [Unity Catalog](https://docs.databricks.com/data-governance/unity-catalog/index.html) syntax). 
- [Db2](https://www.ibm.com/analytics/db2) - [DuckDB](https://duckdb.org/) - [Exasol](https://www.exasol.com/) - [Greenplum](https://greenplum.org/) - [Hive](https://hive.apache.org/) - [Materialize](https://materialize.com/) - [MySQL](https://www.mysql.com/) - [Oracle](https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/index.html) - [PostgreSQL](https://www.postgresql.org/) (aka Postgres) - [Redshift](https://docs.aws.amazon.com/redshift/index.html) - [Snowflake](https://www.snowflake.com/) - [SOQL](https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm) - [SparkSQL](https://spark.apache.org/docs/latest/) - [SQLite](https://www.sqlite.org/) - [Teradata](https://www.teradata.com/) - [Transact-SQL](https://docs.microsoft.com/en-us/sql/t-sql/language-reference) (aka T-SQL) We aim to make it easy to expand on the support of these dialects and also add other, currently unsupported, dialects. Please [raise issues](https://github.com/sqlfluff/sqlfluff/issues) (or upvote any existing issues) to let us know of demand for missing support. Pull requests from those that know the missing syntax or dialects are especially welcomed and are the quickest way for you to get support added. We are happy to work with any potential contributors on this to help them add this support. Please raise an issue first for any large feature change to ensure it is a good fit for this project before spending time on this work. ## Templates Supported SQL itself does not lend itself well to [modularity](https://docs.getdbt.com/docs/viewpoint#section-modularity), so to introduce some flexibility and reusability it is often [templated](https://en.wikipedia.org/wiki/Template_processor) as discussed more in [our modularity documentation](https://docs.sqlfluff.com/en/stable/realworld.html#modularity). **SQLFluff** supports the following templates: - [Jinja](https://jinja.palletsprojects.com/) (aka Jinja2) - [dbt](https://www.getdbt.com/) Again, please raise issues if you wish to support more templating languages/syntaxes. ## VS Code Extension We also have a VS Code extension: - [GitHub Repository](https://github.com/sqlfluff/vscode-sqlfluff) - [Extension in VS Code marketplace](https://marketplace.visualstudio.com/items?itemName=dorzey.vscode-sqlfluff) # Getting Started To get started, install the package and run `sqlfluff lint` or `sqlfluff fix`. ```shell $ pip install sqlfluff $ echo "  SELECT a  +  b FROM tbl;  " > test.sql $ sqlfluff lint test.sql --dialect ansi == [test.sql] FAIL L: 1 | P: 1 | LT01 | Expected only single space before 'SELECT' keyword. | Found '  '. [layout.spacing] L: 1 | P: 1 | LT02 | First line should not be indented. | [layout.indent] L: 1 | P: 1 | LT13 | Files must not begin with newlines or whitespace. | [layout.start_of_file] L: 1 | P: 11 | LT01 | Expected only single space before binary operator '+'. | Found '  '. [layout.spacing] L: 1 | P: 14 | LT01 | Expected only single space before naked identifier. | Found '  '. [layout.spacing] L: 1 | P: 27 | LT01 | Unnecessary trailing whitespace at end of file. | [layout.spacing] L: 1 | P: 27 | LT12 | Files must end with a single trailing newline. | [layout.end_of_file] All Finished 📜 🎉! ``` Alternatively, you can use the [**Official SQLFluff Docker Image**](https://hub.docker.com/r/sqlfluff/sqlfluff) or have a play using [**SQLFluff online**](https://online.sqlfluff.com/). 
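For example, a minimal sketch of linting a file via the Docker image, following the bind-mount convention described in the project `Dockerfile` (where your current directory is mounted to `/sql` inside the container; `test.sql` is whatever file you want to lint):

```shell
# Mount the current directory to /sql and lint a file in it.
docker run --rm -it -v $PWD:/sql sqlfluff/sqlfluff:latest lint test.sql --dialect ansi
```
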
For full [CLI usage](https://docs.sqlfluff.com/en/stable/cli.html) and [rules reference](https://docs.sqlfluff.com/en/stable/rules.html), see [the SQLFluff docs](https://docs.sqlfluff.com/en/stable/). # Documentation For full documentation, visit [docs.sqlfluff.com](https://docs.sqlfluff.com/en/stable/). This documentation is generated from this repository, so please raise [issues](https://github.com/sqlfluff/sqlfluff/issues) or pull requests for any additions, corrections, or clarifications. # Releases **SQLFluff** adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html), so breaking changes should be restricted to major version releases. Some elements (such as the Python API) are in a less stable state and may see more significant changes more often. For details on breaking changes and how to migrate between versions, see our [release notes](https://docs.sqlfluff.com/en/latest/releasenotes.html). See the [changelog](CHANGELOG.md) for more details. If you would like to join in, please consider [contributing](CONTRIBUTING.md). New releases are made monthly. For more information, visit [Releases](https://github.com/sqlfluff/sqlfluff/releases). # SQLFluff on Slack We have a fast-growing community [on Slack](https://join.slack.com/t/sqlfluff/shared_invite/zt-o1f4x0e8-pZzarAIlQmKj_6ZwD16w0g), so come and join us! # SQLFluff on Twitter Follow us [on Twitter @SQLFluff](https://twitter.com/SQLFluff) for announcements and other related posts. # Contributing We are grateful to all our [contributors](https://github.com/sqlfluff/sqlfluff/graphs/contributors). There is a lot to do in this project, and we are just getting started. If you want to understand more about the architecture of **SQLFluff**, you can find [more here](https://docs.sqlfluff.com/en/latest/internals.html#architecture). If you would like to contribute, check out the [open issues on GitHub](https://github.com/sqlfluff/sqlfluff/issues). You can also see the guide to [contributing](CONTRIBUTING.md). # Sponsors Datacoves
The turnkey analytics stack; find out more at [Datacoves.com](https://datacoves.com/). sqlfluff-2.3.5/constraints/000077500000000000000000000000001451700765000157175ustar00rootroot00000000000000sqlfluff-2.3.5/constraints/dbt110.txt000066400000000000000000000000441451700765000174510ustar00rootroot00000000000000dbt-core~=1.1.0 dbt-postgres~=1.1.0 sqlfluff-2.3.5/constraints/dbt120.txt000066400000000000000000000000441451700765000174520ustar00rootroot00000000000000dbt-core~=1.2.0 dbt-postgres~=1.2.0 sqlfluff-2.3.5/constraints/dbt130.txt000066400000000000000000000000441451700765000174530ustar00rootroot00000000000000dbt-core~=1.3.0 dbt-postgres~=1.3.0 sqlfluff-2.3.5/constraints/dbt140.txt000066400000000000000000000000441451700765000174540ustar00rootroot00000000000000dbt-core~=1.4.0 dbt-postgres~=1.4.0 sqlfluff-2.3.5/constraints/dbt150-winpy.txt000066400000000000000000000000661451700765000206250ustar00rootroot00000000000000dbt-core~=1.5.0 dbt-postgres~=1.5.0 markupsafe<=2.0.1 sqlfluff-2.3.5/constraints/dbt150.txt000066400000000000000000000000441451700765000174550ustar00rootroot00000000000000dbt-core~=1.5.0 dbt-postgres~=1.5.0 sqlfluff-2.3.5/constraints/dbt160.txt000066400000000000000000000000441451700765000174560ustar00rootroot00000000000000dbt-core~=1.6.0 dbt-postgres~=1.6.0 sqlfluff-2.3.5/docs/000077500000000000000000000000001451700765000143005ustar00rootroot00000000000000sqlfluff-2.3.5/docs/Makefile000066400000000000000000000015421451700765000157420ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. # We want SPHINX to warn on error (-W) for example if we don't force SQL # formatting, but we also want to continue "--keep-going" and check the # other docs before exiting non-zero SPHINXOPTS ?= -W --keep-going SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile python generate-rule-docs.py @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) sqlfluff-2.3.5/docs/README.md000066400000000000000000000027151451700765000155640ustar00rootroot00000000000000# SQLFluff - Generating the documentation website You can run the following steps to generate the documentation website: ``` tox -e docbuild,doclinting ``` The `docbuild` job will recognise when source files have changed and only generate the changed files. To force a clean build (for example when changing config rather than source files), use the following command from the project root directory (drop the `-C docs` if running from within the `docs` directory). ``` make -C docs clean ``` The built HTML should be placed in `docs/build/html` and can be opened directly in the browser, or you can launch a simple webserver with the below command and then navigate to http://127.0.0.1:8000/ to view the site locally: ``` python -m http.server --directory docs/build/html ``` Again, this command is run from the root folder, not the `docs` subfolder, but you can alter the path as appropriate if needs be. 
If you don't want to use `tox`, then you can complete the steps manually with the following commands after setting up your Python environment as detailed in the [CONTRIBUTING.md](../CONTRIBUTING.md) file. ``` cd docs pip install -r requirements.txt make html python -m http.server --directory build/html ``` Or alternatively from the root folder: ``` pip install -r docs/requirements.txt make -C docs html python -m http.server --directory docs/build/html ``` The docs use Sphinx and are generated from the source code. The config is available in `docs/source/conf.py`. sqlfluff-2.3.5/docs/generate-rule-docs.py000066400000000000000000000071541451700765000203460ustar00rootroot00000000000000"""Generate rule documentation automatically.""" from collections import defaultdict from pathlib import Path from sqlfluff.core.plugin.host import get_plugin_manager base_path = Path(__file__).parent.absolute() ########################################## # Generate rule documentation dynamically. ########################################## autogen_header = """.. NOTE: This file is generated by the conf.py script. Don't edit this by hand """ table_header = f""" +{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+ |{'Bundle' : <42}|{'Rule Name' : <50}|{'Code' : <30}|{'Aliases' : <20}| +{'=' * 42}+{'=' * 50}+{'=' * 30}+{'=' * 20}+ """ # Extract all the rules. print("Rule Docs Generation: Reading Rules...") rule_bundles = defaultdict(list) for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: _bundle_name = rule.name.split(".")[0] rule_bundles[_bundle_name].append(rule) # Write them into the table. Bundle by bundle. print("Rule Docs Generation: Writing Rule Table...") with open(base_path / "source/partials/rule_table.rst", "w", encoding="utf8") as f: f.write(autogen_header) f.write(table_header) for bundle in sorted(rule_bundles.keys()): # Set the bundle name to the ref. _bundle_name = f":ref:`bundle_{bundle}`" for idx, rule in enumerate(rule_bundles[bundle]): step = 1 # The number of aliases per line. aliases = ", ".join(rule.aliases[:step]) + ( "," if len(rule.aliases) > step else "" ) name_ref = f":sqlfluff:ref:`{rule.name}`" code_ref = f":sqlfluff:ref:`{rule.code}`" f.write( f"| {_bundle_name : <40} | {name_ref : <48} " f"| {code_ref : <28} | {aliases : <18} |\n" ) j = 1 while True: if not rule.aliases[j:]: break aliases = ", ".join(rule.aliases[j : j + step]) + ( "," if len(rule.aliases[j:]) > step else "" ) f.write(f"|{' ' * 42}|{' ' * 50}|{' ' * 30}| {aliases : <18} |\n") j += step if idx + 1 < len(rule_bundles[bundle]): f.write(f"|{' ' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n") else: f.write(f"+{'-' * 42}+{'-' * 50}+{'-' * 30}+{'-' * 20}+\n") # Unset the bundle name so we don't repeat it. _bundle_name = "" f.write("\n\n") # Write each of the summary files. print("Rule Docs Generation: Writing Rule Summaries...") with open(base_path / "source/partials/rule_summaries.rst", "w", encoding="utf8") as f: f.write(autogen_header) for bundle in sorted(rule_bundles.keys()): if "sql" in bundle: # This accounts for things like "TSQL" header_name = bundle.upper() else: header_name = bundle.capitalize() # Write the bundle header. f.write( f".. _bundle_{bundle}:\n\n" f"{header_name} bundle\n" f"{'-' * (len(bundle) + 7)}\n\n" ) for rule in rule_bundles[bundle]: f.write( f".. sqlfluff:rule:: {rule.code}\n" f" {rule.name}\n\n" ) # Separate off the heading so we can bold it. 
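# (str.partition splits on the first newline only, so the first line of the rule docstring becomes the heading and the remainder becomes the body.)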
heading, _, doc_body = rule.__doc__.partition("\n") underline_char = '"' f.write(f" {heading}\n") f.write(f" {underline_char * len(heading)}\n\n") f.write(" " + doc_body) f.write("\n\n") print("Rule Docs Generation: Done") sqlfluff-2.3.5/docs/make.bat000066400000000000000000000014611451700765000157070ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) REM Generate the rule docs py generate-rule-docs.py %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd sqlfluff-2.3.5/docs/requirements.txt000066400000000000000000000002161451700765000175630ustar00rootroot00000000000000# Requirements for building docs sphinx>=2.4.1 # 2.13.0 added the sql+jinja syntax pygments>=2.13.0 sphinx-click sphinx-reredirects doc8 tqdm sqlfluff-2.3.5/docs/source/000077500000000000000000000000001451700765000156005ustar00rootroot00000000000000sqlfluff-2.3.5/docs/source/_ext/000077500000000000000000000000001451700765000165375ustar00rootroot00000000000000sqlfluff-2.3.5/docs/source/_ext/sqlfluff_domain.py000066400000000000000000000077311451700765000222720ustar00rootroot00000000000000"""The sqlfluff domain for documenting rules.""" from sphinx import addnodes from sphinx.directives import ObjectDescription from sphinx.domains import Domain, ObjType from sphinx.roles import XRefRole from sphinx.util.nodes import make_refnode class SQLFluffRule(ObjectDescription): """SQLFluff rule directive for sphinx. Rule directives can be used as shown below. .. code-block:: rst .. sqlfluff:rule:: AM01 ambiguous.distinct Write the documentation for the rule here. """ def handle_signature(self, sig, signode): """Handle the initial signature of the node. This formats the header of the section. """ raw_obj_type = "code" if len(sig) == 4 else "rule" obj_type = raw_obj_type.capitalize() + " " signode += addnodes.desc_type(obj_type, obj_type) signode += addnodes.desc_name(sig, sig) fullname = obj_type + sig signode["type"] = raw_obj_type signode["sig"] = sig signode["fullname"] = fullname return (fullname, raw_obj_type, sig) def add_target_and_index(self, name_cls, sig, signode): """Hook to add the permalink and index entries.""" # Add an ID for permalinks node_id = "rule" + "-" + sig signode["ids"].append(node_id) if len(sig) == 4: # If it's a code, add support for legacy links too. # Both of these formats have been used in the past. signode["ids"].append(f"sqlfluff.rules.Rule_{sig}") signode["ids"].append(f"sqlfluff.rules.sphinx.Rule_{sig}") # Add to domain for xref resolution fluff = self.env.get_domain("sqlfluff") fluff.add_rule(sig) # Add to index self.indexnode["entries"].append(("single", sig, node_id, "", None)) def _object_hierarchy_parts(self, sig_node): return ("bundle", "name") def _toc_entry_name(self, sig_node) -> str: # NOTE: toctree unpacking issues are due to incorrectly # setting _toc_parts. 
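# Added annotation (an assumption based on Sphinx's ObjectDescription
# conventions, per the NOTE above): sig_node["_toc_parts"] holds the
# tuple of hierarchy parts which Sphinx unpacks when building local
# table-of-contents entries, so its shape must line up with what
# _object_hierarchy_parts returns.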
sig_node["_toc_parts"] = ( "bundle", sig_node["sig"], ) if len(sig_node["sig"]) == 4: # It's a code - don't return TOC entry. return "" else: # It's a name return sig_node["sig"] class SQLFluffDomain(Domain): """SQLFluff domain.""" name = "sqlfluff" label = "sqlfluff" object_types = { "rule": ObjType("rule", "rule", "obj"), } roles = { "ref": XRefRole(), } directives = { "rule": SQLFluffRule, } initial_data = { "rules": [], # object list } def get_full_qualified_name(self, node): """Get the fully qualified name of the rule.""" return f"rule.{node.arguments[0]}" def get_objects(self): """Hook to get all the rules.""" yield from self.data["rules"] def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): """Hook to resolve xrefs. References can be made by code or by name, e.g. - :sqlfluff:ref:`LT01` - :sqlfluff:ref:`layout.spacing` """ match = [ (docname, anchor) for _, sig, _, docname, anchor, _ in self.get_objects() if sig == target ] if len(match) > 0: todocname = match[0][0] targ = match[0][1] return make_refnode(builder, fromdocname, todocname, targ, contnode, targ) else: print(f"Failed to match xref: {target!r}") return None def add_rule(self, signature): """Add a new recipe to the domain.""" name = f"rule.{signature}" anchor = f"rule-{signature}" # name, dispname, type, docname, anchor, priority self.data["rules"].append( (name, signature, "Rule", self.env.docname, anchor, 0) ) def setup(app): """Setup the domain.""" app.add_domain(SQLFluffDomain) sqlfluff-2.3.5/docs/source/_static/000077500000000000000000000000001451700765000172265ustar00rootroot00000000000000sqlfluff-2.3.5/docs/source/_static/custom.css000066400000000000000000000004461451700765000212560ustar00rootroot00000000000000/* Overrides for spacing within autodoc */ dl.py { /* Spacing is normally far too tight, this makes a bit more space between elements */ padding-bottom: 30px; } /* Override unnecessary underline in alabaster's default styling - see #2266 */ .highlight .w { text-decoration: none; } sqlfluff-2.3.5/docs/source/_static/images/000077500000000000000000000000001451700765000204735ustar00rootroot00000000000000sqlfluff-2.3.5/docs/source/_static/images/sqlfluff-lrg.png000066400000000000000000002547621451700765000236250ustar00rootroot00000000000000PNG  IHDR9b4sBIT|d pHYs.#.#x?vtEXtSoftwarewww.inkscape.org< IDATx{x;3{IB@@AE*IvFѪւZVjZckZEz[̆ЪJ *.c7 efv7<>99I&9sBQVVWUթ@?}gf# "j`wv3ofdhΠ}38.)b愢([29x(:;ζmf捭˖-{%vtIӧO;#}y"y F6Q"V1/&-9 >Ifެ(ff&ff Wd7&DG\)ʈh63@g{Dm,k|7bwiZ8": @QK%Q"z4kŊ[ݨǫZ;e e<ѕ4$g0d`*zz]ٳӉTf.ef3H]& CPs$y ]d(@q]R D,F"ID@3K϶w҉7(xa@إhGboFwÄL9sw "C"d&u}E"l=CDD"GheY(!ړ[VV6RӴ_9s*$ኔ.D"oLw[?ҽu].]tu$|OӧOrf^[H$@ssUmG ønذa\xqn;2zѩ(-..~$]|_`HsCnOWW}'ϕOwchKwZqq.+5 c(3 6sgY֝n(=\^o$NQB0 4 Hi0I"3NdF5tf>;=VYY٭r=kq,5Gp9;k֬"]k*Ȭzim?82JdU5 Tf~@-'=ٳgO8>n`O14 n'H$Cf2J쾬'޹s 2siށhUUU|7D.< 36[I1"z㼯(Jm'C(G&:ܗΎ#bUU;Y8SDDΎPdFf]1;DDft7fֈ(ifD)30c\u}mۏţ#2cWi.3wdtϡ|HD{ `Av??~$  DNbԎ&KܽyW\V|mn&zYfX޲F,{340H&pW{"2JHd: ~K,آp8|3OBfpeYe!iDDZ(f> fe~`Ki.vqRo#trm/#)_X~ èN^S -}+r*!Բ.//vbnS,iڳ|ICDS^Hdn9]?ZOdۊ`_B ÇY`CTĆ@7>m$E^rhl0DtiOdY]]ݿO>.!x<~jb Ѧvht]dTϿIO`Olnn>HnrʘeY?p Ϸwi>l봙xѭ~Tw$i6OD":! 
sqlfluff-2.3.5/docs/source/_static/images/sqlfluff-sm2-sq.png000066400000000000000000000307311451700765000241470ustar00rootroot00000000000000[binary PNG image data omitted]
եKq޽{5ѵkWtܙ;q/q%;(L3Ac?N銗3U\\ܽO>)S BX d2X,,ˢW^5L"33_=z$Ibǎoo޼|M8q}P(3?,{p ۿy5kVVV>UXX ++3Lcfs/׮][aVlٳgb`0 )Sypdq\l\\d7h4oj4L<Yq<& 7oFll,/^\禇T;<==+z-֭[ ===}@)aY(\Dr$IAPPА3gblN Vn: jiFї gFCaaaf]+: 63sEL`IwЁ^~==hPc-CYY/^ ///,]މ-V:&H򊋋g,(j{bbpJTYYYP(*++d0vTJuV" 0*qnhgB]N_9٧#\>)9e#F N9r$ڪfӦM8u֯__um6S̹ƍR1UUF28U'*~eeeWWUL&h| T*է$O*** oArKzz k}R0 Vۨ1y衇p M1cx3ä$ر">>Fı5ۜFge;,X?I===8!!!qƩnݺfz-3f ?zrUn` ޮu([;'3Yj'(;;QVVfmtH$BAAFǏwڥtyzzj4er[1=**_Z֦jj6(**y) 6=xϏkVNVV(ƒKP*J$&&4=eًK Řl22!!Qrp߿1Q^^^DO6M裏^^/55M]ʂH$˗2IIIX,l6h+ Z:)ޞ͆R c4!%K'7Ytqqq. k 7** [lǝ|bKcnn%%%q}]'r#DD-_m0>hћvW\O.СCy#""Hdݑ*e̊+lBeb8극<3gr8jl4ձ~-K4٧Oh4|7tmxaÆ BBBHsNNh07 %Ntλ[q㸵[SRu8?\1|p~~~s?p@HRx'K 1p@hZܹj639~GDDL%"|6[sWLD&EQGٓ lP8G>c դIhxk#G?W̤~w駟&AGD˲ lmT.f$@PD;u$1c*""h4xCO> xt3j- ZwMZ,Greee$xm2pRX67~\&flW=MPGP&Ir˲AdܹgϞ9!!!شi6ne[m֚3999NjRRRnX,UEŰ,$rW{^N*j:bH$:қҦz{zTTl6O"bx?pO>I,YkY M#ZhzΜ9OY}Ai!!!4hzCKI&,c2 T*ջeee9P(.//@T*6LjAQ~~k*w~'I2nL&,ժFMWq+Sz;75e0RYp߾}U`tp81bDن :46`spYBZ;vDйsg<ӧm^cǎ5VTTP+Wd2w`0t!Co>["ls8o(~~~ӿROL&noJ#I2$c$IPdkwx@uegqsZ `L&LdgRܹ3ݺuEGG (..Faa!qudff"11fʕ+9r-<<=t(..ڵ+z쉵krW\~嗜P((?~ҥKmB0_,H4窆}|0q$[bkP ,˒At0 IJ ~=EGGf̘![Dt>D"|8qO<+,Y /Bbr> HPPoQׯ_ǖ-[GᢢAd2z-%/$ "/˲-ˊAN ^Ip8kx2ea_!^hd@ * E`Zeڎ;ϟ?fǎ|K"44޲o)==ݱsN[iƍS e2Y]nnn ;%KTΟ? ݻ7V^@ڿC",7 +јq/g57h˖-#ϟ?*hhӷo_vꐐ*88̦n޼^x ,?\g4cǎ3`s1X|yia>osws[U-Ҳ&<9pu{MV%}hc6I$o%FuRVVYfaŊvjv+V@LL [RRBDJJuj.+--71#p7&Iz}N!AnބBO.3ӧOǸq㨀tԩ5J_~%^|EٳmD ĉP(߿ݸqcd֭fJE 04Zk.|N+WVkTTTMII)˃l?… رcǢGŶm`0i&l۶ yyytl6{^W1WZ ڵ is0Zɡ6olÇh 繹sv܉˗/} ?|?˱m۶?\^ίfcbb*oܸb54 ] eƾ{揄BE0@~eeG6l`vVGbضmҐшb Djj* ei&>}gϞ|ddd`޽ѣ._8",, ,IС]UVnCRYfMs!"(q9B*gggرc͕8pş9sZXn]@ ;s 222,c'|bLMMnl2Vl6M*^jKPk; O$nFD1AiiijdRbIIhT%TΝp8j1tP|נ( ΝC~~>r94 N< Z łdt .]B߾}p̟?Æ CAAJ\bdee!##HMMEN|h4Ü8qL&әW>f2.[,#ׯ__RTTkׂVkg`Wח+LNN^ʲlb 0F7;G8;S\\JVXVVfg5k ((gϞEHHj5?~A*"((t:u@DGGC$K.8]t3p? >ԕa͚5a忿kO?1 xJJ m2e˖+W$J rqq~mauVŋI h4~n谣W'Rq7s8L&Ly+**r HDѯ_?aê4; VEhh( tBAAAʪ=( ǏlXYYYY-..c΂Ok(h< .H 2lPu d} $[߰lb Pdګr:{\.Ө(uEU g[aPoVPR iϮqpJ?q?qKW3wMc l"Ri`m˗V~ׇ:Uh}GGG nܸQ#'EQccc RO#ݜQOF>}A1L&8 ְТ=}Æ v&We"˲gL29ѽ7v]CZ|ݎODAze_DNX,ŋ[Ţ%11ѹJV_۷oEQOp^u:wdH8|ʖe,^iu={~%I1 ]M`N#4luzm^]v v0(((i3޽{[od2/UմDΕ b_UDMHHdBשhҽ{&x<$IdG'LPq\ ]'A-r _.^ !dLLTEMjyU91&LZl `5,i6WΠ*ʲ`6~MS+lCC\-8qbc2+9 ÌFAitBB*=]m]AdcHO~a' 㸵+):YbfoنH"©'jz} .]ױ^8szlK4-q# saS&)ȑ# &HlrUew܃$ɾ}F]Fŵs t邹skƑ 6[{f[ zrq\I ɲ\f^6>h߂W w 崈oqO pY:t$h}i:PlȨ/IܸQ-S-ӤF1ǭ͒uβ:822_-|C@$rsIprwNpUHߍ Jς~Xx 9[<9[ַ}O5zl1L;v`ڌF7d2ADfh߾}pg'"##8!hTW5ms=BU̵%K@"n233w|5A"hs*%-\tFTII :w nEq G #[;5mch~9nR@y(666H#mfZ+v ->J;` P.=z3M'5r? Ҧ\{6A,tOMZ5TIl @ kO IjIn0*Uj(V]kܯB F<F(l$0ۘ.!=p ou?0j;tW ~~Hr{x ͸߄UA[kf-~OS(OOO4)zN۷:F(= .D 7rFF\kЩS'<Ώ ZzJ,WǕ'Ot q;5 {'? {W![m0ipz\]T~PU©cOLLt[$ꠃڀq7/Nb1,d۫ݝ-Z _;u{ق>LX,d2,]={~iiinKRd[EpǏT(hYYu{JV=;dnq99=n8իWᚖ[O!ˆ@jjK1cFgbzfs_z_NJB.]c'%ZڛW{_.|u gϞ5u|nߺrI$Wkw>E&WԅӴVϭ6kLj$л'ܜ5ni S33^]v5=a Xf|Fƺڵ+իWo |mjn~6~I@z#f… Btt4~+QK9G„޳MЍFcyX"""P(HcرuVehK7_~ :_xCu[i3cƌfkhG( ƍÐ!C\Un)//ѣG]di!L"Fy؏g^7|*T*1dۻZn24\xu =.M [TTb,wo'`vR( " Ȓ\EZ5;Z=mF 0'IHZZڱVc 7qnwե.2j&'WZ:eG1 _ 98Ak,Z.JEkR4~IENDB`sqlfluff-2.3.5/docs/source/api.rst000066400000000000000000000020451451700765000171040ustar00rootroot00000000000000.. _apiref: API Reference ============= SQLFluff exposes a public api for other python applications to use. A basic example of this usage is given here, with the documentation for each of the methods below. .. literalinclude:: ../../examples/01_basic_api_usage.py :language: python Simple API commands ------------------- .. automodule:: sqlfluff :members: lint, fix, parse Advanced API usage ------------------ The simple API presents only a fraction of the functionality present within the core SQLFluff library. 
For more advanced use cases, users can import the :code:`Linter()` and :code:`FluffConfig()` classes from :code:`sqlfluff.core`. As of version 0.4.0, this is considered *experimental only*, as the internals may change without warning in any future release. If you come to rely on the internals of SQLFluff, please post an issue on GitHub to share what you're up to. This will help shape a more reliable, tidy and well-documented public API for use. .. automodule:: sqlfluff.core :members: Linter, Lexer, Parser, FluffConfig
sqlfluff-2.3.5/docs/source/cli.rst000066400000000000000000000001631451700765000171010ustar00rootroot00000000000000.. _cliref: CLI Reference ============= .. click:: sqlfluff.cli.commands:cli :prog: sqlfluff :show-nested:
sqlfluff-2.3.5/docs/source/conf.py000066400000000000000000000104031451700765000170750ustar00rootroot00000000000000"""Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html """ import configparser import os import sys # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath("./_ext")) # Get the global config info as currently stated # (we use the config file to avoid actually loading any python here) config = configparser.ConfigParser() config.read(["../../setup.cfg"]) stable_version = config.get("sqlfluff_docs", "stable_version") # -- Project information ----------------------------------------------------- project = "SQLFluff" copyright = "2023, Alan Cruickshank" author = "Alan Cruickshank" # The full version, including alpha/beta/rc tags release = stable_version # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ # Autodocumentation from docstrings "sphinx.ext.autodoc", # Allow Google style docstrings "sphinx.ext.napoleon", # Documenting click commands "sphinx_click.ext", # Redirects "sphinx_reredirects", # SQLFluff domain "sqlfluff_domain", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ # Exclude the partials folder, which is made up of files intended # to be included in others. "partials", ] # Master doc master_doc = "index" # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "alabaster" html_favicon = "favicon-fluff.png" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"] # -- Options for Alabaster Theme --------------------------------------------- html_theme_options = { # Set branch to main (used by Codecov button) "badge_branch": "main", "logo": "images/sqlfluff-lrg.png", # Icon for iOS shortcuts "touch_icon": "images/sqlfluff-sm2-sq.png", "github_user": "sqlfluff", "github_repo": "sqlfluff", # GitHub Fork button (points at a broken link, so disabling it) "github_banner": False, # GitHub star button "github_type": "star", # Use `"true"` instead of `True` for counting GitHub star, see https://ghbtns.com "github_count": "true", } # -- Options for redirects --------------------------------------------- # https://documatt.gitlab.io/sphinx-reredirects/usage.html redirects = { # There's an old link to /indentation in config files. # That should point to the layout section now. "indentation": "layout.html#configuring-indent-locations", "architecture": "internals.html#architecture", } def ultimate_replace(app, docname, source): """Replaces variables in docs, including code blocks. From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229 """ result = source[0] for key in app.config.ultimate_replacements: result = result.replace(key, app.config.ultimate_replacements[key]) source[0] = result ultimate_replacements = {"|release|": release} def setup(app): """Configures the documentation app.""" app.add_config_value("ultimate_replacements", {}, True) app.connect("source-read", ultimate_replace)
sqlfluff-2.3.5/docs/source/configuration.rst000066400000000000000000001204011451700765000211770ustar00rootroot00000000000000.. _config: Configuration ============= SQLFluff accepts configuration either through the command line or through configuration files. There is *rough* parity between the two approaches, with the exception that *templating* configuration must be done via a file, because it otherwise gets slightly complicated. For details of what's available on the command line, check out the :ref:`cliref`. .. _`config-files`: Configuration Files ------------------- For file-based configuration *SQLFluff* will look for the following files in order. Later files (if found) will be used to overwrite any values read from earlier files. - :code:`setup.cfg` - :code:`tox.ini` - :code:`pep8.ini` - :code:`.sqlfluff` - :code:`pyproject.toml` Within these files, the first four will be read like a `cfg file`_, and *SQLFluff* will look for sections which start with :code:`sqlfluff`, and where subsections are delimited by a colon. For example the *jinjacontext* section will be indicated in the section started with :code:`[sqlfluff:jinjacontext]`. For example, a snippet from a :code:`.sqlfluff` file (as well as any of the supported cfg file types): .. code-block:: cfg [sqlfluff] templater = jinja sql_file_exts = .sql,.sql.j2,.dml,.ddl [sqlfluff:indentation] indented_joins = False indented_using_on = True template_blocks_indent = False [sqlfluff:templater] unwrap_wrapped_queries = True [sqlfluff:templater:jinja] apply_dbt_builtins = True For the `pyproject.toml file`_, all valid sections start with :code:`tool.sqlfluff` and subsections are delimited by a dot. For example the *jinjacontext* section will be indicated in the section started with :code:`[tool.sqlfluff.jinjacontext]`. For example, a snippet from a :code:`pyproject.toml` file: ..
code-block:: toml [tool.sqlfluff.core] templater = "jinja" sql_file_exts = ".sql,.sql.j2,.dml,.ddl" [tool.sqlfluff.indentation] indented_joins = false indented_using_on = true template_blocks_indent = false [tool.sqlfluff.templater] unwrap_wrapped_queries = true [tool.sqlfluff.templater.jinja] apply_dbt_builtins = true # For rule specific configuration, use dots between the names exactly # as you would in .sqlfluff. In the background, SQLFluff will unpack the # configuration paths accordingly. [tool.sqlfluff.rules.capitalisation.keywords] capitalisation_policy = "upper" .. _`cfg file`: https://docs.python.org/3/library/configparser.html .. _`pyproject.toml file`: https://www.python.org/dev/peps/pep-0518/ .. _starter_config: New Project Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^ When setting up a new project with SQLFluff, we recommend keeping your configuration file fairly minimal. The config file should act as a form of *documentation* for your team, i.e. a record of what decisions you've made which govern how you format your SQL. By having a more concise config file, and only defining config settings where they differ from the defaults, you are more clearly stating to your team what choices you've made. *However*, there are also a few places where the *default* configuration is designed more for *existing projects*, rather than *fresh projects*, and so there is an opportunity to be a little stricter than you might otherwise be with an existing codebase. Here is a simple configuration file which would be suitable for a starter project: .. literalinclude:: partials/starter_config.cfg :language: cfg .. _nesting: Nesting ^^^^^^^ **SQLFluff** uses **nesting** in its configuration files, with files closer to the file being linted *overriding* (or *patching*, if you will) values from files further away. That means you'll end up with a final config which will be a patchwork of all the values from the config files loaded up to that path. The exception to this is the value for `templater`, which cannot be set in config files in subdirectories of the working directory. You don't **need** any config files to be present to make *SQLFluff* work. If you do want to override any values, though, SQLFluff will use files in the following locations in order, with values from later steps overriding those from earlier: 0. *[...and this one doesn't really count]* There's a default config as part of the SQLFluff package. You can find this below, in the :ref:`defaultconfig` section. 1. It will look in the user's OS-specific app config directory. On macOS and Unix this is `~/.config/sqlfluff`, Windows is `\\AppData\\Local\\sqlfluff\\sqlfluff`, for any of the filenames above in the main :ref:`config` section. If multiple are present, they will *patch*/*override* each other in the order above. 2. It will look for the same files in the user's home directory (~). 3. It will look for the same files in the current working directory. 4. *[if parsing a file in a subdirectory of the current working directory]* It will look for the same files in every subdirectory between the current working dir and the file directory. 5. It will look for the same files in the directory containing the file being linted. This whole structure leads to efficient configuration, in particular in projects which utilise a lot of complicated templating. .. _in_file_config: In-File Configuration Directives -------------------------------- In addition to the configuration files mentioned above, SQLFluff also supports comment-based configuration switching in files.
This allows a specific SQL file to modify the default configuration if it has specific needs. When used, these apply to the whole file, and are parsed from the file in an initial step before the rest of the file is properly parsed. This means they can be used for both rule configuration and also for parsing configuration. To use these, the syntax must start as an *inline sql comment* beginning with :code:`sqlfluff` (i.e. :code:`-- sqlfluff`). The line is then interpreted as a colon-separated address of the configuration value you wish to set. A few common examples are shown below: .. code-block:: sql -- Set Indented Joins -- sqlfluff:indentation:indented_joins:True -- Set a smaller indent for this file -- sqlfluff:indentation:tab_space_size:2 -- Set keywords to be capitalised -- sqlfluff:rules:capitalisation.keywords:capitalisation_policy:upper SELECT * FROM a JOIN b USING(c) We recommend only using this configuration approach for configuration that applies to one file in isolation. For configuration changes for areas of a project or for whole projects, we recommend :ref:`nesting` of configuration files. .. _ruleconfig: Rule Configuration ------------------ Rules can be configured with the :code:`.sqlfluff` config files. Common rule configurations can be set in the :code:`[sqlfluff:rules]` section. For example: .. code-block:: cfg [sqlfluff:rules] allow_scalar = True single_table_references = consistent unquoted_identifiers_policy = all Rule specific configurations are set in rule specific subsections. For example, enforce that keywords are upper case by configuring the rule :sqlfluff:ref:`CP01`: .. code-block:: cfg [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = upper All possible options for rule sections are documented in :ref:`ruleref`. For an overview of the most common rule configurations that you may want to tweak, see `Default Configuration`_ (and use :ref:`ruleref` to find the available alternatives). .. _ruleselection: Enabling and Disabling Rules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The decision as to which rules are applied to a given file is made on a file-by-file basis, by the effective configuration for that file. There are two configuration values which you can use to set this: * :code:`rules`, which explicitly *enables* the specified rules. If this parameter is unset or empty for a file, this implies "no selection" and so "all rules" is taken to be the meaning. * :code:`exclude_rules`, which explicitly *disables* the specified rules. This parameter is applied *after* the :code:`rules` parameter so can be used to *subtract* from the otherwise enabled set. Each of these two configuration values accepts a comma-separated list of *references*. Each of those references can be: * a rule *code* e.g. :code:`LT01` * a rule *name* e.g. :code:`layout.indent` * a rule *alias*, which is often a deprecated *code* e.g. :code:`L003` * a rule *group* e.g. :code:`layout` or :code:`capitalisation` These different references can be mixed within a given expression, which results in a very powerful syntax for selecting exactly which rules are active for a given file. .. note:: It's worth mentioning here that the application of :code:`rules` and :code:`exclude_rules`, with *groups*, *aliases* and *names*, in projects with potentially multiple nested configuration files defining different rules for different areas of a project, can get very confusing very fast.
While this flexibility is intended for users to take advantage of, we do have some recommendations about how to do this in a way that remains manageable. When considering configuration inheritance, each of :code:`rules` and :code:`exclude_rules` will totally overwrite any values in parent config files if they are set in a child file. While the subtraction operation between both of them is calculated *"per file"*, there is no combination operation between two definitions of :code:`rules` (just one overwrites the other). The effect of this is that we recommend one of two approaches: #. Simply only use :code:`rules`. This has the upshot of each area of your project being very explicit in which rules are enabled. When that changes for part of your project, you just reset the whole list of applicable rules for that part of the project. #. Set a single :code:`rules` value in your master project config file and then only use :code:`exclude_rules` in sub-configuration files to *turn off* specific rules for parts of the project where those rules are inappropriate. This keeps the simplicity of only having one value which is inherited, but allows slightly easier and simpler rollout of new rules because we manage by exception. For example, to disable the rules :sqlfluff:ref:`LT08` and :sqlfluff:ref:`RF02`: .. code-block:: cfg [sqlfluff] exclude_rules = LT08, RF02 To enable individual rules instead, configure :code:`rules`. For example, to enable :sqlfluff:ref:`RF02`: .. code-block:: cfg [sqlfluff] rules = RF02 Rules can also be enabled/disabled by their grouping. Right now, the only rule grouping is :code:`core`. This will enable (or disable) a select group of rules that have been deemed 'core rules'. .. code-block:: cfg [sqlfluff] rules = core More information about 'core rules' can be found in the :ref:`ruleref`. Additionally, some rules have a special :code:`force_enable` configuration option, which allows you to enable the given rule even for dialects where it is disabled by default. The rules that support this can be found in the :ref:`ruleref`. The default values can be seen in `Default Configuration`_. See also: `Ignoring Errors & Files`_. Downgrading rules to warnings ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To keep displaying violations for specific rules, but not have those issues lead to a failed run, rules can be downgraded to *warnings*. Rules set as *warnings* won't cause a file to fail, but will still be shown in the CLI to warn users of their presence. The configuration of this behaves much like :code:`exclude_rules` above: .. code-block:: cfg [sqlfluff] warnings = LT01, LT04 With this configuration, files with no other issues (other than those set to warn) will pass. If there are still other issues, then the file will still fail, but will show both warnings and failures. .. code-block:: == [test.sql] PASS L: 2 | P: 9 | LT01 | WARNING: Missing whitespace before + == [test2.sql] FAIL L: 2 | P: 8 | CP02 | Unquoted identifiers must be consistently upper case. L: 2 | P: 11 | LT01 | WARNING: Missing whitespace before + This is particularly useful as a transitional tool when considering the introduction of new rules on a project, where you might want to make users aware of issues without blocking their workflow (yet). Layout & Spacing Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :code:`[sqlfluff:layout]` section of the config controls the treatment of spacing and line breaks across all rules.
To understand more about this section, see the section of the docs dedicated to layout: :ref:`layoutconfig`. .. _templateconfig: Templating Configuration ------------------------ This section explains how to configure templating for SQL files. When writing SQL files, users might utilise some kind of templating. The SQL file itself is written with placeholders which get rendered to proper SQL at run time. This can range from very simple placeholder templating to complex Jinja templating. SQLFluff supports templated sections in SQL; see :ref:`templater`. This is achieved by the following set of operations: 1. SQLFluff pre-renders the templated SQL 2. SQLFluff applies the lint and fix operations to the rendered file 3. SQLFluff backports the rule violations to the templated section of the SQL. SQLFluff does not automatically have access to the same environment used in production template setup. This means it is necessary to either provide that environment or provide dummy values to effectively render the template and generate valid SQL. Refer to the templater sections below for details. SQLFluff natively supports the following templating engines: - `Jinja templater`_ - `Placeholder templater`_ - `Python templater`_ Also, SQLFluff has an integration to use :code:`dbt` as a templater. - `dbt templater`_ (via a plugin, which is covered in a different section). .. note:: Templaters may not be able to generate rendered SQL that covers the entire raw file. For example, if the raw SQL uses a :code:`{% if condition %}` block, the rendered version of the template will only include either the *if* branch or the :code:`{% else %}` branch (depending on the provided configuration for the templater), but not both. In this case, because SQLFluff linting can only operate on the output of the templater, some areas of the raw SQL will never be seen by the linter and will not be covered by lint rules. This is functionality we hope to support in future. .. _generic_variable_templating: Generic Variable Templating ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Variables are available in all the templaters. By default the templating engine will expect variables for templating to be available in the config, and the templater will look in the section corresponding to the context for that templater. By convention, the config for the ``jinja`` templater is found in the ``sqlfluff:templater:jinja:context`` section, the config for the ``python`` templater is found in the ``sqlfluff:templater:python:context`` section, and the one for the ``placeholder`` templater is found in the ``sqlfluff:templater:placeholder:context`` section. For example, if passed the following *.sql* file: .. code-block:: SQL+Jinja SELECT {{ num_things }} FROM {{ tbl_name }} WHERE id > 10 LIMIT 5 ...and the following configuration in *.sqlfluff* in the same directory: .. code-block:: cfg [sqlfluff:templater:jinja:context] num_things=456 tbl_name=my_table ...then before parsing, the SQL will be transformed to: .. code-block:: sql SELECT 456 FROM my_table WHERE id > 10 LIMIT 5 .. note:: If there are variables in the template which cannot be found in the current configuration context, then this will raise a `SQLTemplatingError` and this will appear as a violation without a line number, quoting the name of the variable that couldn't be found. Jinja templater ^^^^^^^^^^^^^^^ The Jinja templater uses Jinja2_ to render templates. .. _Jinja2: https://jinja.palletsprojects.com/ There are multiple, complementary ways of configuring the Jinja templater.
Jinja templater
^^^^^^^^^^^^^^^

The Jinja templater uses Jinja2_ to render templates.

.. _Jinja2: https://jinja.palletsprojects.com/

There are multiple, complementary ways of configuring the Jinja templater:

- Reading variables and Jinja macros directly from the SQLFluff config file
- Loading macros from a path
- Using a library

.. list-table:: Overview of Jinja templater's configuration options
   :header-rows: 1

   * - Configuration
     - Variables
     - Macros
     - Filters
     - Documentation
   * - Config file
     - ✅
     - ✅
     - ❌
     - `Complex Jinja Variable Templating`_ and
       `Jinja Macro Templating (from config)`_
   * - Macro Path
     - ❌
     - ✅
     - ❌
     - `Jinja Macro Templating (from file)`_
   * - Library
     - ✅
     - ✅
     - ✅
     - `Library Templating`_

For example, a snippet from a :code:`.sqlfluff` file that uses all config
options:

.. code-block:: cfg

    [sqlfluff]
    templater = jinja

    [sqlfluff:templater:jinja]
    apply_dbt_builtins = True
    load_macros_from_path = my_macros
    library_path = sqlfluff_libs

    [sqlfluff:templater:jinja:context]
    my_list = ['a', 'b', 'c']
    MY_LIST = ("d", "e", "f")
    my_where_dict = {"field_1": 1, "field_2": 2}

    [sqlfluff:templater:jinja:macros]
    a_macro_def = {% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %}

Complex Jinja Variable Templating
"""""""""""""""""""""""""""""""""

Apart from the generic variable templating that is supported for all
templaters, two more advanced features of variable templating are available
for Jinja: *case sensitivity* and *native python types*. Both are
illustrated in the following example:

.. code-block:: cfg

    [sqlfluff:templater:jinja:context]
    my_list = ['a', 'b', 'c']
    MY_LIST = ("d", "e", "f")
    my_where_dict = {"field_1": 1, "field_2": 2}

.. code-block:: SQL+Jinja

    SELECT
        {% for elem in MY_LIST %}
            '{{elem}}' {% if not loop.last %}||{% endif %}
        {% endfor %} as concatenated_list
    FROM tbl
    WHERE
        {% for field, value in my_where_dict.items() %}
            {{field}} = {{value}} {% if not loop.last %}and{% endif %}
        {% endfor %}

...will render as...

.. code-block:: sql

    SELECT
        'd' || 'e' || 'f' as concatenated_list
    FROM tbl
    WHERE
        field_1 = 1 and field_2 = 2

Note that the variable was replaced in a case sensitive way and that the
settings in the config file were interpreted as native python types.

Jinja Macro Templating (from config)
""""""""""""""""""""""""""""""""""""

Macros (which also look and feel like *functions*) are available only in
the *jinja* templater. Similar to `Generic Variable Templating`_, these are
specified in config files; what's different in this case is how they are
named. Similar to the *context* section above, macros are configured
separately in the *macros* section of the config.

Consider the following example. If passed the following *.sql* file:

.. code-block:: SQL+Jinja

    SELECT {{ my_macro(6) }} FROM some_table

...and the following configuration in *.sqlfluff* in the same directory
(note the tight control of whitespace):

.. code-block:: cfg

    [sqlfluff:templater:jinja:macros]
    a_macro_def = {% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %}

...then before parsing, the sql will be transformed to:

.. code-block:: sql

    SELECT 6 + 12 FROM some_table

Note that in the code block above, the variable name in the config is
*a_macro_def*, and this isn't apparently otherwise used anywhere else.
Broadly this is accurate, however within the configuration loader this will
still be used to overwrite previous *values* in other config files. As such
this introduces the idea of config *blocks* which could be selectively
overwritten by other configuration files downstream as required.
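If you want to check what a configured macro will expand to, you can
reproduce the expansion with Jinja directly. The following is a small
illustrative sketch using the ``jinja2`` package on its own (not SQLFluff's
templater), with the same macro as above:

.. code-block:: python

    from jinja2 import Environment

    # Approximates how the macro from the config is made available to the
    # SQL before rendering: the macro definition is prepended, and renders
    # to an empty string itself.
    template = Environment().from_string(
        "{% macro my_macro(n) %}{{ n }} + {{ n * 2 }}{% endmacro %}"
        "SELECT {{ my_macro(6) }} FROM some_table"
    )
    print(template.render())  # SELECT 6 + 12 FROM some_table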
Jinja Macro Templating (from file)
""""""""""""""""""""""""""""""""""

In addition to macros specified in the config file, macros can also be
loaded from files or folders. This is specified in the config file:

.. code-block:: cfg

    [sqlfluff:templater:jinja]
    load_macros_from_path = my_macros

``load_macros_from_path`` is a comma-separated list of :code:`.sql` files
or folders. Locations are *relative to the config file*. For example, if
the config file above was found at :code:`/home/my_project/.sqlfluff`, then
SQLFluff will look for macros in the folder
:code:`/home/my_project/my_macros/` (but not subfolders). Any macros
defined in the config will always take precedence over a macro defined in
the path.

* :code:`.sql` files: Macros in these files are available in every
  :code:`.sql` file without requiring a Jinja :code:`include` or
  :code:`import`.
* Folders: To use macros from the :code:`.sql` files in folders, use Jinja
  :code:`include` or :code:`import` as explained below.

**Note:** The :code:`load_macros_from_path` setting also defines the search
path for Jinja ``include`` or ``import`` statements. Unlike with macros (as
noted above), subdirectories are supported. For example, if
:code:`load_macros_from_path` is set to :code:`my_macros`, and there is a
file :code:`my_macros/subdir/my_file.sql`, you can do:

.. code-block:: jinja

    {% include 'subdir/my_file.sql' %}

.. note::

    Throughout the templating process **whitespace** will still be treated
    rigorously, and this includes **newlines**. In particular you may
    choose to provide *dummy* macros in your configuration different from
    the actual macros used in production.

    **REMEMBER:** The reason SQLFluff supports macros is to *enable* it to
    parse templated sql without it being a blocker. It shouldn't be a
    requirement that the *templating* is accurate - it only needs to work
    well enough that *parsing* and *linting* are helpful.

Builtin Jinja Macro Blocks
""""""""""""""""""""""""""

One of the main use cases which inspired *SQLFluff* as a project was
`dbt`_. It uses jinja templating extensively and leads to some users
maintaining large repositories of sql files which could potentially benefit
from some linting.

.. note::

    *SQLFluff* now has a tighter integration with dbt through the "dbt"
    templater. It is the recommended templater for dbt projects. If used,
    it eliminates the need for the overrides described in this section.

    To use the dbt templater, go to `dbt templater`_.

*SQLFluff* anticipates this use case and provides some built in macro
blocks in the `Default Configuration`_ which assist in getting started with
`dbt`_ projects. In particular it provides mock objects for:

* *ref*: The mock version of this provided simply returns the model
  reference as the name of the table. In most cases this is sufficient.
* *config*: A regularly used macro in `dbt`_ to set configuration values.
  For linting purposes, this makes no difference and so the provided macro
  simply returns nothing.

.. note::

    If there are other builtin macros which would make your life easier,
    consider submitting the idea (or even better a pull request) on
    `github`_.

.. _`dbt`: https://www.getdbt.com/
.. _`github`: https://www.github.com/sqlfluff/sqlfluff

.. _jinja_library_templating:

Library Templating
""""""""""""""""""

If using *SQLFluff* with jinja as your templater, you may have library
function calls within your sql files that cannot be templated via the
normal macro templating mechanisms:

.. code-block:: SQL+Jinja

    SELECT foo, bar
    FROM baz
    {{ dbt_utils.group_by(2) }}

To template these libraries, you can use the
`sqlfluff:jinja:library_path` config option:
.. code-block:: cfg

    [sqlfluff:templater:jinja]
    library_path = sqlfluff_libs

This will pull in any python modules from that directory and allow sqlfluff
to use them in templates. In the above example, you might define a file at
`sqlfluff_libs/dbt_utils.py` as:

.. code-block:: python

    def group_by(n):
        return "GROUP BY 1,2"

If an `__init__.py` is detected, it will be loaded alongside any modules
and submodules found within the library path.

.. code-block:: SQL+Jinja

    SELECT
        {{ custom_sum('foo', 'bar') }},
        {{ foo.bar.another_sum('foo', 'bar') }}
    FROM baz

`sqlfluff_libs/__init__.py`:

.. code-block:: python

    def custom_sum(a: str, b: str) -> str:
        return a + b

`sqlfluff_libs/foo/__init__.py`:

.. code-block:: python

    # empty file

`sqlfluff_libs/foo/bar.py`:

.. code-block:: python

    def another_sum(a: str, b: str) -> str:
        return a + b

Additionally, the library can be used to expose Jinja filters to the Jinja
environment used by SQLFluff. This is achieved by setting a global variable
named ``SQLFLUFF_JINJA_FILTERS``. ``SQLFLUFF_JINJA_FILTERS`` is a
dictionary where:

* dictionary keys map to the Jinja filter name
* dictionary values map to the Python callable

For example, to make the Airflow filter ``ds`` available to SQLFluff, add
the following to the `__init__.py` of the library:

.. code-block:: python

    import datetime

    # https://github.com/apache/airflow/blob/main/airflow/templates.py#L53
    def ds_filter(value: datetime.date | datetime.time | None) -> str | None:
        """Date filter."""
        if value is None:
            return None
        return value.strftime("%Y-%m-%d")


    SQLFLUFF_JINJA_FILTERS = {"ds": ds_filter}

Now, ``ds`` can be used in SQL:

.. code-block:: SQL+Jinja

    SELECT "{{ "2000-01-01" | ds }}";

Interaction with ``--ignore=templating``
""""""""""""""""""""""""""""""""""""""""

Ignoring Jinja templating errors provides a way for users to use SQLFluff
while reducing or avoiding the need to spend a lot of time adding variables
to ``[sqlfluff:templater:jinja:context]``.

When ``--ignore=templating`` is enabled, the Jinja templater behaves a bit
differently. This additional behavior is *usually* but not *always* helpful
for making the file at least partially parsable and fixable. It definitely
doesn't **guarantee** that every file can be fixed, but it's proven useful
for some users. Here's how it works:

* Within the expanded SQL, undefined variables are automatically *replaced*
  with the corresponding string value.
* If you do: ``{% include query %}``, and the variable ``query`` is not
  defined, it returns a "file" containing the string ``query``.
* If you do: ``{% include "query_file.sql" %}``, and that file does not
  exist or you haven't configured a setting for ``load_macros_from_path``,
  it returns a "file" containing the text ``query_file``.

For example:

.. code-block:: SQL+Jinja

    select {{ my_variable }}
    from {% include "my_table.sql" %}

is interpreted as:

.. code-block:: sql

    select my_variable
    from my_table

The values provided by the Jinja templater act *a bit* (not exactly) like a
mixture of several types:

* ``str``
* ``int``
* ``list``
* Jinja's ``Undefined`` class

Because the values behave like ``Undefined``, it's possible to replace them
using Jinja's ``default()`` filter. For example:

.. code-block:: SQL+Jinja

    select {{ my_variable | default("col_a") }}
    from my_table

is interpreted as:

.. code-block:: sql

    select col_a
    from my_table

Placeholder templater
^^^^^^^^^^^^^^^^^^^^^

Libraries such as SQLAlchemy or Psycopg use different parameter placeholder
styles to mark where a parameter has to be inserted in the query.
For example, a query in SQLAlchemy can look like this:

.. code-block:: sql

    SELECT * FROM table WHERE id = :myid

At runtime `:myid` will be replaced by a value provided by the application
and escaped as needed, but this is not standard SQL and cannot be parsed as
is. In order to parse these queries, it is then necessary to replace these
placeholders with sample values, and this is done with the placeholder
templater.

Placeholder templating can be enabled in the config using:

.. code-block:: cfg

    [sqlfluff]
    templater = placeholder

A few common styles are supported:

.. code-block:: sql
    :force:

    -- colon
    WHERE bla = :my_name

    -- colon_nospaces
    -- (use with caution as more prone to false positives)
    WHERE bla = table:my_name

    -- numeric_colon
    WHERE bla = :2

    -- pyformat
    WHERE bla = %(my_name)s

    -- dollar
    WHERE bla = $my_name or WHERE bla = ${my_name}

    -- question_mark
    WHERE bla = ?

    -- numeric_dollar
    WHERE bla = $3 or WHERE bla = ${3}

    -- percent
    WHERE bla = %s

    -- ampersand
    WHERE bla = &s or WHERE bla = &{s} or USE DATABASE MARK_{ENV}

These can be configured by setting `param_style` to the names above:

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_style = colon
    my_name = 'john'

then you can set sample values for each parameter, like `my_name` above.
Notice that the value needs to be escaped as it will be replaced as a
string during parsing. When the sample values aren't provided, the
templater will use the parameter names themselves by default.

When parameters are positional, like `question_mark`, then their name is
simply the order in which they appear, starting with `1`.

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_style = question_mark
    1 = 'john'

In case you need a parameter style different from the ones above, you can
pass a custom regex.

.. code-block:: cfg

    [sqlfluff:templater:placeholder]
    param_regex = __(?P<param_name>[\w_]+)__
    my_name = 'john'

N.B. quotes around `param_regex` in the config are interpreted literally by
the templater, e.g. `param_regex='__(?P<param_name>[\w_]+)__'` matches
`'__some_param__'` not `__some_param__`.

The named group `param_name` will be used as the key to replace; if it is
missing, the parameter is assumed to be positional and numbers are used
instead.
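Before adding a custom ``param_regex`` to your config, it can be worth
sanity-checking it with Python's ``re`` module. A minimal sketch using the
pattern above:

.. code-block:: python

    import re

    # The same pattern as `param_regex` above: a parameter name wrapped in
    # double underscores, captured in a group named `param_name`.
    pattern = re.compile(r"__(?P<param_name>[\w_]+)__")

    match = pattern.search("SELECT * FROM __tbl_name__")
    print(match.group("param_name"))  # tbl_name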
Also consider making a pull request to the project to have your style
added; it may be useful to other people and simplify your configuration.

Python templater
^^^^^^^^^^^^^^^^

Uses native Python f-strings. As described in
:ref:`generic_variable_templating`, an example usage would be configured as
follows:

If passed the following *.sql* file:

.. code-block::

    SELECT * FROM {tbl_name}

...and the following configuration in *.sqlfluff* in the same directory:

.. code-block:: cfg

    [sqlfluff]
    templater = python

    [sqlfluff:templater:python:context]
    tbl_name = my_table

...then before parsing, the sql will be transformed to:

.. code-block:: sql

    SELECT * FROM my_table

:code:`dbt` templater
^^^^^^^^^^^^^^^^^^^^^

.. note::

    From sqlfluff version 0.7.0 onwards, the dbt templater has been moved
    to a separate plugin and python package. Projects that were already
    using the dbt templater may initially fail after an upgrade to 0.7.0+.
    See the installation instructions below to install the dbt templater.

    dbt templating is still a relatively new feature added in 0.4.0 and is
    still in very active development! If you encounter an issue, please
    let us know in a GitHub issue or on the SQLFluff slack workspace.

:code:`dbt` is not the default templater for *SQLFluff* (it is
:code:`jinja`). :code:`dbt` is a complex tool, so using the default
:code:`jinja` templater will be simpler. You should be aware when using the
:code:`dbt` templater that you will be exposed to some of the complexity of
:code:`dbt`. Users may wish to try both templaters and choose according to
how they intend to use *SQLFluff*.

A simple rule of thumb might be:

- If you are using *SQLFluff* in a CI/CD context, where speed is not
  critical but accuracy in rendering sql is, then the `dbt` templater may
  be more appropriate.
- If you are using *SQLFluff* in an IDE or on a git hook, where speed of
  response may be more important, then the `jinja` templater may be more
  appropriate.

Pros:

* Most (potentially all) macros will work

Cons:

* More complex, e.g. using it successfully may require deeper understanding
  of your models and/or macros (including third-party macros)
* More configuration decisions to make
* Best practices are not yet established or documented
* If your :code:`dbt` model files access a database at compile time, using
  SQLFluff with the :code:`dbt` templater will **also** require access to a
  database.

  * Note that you can often point SQLFluff and the :code:`dbt` templater at
    a test database (i.e. it doesn't have to be the production database).

* Runs slower

Installation & Configuration
""""""""""""""""""""""""""""

In order to get started using *SQLFluff* with a dbt project you will first
need to install the relevant `dbt adapter`_ for your dialect and the
:code:`sqlfluff-templater-dbt` package using your package manager of choice
(e.g. :code:`pip install dbt-postgres sqlfluff-templater-dbt`) and then
will need the following configuration:

.. _`dbt adapter`: https://docs.getdbt.com/docs/available-adapters

In *.sqlfluff*:

.. code-block:: cfg

    [sqlfluff]
    templater = dbt

In *.sqlfluffignore*:

.. code-block:: text

    target/
    # dbt <1.0.0
    dbt_modules/
    # dbt >=1.0.0
    dbt_packages/
    macros/

You can set the dbt project directory, profiles directory and profile with:

.. code-block:: cfg

    [sqlfluff:templater:dbt]
    project_dir = <relative or absolute path to dbt_project directory>
    profiles_dir = <relative or absolute path to the directory that contains the profiles.yml file>
    profile = <dbt profile>
    target = <dbt target>

.. note::

    If the `profiles_dir` setting is omitted, SQLFluff will look for the
    profile in the default location, which varies by operating system. On
    Unix-like operating systems (e.g. Linux or macOS), the default profile
    directory is `~/.dbt/`. On Windows, you can determine your default
    profile directory by running `dbt debug --config-dir`.

To use builtin dbt Jinja functions SQLFluff provides a configuration option
that enables usage within templates.

.. code-block:: cfg

    [sqlfluff:templater:jinja]
    apply_dbt_builtins = True

This will provide dbt macros like `ref`, `var`, `is_incremental()`. If the
need arises, builtin dbt macros can be customised via Jinja macros in the
`.sqlfluff` configuration file.

.. code-block:: cfg

    [sqlfluff:templater:jinja:macros]
    # Macros provided as builtins for dbt projects
    dbt_ref = {% macro ref(model_ref) %}{{model_ref}}{% endmacro %}
    dbt_source = {% macro source(source_name, table) %}{{source_name}}_{{table}}{% endmacro %}
    dbt_config = {% macro config() %}{% for k in kwargs %}{% endfor %}{% endmacro %}
    dbt_var = {% macro var(variable, default='') %}item{% endmacro %}
    dbt_is_incremental = {% macro is_incremental() %}True{% endmacro %}

If your project requires that you pass variables to dbt through the command
line, you can specify them in the `sqlfluff:templater:dbt:context` section
of `.sqlfluff`. See below for an example configuration and its equivalent
dbt command:

.. code-block:: cfg

    [sqlfluff:templater:dbt:context]
    my_variable = 1
.. code-block:: text

    dbt run --vars '{"my_variable": 1}'

Known Caveats
"""""""""""""

- To use the dbt templater, you must set `templater = dbt` in the
  `.sqlfluff` config file in the directory where sqlfluff is run. The
  templater cannot be changed in `.sqlfluff` files in subdirectories.
- In SQLFluff 0.4.0 using the dbt templater requires that all files within
  the root and child directories of the dbt project must be part of the
  project. If there are deployment scripts which refer to SQL files not
  part of the project for instance, this will result in an error. You can
  overcome this by adding any non-dbt project SQL files to .sqlfluffignore.

CLI Arguments
-------------

You already know you can pass arguments (:code:`--verbose`,
:code:`--exclude-rules`, etc.) through the CLI commands (:code:`lint`,
:code:`fix`, etc.):

.. code-block:: text

    $ sqlfluff lint my_code.sql -v --exclude-rules LT08,RF02

You might have arguments that you pass through every time, e.g. rules you
*always* want to ignore. These can also be configured:

.. code-block:: cfg

    [sqlfluff]
    verbose = 1
    exclude_rules = LT08,RF02

Note that while the :code:`exclude_rules` config looks similar to the above
example, the :code:`verbose` config has an integer value. This is because
:code:`verbose` is *stackable*, meaning there are multiple levels of
verbosity that are available for configuration. See :ref:`cliref` for more
details about the available CLI arguments. For more details about rule
exclusion, see `Enabling and Disabling Rules`_.

Ignoring Errors & Files
-----------------------

Ignoring individual lines
^^^^^^^^^^^^^^^^^^^^^^^^^

Similar to `flake8's ignore`_, individual lines can be ignored by adding
:code:`-- noqa` to the end of the line. Additionally, specific rules can be
ignored by quoting their code or the category.

.. code-block:: sql

    -- Ignore all errors
    SeLeCt  1 from tBl ;    -- noqa

    -- Ignore rule CP02 & rule CP03
    SeLeCt  1 from tBl ;    -- noqa: CP02,CP03

    -- Ignore all parsing errors
    SeLeCt from tBl ;       -- noqa: PRS

.. _`flake8's ignore`: https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html#in-line-ignoring-errors

Ignoring line ranges
^^^^^^^^^^^^^^^^^^^^

Similar to `pylint's "pylint" directive"`_, ranges of lines can be ignored
by adding :code:`-- noqa:disable=<rule>[,...] | all` to the line. Following
this directive, specified rules (or all rules, if "all" was specified) will
be ignored until a corresponding `-- noqa:enable=<rule>[,...] | all`
directive.

.. code-block:: sql

    -- Ignore rule AL02 from this line forward
    SELECT col_a a FROM foo -- noqa: disable=AL02

    -- Ignore all rules from this line forward
    SELECT col_a a FROM foo -- noqa: disable=all

    -- Enforce all rules from this line forward
    SELECT col_a a FROM foo -- noqa: enable=all

.. _`pylint's "pylint" directive"`: http://pylint.pycqa.org/en/latest/user_guide/message-control.html

.. _sqlfluffignore:

Ignoring types of errors
^^^^^^^^^^^^^^^^^^^^^^^^

General *categories* of errors can be ignored using the ``--ignore``
command line option or the ``ignore`` setting in ``.sqlfluff``. Types of
errors that can be ignored include:

* ``lexing``
* ``linting``
* ``parsing``
* ``templating``

.sqlfluffignore
^^^^^^^^^^^^^^^

Similar to `Git's`_ :code:`.gitignore` and `Docker's`_
:code:`.dockerignore`, SQLFluff supports a :code:`.sqlfluffignore` file to
control which files are and aren't linted. Under the hood we use the python
`pathspec library`_ which also has a brief tutorial in their documentation.

An example of a potential :code:`.sqlfluffignore` placed in the root of
your project would be:

.. code-block:: cfg

    # Comments start with a hash.

    # Ignore anything in the "temp" path
    /temp/

    # Ignore anything called "testing.sql"
    testing.sql

    # Ignore any ".tsql" files
    *.tsql
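If you're unsure whether a given file will be picked up by your ignore
patterns, you can experiment with the `pathspec library`_ directly, since
it implements the matching used here. A small illustrative sketch using the
patterns from the example above:

.. code-block:: python

    import pathspec

    # Build a spec from the same patterns as the example `.sqlfluffignore`.
    spec = pathspec.PathSpec.from_lines(
        "gitwildmatch",
        ["/temp/", "testing.sql", "*.tsql"],
    )

    print(spec.match_file("temp/model.sql"))   # True - inside an ignored path
    print(spec.match_file("models/core.sql"))  # False - not ignored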
Ignore files can also be placed in subdirectories of a path which is being
linted, and will be applied to files within that subdirectory.

.. _`Git's`: https://git-scm.com/docs/gitignore#_pattern_format
.. _`Docker's`: https://docs.docker.com/engine/reference/builder/#dockerignore-file
.. _`pathspec library`: https://python-path-specification.readthedocs.io/

.. _defaultconfig:

Default Configuration
---------------------

The default configuration is as follows; note the `Builtin Jinja Macro
Blocks`_ in section *[sqlfluff:templater:jinja:macros]* as referred to
above.

.. note::

    This shows the *entire* default config. **We do not recommend that
    users copy this whole config as the starter config file for their
    project**. This is for two reasons:

    #. The config file should act as a form of *documentation* for your
       team. A record of what decisions you've made which govern how you
       format your sql. By having a more concise config file, and only
       defining config settings where they differ from the defaults - you
       are more clearly stating to your team what choices you've made.

    #. As the project evolves, the structure of the config file may change
       and we will attempt to make changes as backward compatible as
       possible. If you have not overridden a config setting in your
       project, we can easily update the default config to match your
       expected behaviour over time. We may also find issues with the
       default config which we can also fix in the background. *However*,
       the longer your local config file, the more work it will be to
       update and migrate your config file between major versions.

    If you are starting a fresh project and are looking for a good
    *starter config*, check out the :ref:`starter_config` section above.

.. literalinclude:: ../../src/sqlfluff/core/default_config.cfg
   :language: cfg

sqlfluff-2.3.5/docs/source/developingplugins.rst000066400000000000000000000077001451700765000220740ustar00rootroot00000000000000.. _developingpluginsref:

Developing Plugins
==================

*SQLFluff* is extensible through "plugins". We use the `pluggy library`_ to
make linting Rules pluggable, which enables users to implement rules that
are just too "organization specific" to be shared, or too platform specific
to be included in the core library.

.. note::

    We recommend that the module in a plugin which defines all of the hook
    implementations (anything using the ``@hookimpl`` decorator) should be
    able to fully import before any rule implementations are imported. More
    specifically, SQLFluff must be able to both *import* **and** *run* any
    implementations of ``get_configs_info()`` before any plugin rules (i.e.
    any derivatives of
    :py:class:`BaseRule <sqlfluff.core.rules.base.BaseRule>`) are
    *imported*. Because of this, we recommend that rules are defined in a
    separate module to the root of the plugin and then only imported within
    the ``get_rules()`` method.

    Importing in the main body of the module was previously our
    recommendation and so may still be the case for some older plugins. If
    one of your plugins does use imports in this way, a warning will be
    presented, recommending that you update your plugin.

.. code-block:: python
   :emphasize-lines: 7,8

    # The root module will need to import `hookimpl`, but
    # should not yet import the rule definitions for the plugin.
    from sqlfluff.core.plugin import hookimpl

    @hookimpl
    def get_rules():
        # Rules should be imported within the `get_rules` method instead
        from my_plugin.rules import MyRule

        return [MyRule]
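For illustration, the rule module referenced above might look something
like the sketch below. This is only a sketch - the module layout and the
rule itself are hypothetical, and the exact imports may vary between
SQLFluff versions (see the bundled example plugin for a canonical
implementation):

.. code-block:: python

    # my_plugin/rules.py - only imported from within `get_rules()` above.
    from sqlfluff.core.rules import BaseRule, LintResult, RuleContext
    from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler


    class MyRule(BaseRule):
        """Hypothetical rule: comments should not contain 'TODO'."""

        groups = ("all",)
        # Only visit comment segments, rather than crawling the whole tree.
        crawl_behaviour = SegmentSeekerCrawler({"comment"})

        def _eval(self, context: RuleContext):
            # `context.segment` is the segment currently being visited.
            if "TODO" in context.segment.raw.upper():
                return LintResult(
                    anchor=context.segment,
                    description="Found a TODO comment.",
                )
            return None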
.. _`pluggy library`: https://pluggy.readthedocs.io/en/latest/

Creating a plugin
-----------------

We have an example plugin in `sqlfluff/plugins/sqlfluff-plugin-example`_
which you can use as a template for rules, or the
`sqlfluff/plugins/sqlfluff-templater-dbt`_ which you can use as a template
for templater plugins.

A few things to note about plugins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Currently, only Rules and Templaters can be added through plugins. Over
time we expect more elements of SQLFluff will be extensible with plugins.
Each plugin can implement multiple Rules or Templaters.

We recommend that the name of a plugin should start with *"sqlfluff-"* to
be clear on the purpose of your plugin.

A plugin may need to include a default configuration if its rules are
configurable: use plugin default configurations **only for that reason**!
We advise against overwriting core configurations by using a default plugin
configuration, as there is no mechanism in place to enforce precedence
between the core library configs and plugin configs, and multiple plugins
could clash.

A plugin Rule class name should have the structure: "Rule_PluginName_L000".
The 'L' can be any letter and is meant to categorize rules; you could use
the letter 'S' to denote rules that enforce security checks, for example.

An important thing to note when running custom implemented rules: run
``pip install -e .`` inside the plugin folder so that your custom rules are
included when linting.

A plugin Rule code includes the PluginName, so a rule "Rule_L000" in core
will have code "L000", while "Rule_PluginName_L000" will have code
"PluginName_L000". Codes are used to display errors, and they are also used
as configuration keys.

We make it easy for plugin developers to test their rules by exposing a
testing library in *sqlfluff.utils.testing*.

.. _`sqlfluff/plugins/sqlfluff-plugin-example`: https://github.com/sqlfluff/sqlfluff/tree/main/plugins/sqlfluff-plugin-example
.. _`sqlfluff/plugins/sqlfluff-templater-dbt`: https://github.com/sqlfluff/sqlfluff/tree/main/plugins/sqlfluff-templater-dbt

Giving feedback
---------------

Would you like to have other parts of *SQLFluff* be "pluggable"? Tell us
about it in a `GitHub issue`_ 😄.

.. _`GitHub issue`: https://github.com/sqlfluff/sqlfluff/issues/new?assignees=&labels=enhancement&template=enhancement.md

sqlfluff-2.3.5/docs/source/developingrules.rst000066400000000000000000000075001451700765000215430ustar00rootroot00000000000000.. _developingrulesref:

Developing Rules
================

`Rules` in `SQLFluff` are implemented as classes inheriting from
``BaseRule``. SQLFluff crawls through the parse tree of a SQL file, calling
the rule's ``_eval()`` function for each segment in the tree. For many
rules, this allows the rule code to be really streamlined and only contain
the logic for the rule itself, with all the other mechanics abstracted
away.

Traversal Options
-----------------

``recurse_into``
^^^^^^^^^^^^^^^^

Some rules are a poor fit for the simple traversal pattern described above.
Typical reasons include:

* The rule only looks at a small portion of the file (e.g. the beginning or
  end).
* The rule needs to traverse the parse tree in a non-standard way.

These rules can override ``BaseRule``'s ``recurse_into`` field, setting it
to ``False``.
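A minimal sketch of what that override looks like (the rule name here is
hypothetical, and the body of ``_eval()`` is elided):

.. code-block:: python

    from sqlfluff.core.rules import BaseRule


    class Rule_Example_L001(BaseRule):
        """A hypothetical rule which inspects the file as a whole."""

        # Don't call _eval() for every segment - just once, for the root.
        recurse_into = False

        def _eval(self, context):
            # `context.segment` here is the root of the whole parse tree.
            ...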
For these rules, ``_eval()`` is only called *once*, with the root segment
of the tree. This can be much more efficient, especially on large files.
For example, see rules ``LT13`` and ``LT12``, which only look at the
beginning or end of the file, respectively.

``_works_on_unparsable``
^^^^^^^^^^^^^^^^^^^^^^^^

By default, `SQLFluff` calls ``_eval()`` for all segments, even
"unparsable" segments, i.e. segments that didn't match the parsing rules in
the dialect. This causes issues for some rules. If so, setting
``_works_on_unparsable`` to ``False`` tells SQLFluff not to call
``_eval()`` for unparsable segments and their descendants.

Performance-related Options
---------------------------

These are other fields on ``BaseRule``. Rules can override them.

``needs_raw_stack``
^^^^^^^^^^^^^^^^^^^

``needs_raw_stack`` defaults to ``False``. Some rules use the
``RuleContext.raw_stack`` property to access earlier segments in the
traversal. This can be useful, but it adds significant overhead to the
linting process. For this reason, it is disabled by default.

``lint_phase``
^^^^^^^^^^^^^^

There are two phases of rule running.

1. The ``main`` phase is appropriate for most rules. These rules are
   assumed to interact and potentially cause a cascade of fixes requiring
   multiple passes. These rules run up to the `runaway_limit` number of
   times (default 10).
2. The ``post`` phase is for post-processing rules, not expected to trigger
   any downstream rules, e.g. capitalization fixes. They are run in a
   post-processing loop at the end. This loop is identical to the ``main``
   loop, but is only run 2 times at the end (once to fix, and once again to
   confirm no remaining issues).

The two phases add complexity, but they also improve performance by
allowing SQLFluff to run fewer rules during the ``main`` phase, which often
runs several times.

NOTE: ``post`` rules also run on the *first* pass of the ``main`` phase so
that any issues they find will be presented in the list of issues output by
``sqlfluff fix`` and ``sqlfluff lint``.

Base Rules
----------

`base_rules` Module
^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.core.rules.base
   :members:

Functional API
--------------

These newer modules provide a higher-level API for rules working with
segments and slices. Rules that need to navigate or search the parse tree
may benefit from using these. Eventually, the plan is for **all** rules to
use these modules. As of December 30, 2021, 17+ rules use these modules.

The modules listed below are submodules of `sqlfluff.utils.functional`.

`segments` Module
^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.segments
   :members:

`segment_predicates` Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.segment_predicates
   :members:

`raw_file_slices` Module
^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.raw_file_slices
   :members:

`raw_file_slice_predicates` Module
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: sqlfluff.utils.functional.raw_file_slice_predicates
   :members:

sqlfluff-2.3.5/docs/source/dialects.rst000066400000000000000000000136611451700765000201310ustar00rootroot00000000000000.. _dialectref:

Dialects Reference
==================

SQLFluff is designed to be flexible in supporting a variety of dialects.
Not all potential dialects are supported so far, but several have been
implemented by the community. Below is a list of the currently available
dialects. Each inherits from another, up to the root `ansi` dialect.
For a canonical list of supported dialects, run the
:program:`sqlfluff dialects` command, which will output a list of the
current dialects available on your installation of SQLFluff.

.. note::

    For technical users looking to add new dialects or add new features to
    existing ones, the dependent nature of how dialects have been
    implemented is intended to reduce the amount of repetition in how
    different elements are defined. As an example, when we say that the
    :ref:`redshift_dialect_ref` dialect *inherits* from the
    :ref:`postgres_dialect_ref` dialect this is not because there is an
    agreement between those projects which means that features in one must
    end up in the other, but that the design of the
    :ref:`redshift_dialect_ref` dialect was heavily *inspired* by the
    postgres dialect and therefore when defining the dialect within
    sqlfluff it makes sense to use :ref:`postgres_dialect_ref` as a
    starting point rather than starting from scratch.

    Consider when adding new features to a dialect:

    - Should I be adding it just to this dialect, or adding it to a
      *parent* dialect?
    - If I'm creating a new dialect, which dialect would be best to inherit
      from?
    - Will the feature I'm adding break any *downstream* dependencies
      within dialects which inherit from this one?

.. _ansi_dialect_ref:

ANSI
----

This is the base dialect which holds most of the definitions of common SQL
commands and structures. If the dialect which you're actually using isn't
specifically implemented by SQLFluff, using this dialect is a good place to
start.

This dialect doesn't intend to be brutal in adhering to (and only to) the
ANSI SQL spec *(mostly because ANSI charges for access to that spec)*. It
aims to be a representation of vanilla SQL before any other project adds
their spin to it, and so may contain a slightly wider set of functions than
actually available in true ANSI SQL.

.. _athena_dialect_ref:

Athena
------

The dialect for `Amazon Athena`_.

.. _`Amazon Athena`: https://aws.amazon.com/athena/

.. _bigquery_dialect_ref:

BigQuery
--------

The dialect for `Google BigQuery`_.

.. _`Google BigQuery`: https://cloud.google.com/bigquery/

.. _clickhouse_dialect_ref:

ClickHouse
----------

The dialect for `ClickHouse`_.

.. _`ClickHouse`: https://clickhouse.com/

.. _databricks_dialect_ref:

Databricks
----------

The dialect for `Databricks`_.

.. _`Databricks`: https://databricks.com/

.. _db2_dialect_ref:

Db2
---

The dialect for `Db2`_.

.. _`Db2`: https://www.ibm.com/analytics/db2

.. _duck_dialect_ref:

DuckDB
------

The dialect for `DuckDB`_.

.. _`DuckDB`: https://duckdb.org/

.. _exasol_dialect_ref:

Exasol
------

The dialect for `Exasol`_.

.. _`Exasol`: https://www.exasol.com/

.. _greenplum_dialect_ref:

Greenplum
---------

The dialect for `Greenplum`_.

.. _`Greenplum`: https://www.greenplum.org/

.. _hive_dialect_ref:

Hive
----

The dialect for `Hive`_.

.. _`Hive`: https://hive.apache.org/

.. _materialize_dialect_ref:

Materialize
-----------

The dialect for `Materialize`_.

.. _`Materialize`: https://materialize.com/

.. _mysql_dialect_ref:

MySQL
-----

The dialect for `MySQL`_.

.. _`MySQL`: https://www.mysql.com/

.. _oracle_dialect_ref:

Oracle
------

The dialect for `Oracle`_ SQL. Note: this does not include PL/SQL.

.. _`Oracle`: https://www.oracle.com/database/technologies/appdev/sql.html

.. _postgres_dialect_ref:

PostgreSQL
----------

This is based around the `PostgreSQL spec`_. Many other SQL instances are
often based on PostgreSQL syntax.
If you're running an unsupported dialect, then this is often the dialect to
use (until someone makes a specific dialect).

.. _`PostgreSQL spec`: https://www.postgresql.org/docs/9.6/reference.html

.. _redshift_dialect_ref:

Redshift
--------

The dialect for `Amazon Redshift`_.

.. _`Amazon Redshift`: https://aws.amazon.com/redshift/

.. _snowflake_dialect_ref:

Snowflake
---------

The dialect for `Snowflake`_, which has much of its syntax inherited from
:ref:`postgres_dialect_ref`.

.. _`Snowflake`: https://docs.snowflake.com/en/sql-reference.html

.. _soql_dialect_ref:

SOQL
----

The dialect for `SOQL`_ (Salesforce Object Query Language).

.. _`SOQL`: https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm

.. _sparksql_dialect_ref:

SparkSQL
--------

The dialect for Apache `Spark SQL`_. It inherits from
:ref:`ansi_dialect_ref` and includes relevant syntax from
:ref:`hive_dialect_ref` for commands that permit Hive Format. Spark SQL
extensions provided by the `Delta Lake`_ project are also implemented in
this dialect.

This implementation focuses on the `Ansi Compliant Mode`_ introduced in
Spark3, instead of being Hive Compliant. The introduction of ANSI
Compliance provides better data quality and easier migration from
traditional DBMS.

Versions of Spark prior to 3.x will only support the Hive dialect.

.. _`Spark SQL`: https://spark.apache.org/docs/latest/sql-ref.html
.. _`Delta Lake`: https://docs.delta.io/latest/quick-start.html#set-up-apache-spark-with-delta-lake
.. _`Ansi Compliant Mode`: https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html

.. _sqlite_dialect_ref:

SQLite
------

The dialect for `SQLite`_.

.. _`SQLite`: https://www.sqlite.org/

.. _tsql_dialect_ref:

T-SQL
-----

The dialect for `T-SQL`_ (aka Transact-SQL).

.. _`T-SQL`: https://docs.microsoft.com/en-us/sql/t-sql/language-reference

.. _teradata_dialect_ref:

Teradata
--------

The dialect for `Teradata`_.

.. _`Teradata`: https://www.teradata.co.uk/

.. _trino_dialect_ref:

Trino
-----

The dialect for `Trino`_.

.. _`Trino`: https://trino.io/docs/current/
sqlfluff-2.3.5/docs/source/gettingstarted.rst000066400000000000000000000261341451700765000213700ustar00rootroot00000000000000.. _gettingstartedref:

Getting Started
===============

To get started with *SQLFluff* you'll need python and pip installed on your
machine; if you're already set up, you can skip straight to
`Installing SQLFluff`_.

Installing Python
-----------------

How to install *python* and *pip* depends on what operating system you're
using. In any case, the python wiki provides up to date `instructions for
all platforms here`_.

There's a chance that you'll be offered the choice between python versions.
Support for python 2 was dropped in early 2020, so you should always opt
for a version number starting with a 3. As for more specific options beyond
that, *SQLFluff* aims to be compatible with all current python versions,
and so it's best to pick the most recent.

You can confirm that python is working as expected by heading to your
terminal or console of choice and typing :code:`python --version` which
should give you a sensible read out and not an error.

.. code-block:: text

    $ python --version
    Python 3.9.1

For most people, their installation of python will come with :code:`pip`
(the python package manager) preinstalled. To confirm this you can type
:code:`pip --version` similar to python above.

.. code-block:: text

    $ pip --version
    pip 21.3.1 from ...

If however, you do have python installed but not :code:`pip`, then the best
instructions for what to do next are `on the python website`_.

.. _`instructions for all platforms here`: https://wiki.python.org/moin/BeginnersGuide/Download
.. _`on the python website`: https://pip.pypa.io/en/stable/installation/

Installing SQLFluff
-------------------

Assuming that python and pip are already installed, installing *SQLFluff*
is straightforward.

.. code-block:: text

    $ pip install sqlfluff

You can confirm its installation by getting *SQLFluff* to show its version
number.

.. code-block:: text

    $ sqlfluff version
    2.3.5

Basic Usage
-----------

To get a feel for how to use *SQLFluff* it helps to have a small
:code:`.sql` file which has a simple structure and some known issues for
testing. Create a file called :code:`test.sql` in the same folder that
you're currently in with the following content:

.. code-block:: sql

    SELECT a+b  AS foo,
    c AS bar from my_table

You can then run :code:`sqlfluff lint test.sql --dialect ansi` to lint this
file.

.. code-block:: text

    $ sqlfluff lint test.sql --dialect ansi
    == [test.sql] FAIL
    L:   1 | P:   1 | LT09 | Select targets should be on a new line unless there is
                           | only one select target.
                           | [layout.select_targets]
    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
                           | and aggregates. [structure.column_order]
    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
                           | [layout.indent]
    L:   1 | P:   9 | LT01 | Expected single whitespace between naked identifier and
                           | binary operator '+'. [layout.spacing]
    L:   1 | P:  10 | LT01 | Expected single whitespace between binary operator '+'
                           | and naked identifier. [layout.spacing]
    L:   1 | P:  11 | LT01 | Expected only single space before 'AS' keyword. Found '  '.
                           | [layout.spacing]
    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
                           | [layout.indent]
    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'from'.
                           | [layout.indent]
    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
                           | [capitalisation.keywords]
    All Finished 📜 🎉!

You'll see that *SQLFluff* has failed the linting check for this file. On
each of the following lines you can see each of the problems it has found,
with some information about the location and what kind of problem there is.

One of the errors has been found on *line 1*, *position 9* (as shown by
:code:`L: 1 | P: 9`) and it's a problem with rule *LT01* (for a full list
of rules, see :ref:`ruleref`). From this (and the following error) we can
see that the problem is that there is no space either side of the
:code:`+` symbol in :code:`a+b`. Head into the file, and correct this issue
so that the file now looks like this:

.. code-block:: sql

    SELECT a + b  AS foo,
    c AS bar from my_table

Rerun the same command as before, and you'll see that the original error
(violation of *LT01*) no longer shows up.

.. code-block:: text

    $ sqlfluff lint test.sql --dialect ansi
    == [test.sql] FAIL
    L:   1 | P:   1 | LT09 | Select targets should be on a new line unless there is
                           | only one select target.
                           | [layout.select_targets]
    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
                           | and aggregates. [structure.column_order]
    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
                           | [layout.indent]
    L:   1 | P:  13 | LT01 | Expected only single space before 'AS' keyword. Found '  '.
                           | [layout.spacing]
    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
                           | [layout.indent]
    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'from'.
                           | [layout.indent]
    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
                           | [capitalisation.keywords]

To fix the remaining issues, we're going to use one of the more advanced
features of *SQLFluff*, which is the *fix* command.
This allows more automated fixing of some errors, to save you time in
sorting out your sql files. Not all rules can be fixed in this way, and
there may be some situations where a fix cannot be applied because of the
context of the query, but in many simple cases it's a good place to start.

For now, we only want to fix the following rules: *LT02*, *LT12*, *CP01*.

.. code-block:: text

    $ sqlfluff fix test.sql --rules LT02,LT12,CP01 --dialect ansi
    ==== finding violations ====
    == [test.sql] FAIL
    L:   1 | P:   7 | LT02 | Expected line break and indent of 4 spaces before 'a'.
                           | [layout.indent]
    L:   2 | P:   1 | LT02 | Expected indent of 4 spaces.
                           | [layout.indent]
    L:   2 | P:   9 | LT02 | Expected line break and no indent before 'FROM'.
                           | [layout.indent]
    L:   2 | P:  10 | CP01 | Keywords must be consistently upper case.
                           | [capitalisation.keywords]
    ==== fixing violations ====
    4 fixable linting violations found
    Are you sure you wish to attempt to fix these? [Y/n]

...at this point you'll have to confirm that you want to make the changes
by pressing :code:`y` on your keyboard...

.. code-block:: text

    Are you sure you wish to attempt to fix these? [Y/n] ...
    Attempting fixes...
    Persisting Changes...
    == [test.sql] PASS
    Done. Please check your files to confirm.

If we now open up :code:`test.sql`, we'll see the content is now different.

.. code-block:: sql

    SELECT
        a + b  AS foo,
        c AS bar
    FROM my_table

In particular:

* The two columns have been indented to reflect being inside the
  :code:`SELECT` statement.
* The :code:`FROM` keyword has been capitalised to match the other
  keywords.

We could also fix *all* of the fixable errors by not specifying
:code:`--rules`.

.. code-block:: text

    $ sqlfluff fix test.sql --dialect ansi
    ==== finding violations ====
    == [test.sql] FAIL
    L:   1 | P:   1 | ST06 | Select wildcards then simple targets before calculations
                           | and aggregates. [structure.column_order]
    L:   2 | P:  10 | LT01 | Expected only single space before 'AS' keyword. Found '  '.
                           | [layout.spacing]
    ==== fixing violations ====
    2 fixable linting violations found
    Are you sure you wish to attempt to fix these? [Y/n] ...
    Attempting fixes...
    Persisting Changes...
    == [test.sql] PASS
    Done. Please check your files to confirm.

If we now open up :code:`test.sql`, we'll see the content has been updated
again.

.. code-block:: sql

    SELECT
        c AS bar,
        a + b AS foo
    FROM my_table

The SQL statement is now well formatted according to all the rules defined
in SQLFluff. The :code:`--rules` argument is optional, and could be useful
when you or your organisation follows a slightly different convention than
what we have defined.

Custom Usage
------------

So far we've covered the stock settings of *SQLFluff*, but there are many
different ways that people style their sql, and if you or your organisation
have different conventions, then many of these behaviours can be
configured. For example, given the example above, what if we actually think
that indents should only be two spaces, and rather than uppercase keywords,
they should all be lowercase?

To achieve this we create a configuration file named :code:`.sqlfluff` and
place it in the same directory as the current file. In that file put the
following content:

.. code-block:: cfg

    [sqlfluff]
    dialect = ansi

    [sqlfluff:indentation]
    tab_space_size = 2

    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = lower

Then rerun the same command as before.

.. code-block:: text

    $ sqlfluff fix test.sql --rules LT02,LT12,CP01,ST06,LT09,LT01

Then examine the file again, and you'll notice that the file has been fixed
accordingly.

.. code-block:: sql

    select
      c as bar,
      a + b as foo
    from my_table

For a full list of configuration options check out :ref:`defaultconfig`.
Note that in our example here we've only set a few configuration values and
any other configuration settings remain as per the default config. To see
how these options apply to specific rules check out the "Configuration"
section within each rule's documentation in :ref:`ruleref`.
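The same lint/fix cycle is also available from Python if you'd rather
script it. The following is a minimal sketch using the simple Python API;
it assumes that :code:`sqlfluff.fix` accepts the same dialect and rule
arguments as the CLI in your installed version.

.. code-block:: python

    import sqlfluff

    sql = "SELECT a+b  AS foo, c AS bar from my_table\n"

    # Returns the fixed SQL as a string, applying only the named rules.
    fixed_sql = sqlfluff.fix(sql, dialect="ansi", rules=["LT01", "CP01"])
    print(fixed_sql)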
Going further
-------------

From here, there are several more things to explore.

* To understand how *SQLFluff* is interpreting your file, explore the
  :code:`parse` command. You can learn more about that command and more by
  running :code:`sqlfluff --help` or :code:`sqlfluff parse --help`.
* To start linting more than just one file at a time, experiment with
  passing SQLFluff directories rather than just single files. Try running
  :code:`sqlfluff lint .` (to lint every sql file in the current folder) or
  :code:`sqlfluff lint path/to/my/sqlfiles`.
* To find out more about which rules are available, see :ref:`ruleref`.
* To find out more about configuring *SQLFluff* and what other options are
  available, see :ref:`config`.

One last thing to note is that *SQLFluff* is a relatively new project and
you may find bugs or strange things while using it. If you do find
anything, the most useful thing you can do is to
`post the issue on GitHub`_ where the maintainers of the project can work
out what to do with it. The project is in active development and so updates
and fixes may come out regularly.

.. _`post the issue on GitHub`: https://github.com/sqlfluff/sqlfluff/issues

sqlfluff-2.3.5/docs/source/index.rst000066400000000000000000000051131451700765000174410ustar00rootroot00000000000000📜 The SQL Linter for Humans
============================

Bored of not having a good SQL linter that works with whichever dialect
you're working with? Fluff is an extensible and modular linter designed to
help you write good SQL and catch errors and bad SQL before it hits your
database.

Notable releases:

* **1.0.x**: First *stable* release, no major changes to take advantage of
  a point of relative stability.
* **2.0.x**: Recode of rules, whitespace fixing consolidation,
  :code:`sqlfluff format` and removal of support for dbt versions pre
  `1.1`. Note, that this release brings with it some breaking changes to
  rule coding and configuration, see :ref:`upgrading_2_0`.

For more detail on other releases, see our :ref:`releasenotes`.

Want to see where and how people are using SQLFluff in their projects?
Head over to :ref:`inthewildref` for inspiration.

Getting Started
^^^^^^^^^^^^^^^

To get started just install the package, make a sql file and then run
SQLFluff and point it at the file. For more details or if you don't have
python or pip already installed see :ref:`gettingstartedref`.

.. code-block:: text

    $ pip install sqlfluff
    $ echo "  SELECT a  +  b FROM tbl;  " > test.sql
    $ sqlfluff lint test.sql --dialect ansi
    == [test.sql] FAIL
    L:   1 | P:   1 | LT01 | Expected only single space before 'SELECT' keyword.
                           | Found '  '. [layout.spacing]
    L:   1 | P:   1 | LT02 | First line should not be indented.
                           | [layout.indent]
    L:   1 | P:   1 | LT13 | Files must not begin with newlines or whitespace.
                           | [layout.start_of_file]
    L:   1 | P:  11 | LT01 | Expected only single space before binary operator '+'.
                           | Found '  '. [layout.spacing]
    L:   1 | P:  14 | LT01 | Expected only single space before naked identifier.
                           | Found '  '. [layout.spacing]
    L:   1 | P:  27 | LT01 | Unnecessary trailing whitespace at end of file.
                           | [layout.spacing]
    L:   1 | P:  27 | LT12 | Files must end with a single trailing newline.
                           | [layout.end_of_file]
    All Finished 📜 🎉!

Contents
^^^^^^^^

.. toctree::
   :maxdepth: 3
   :caption: Documentation for SQLFluff:

   gettingstarted
   realworld
   vision
   teamrollout
   layout
   rules
   dialects
   production
   configuration
   cli
   api
   releasenotes
   internals
   developingrules
   developingplugins
   inthewild
   jointhecommunity

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

sqlfluff-2.3.5/docs/source/internals.rst000066400000000000000000000167011451700765000203360ustar00rootroot00000000000000Internals
=========

It is recommended that the following is read in conjunction with exploring
the codebase. `dialect_ansi.py` in particular is helpful to understand the
recursive structure of segments and grammars. Some more detail is also
given on our Wiki_ including a `Contributing Dialect Changes`_ guide.

.. _Wiki: https://github.com/sqlfluff/sqlfluff/wiki/
.. _`Contributing Dialect Changes`: https://github.com/sqlfluff/sqlfluff/wiki/Contributing-Dialect-Changes

Architecture
------------

At a high level, the behaviour of SQLFluff is divided into a few key
stages. Whether calling `sqlfluff lint`, `sqlfluff fix` or
`sqlfluff parse`, the internal flow is largely the same.

.. _templater:

Stage 1, the templater
^^^^^^^^^^^^^^^^^^^^^^

This stage only applies to templated SQL, most commonly Jinja and dbt.
Vanilla SQL is sent straight to stage 2, the lexer.

In order to lint templated SQL, SQLFluff must first convert the 'raw' or
pre-templated code into valid SQL, which can then be parsed. The templater
returns both the raw and post-templated SQL so that any rule violations
which occur in templated sections can be ignored and the rest mapped to
their original line location for user feedback.

.. _Jinja: https://jinja.palletsprojects.com/
.. _dbt: https://docs.getdbt.com/

*SQLFluff* supports two templating engines: Jinja_ and dbt_. Under the hood
dbt also uses Jinja, but *SQLFluff* uses a separate mechanism which
interfaces directly with the dbt python package.

For more details on how to configure the templater see
:ref:`templateconfig`.

Stage 2, the lexer
^^^^^^^^^^^^^^^^^^

The lexer takes SQL and separates it into segments of whitespace and code.
Where we can impart some high level meaning to segments, we do, but the
result of this operation is still a flat sequence of typed segments (all
subclasses of :code:`RawSegment`).

Stage 3, the parser
^^^^^^^^^^^^^^^^^^^

The parser is arguably the most complicated element of SQLFluff, and is
relied on by all the other elements of the tool to do most of the heavy
lifting.

#. The lexed segments are parsed using the specified dialect's grammars. In
   SQLFluff, grammars describe the shape of SQL statements (or their
   components). The parser attempts to apply each potential grammar to the
   lexed segments until all the segments have been matched.

#. In SQLFluff, segments form a tree-like structure. The top-level segment
   is a :code:`FileSegment`, which contains zero or more
   :code:`StatementSegment`\ s, and so on. Before the segments have been
   parsed and named according to their type, they are 'raw', meaning they
   have no classification other than their literal value.

#. A segment's :code:`.match()` method uses the :code:`match_grammar`, on
   which :code:`.match()` is called.
SQLFluff parses in a single pass through the file, so segments will recursively match the file based on their respective grammars. In the example of a :code:`FileSegment`, it first divides up the query into statements, and then the :code:`.match()` method of those segments works out the structure within them. * *Segments* must implement a :code:`match_grammar`. When :code:`.match()` is called on a segment, this is the grammar which is used to decide whether there is a match. * *Grammars* combine *segments* or other *grammars* together in a pre-defined way. For example the :code:`OneOf` grammar will match if any one of its child elements match. #. During the recursion, the parser eventually reaches segments which have no children (raw segments containing a single token), and so the recursion naturally finishes. #. If no match is found for a segment, the contents will be wrapped in an :code:`UnparsableSegment` which is picked up as a *parsing* error later. This is usually facilitated by the :code:`ParseMode` on some grammars which can be set to :code:`GREEDY`, allowing the grammar to capture additional segments as unparsable. As an example, bracketed sections are often configured to capture anything unexpected as unparsable rather than simply failing to match if there is more than expected (which would be the default, :code:`STRICT`, behaviour). #. The result of the :code:`.match()` method is a :code:`MatchResult` which contains the instructions on how to turn the flat sequence of raw segments into a nested tree of segments. Calling :code:`.apply()` on this result at the end of the matching process is what finally creates the nested structure. When working on the parser there are a couple of design principles to keep in mind. - Grammars are contained in *dialects*, the root dialect being the *ansi* dialect. The ansi dialect is used to host logic common to all dialects, and so does not necessarily adhere to the formal ansi specification. Other SQL dialects inherit from the ansi dialect, replacing or patching any segments they need to. One reason for the *Ref* grammar is that it allows name resolution of grammar elements at runtime and so a *patched* grammar with some elements overridden can still rely on lower-level elements which haven't been redeclared within the dialect - All grammars and segments attempt to match as much as they can and will return partial matches where possible. It is up to the calling grammar or segment to decide whether a partial or complete match is required based on the context it is matching in. Stage 4, the linter ^^^^^^^^^^^^^^^^^^^ Given the complete parse tree, rule classes check for linting errors by traversing the tree, looking for segments and patterns of concern. If the rule discovers a violation, it returns a :code:`LintResult` pointing to the segment which caused the violation. Some rules are able to *fix* the problems they find. If this is the case, the rule will return a list of fixes, which describe changes to be made to the tree. This can include edits, inserts, or deletions. Once the fixes have been applied, the updated tree is written to the original file. .. _reflowinternals: Reflow Internals ---------------- Many rules supported by SQLFluff involve the spacing and layout of different elements, either to enforce a particular layout or just to add or remove code elements in a way sensitive to the existing layout configuration. The way this is achieved is through some centralised utilities in the `sqlfluff.utils.reflow` module. 
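For orientation, here is a minimal sketch of how a caller might drive these utilities, assuming the :code:`from_root`, :code:`respace` and :code:`get_fixes` methods documented below (an outline of the flow rather than a supported recipe, and exact signatures may vary between versions):

.. code-block:: python

    from sqlfluff.core import Linter
    from sqlfluff.utils.reflow import ReflowSequence

    # Parse a query using the public API. The Linter provides both the
    # parse tree and the FluffConfig object which the reflow utilities
    # require.
    linter = Linter(dialect="ansi")
    parsed = linter.parse_string("SELECT a  ,  b FROM tbl")

    # Build a sequence over the whole tree, correct the spacing, and
    # extract LintFix objects - the same kind that a rule would return.
    sequence = ReflowSequence.from_root(parsed.tree, config=linter.config)
    fixes = sequence.respace().get_fixes()

In normal use, rules construct these sequences themselves, so most users will only ever interact with this module indirectly.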
This module aims to achieve several things: * Less code duplication by implementing reflow logic in only one place. * Provide a streamlined interface for rules to easily utilise reflow logic. * Given this requirement, it's important that reflow utilities work within the existing framework for applying fixes to potentially templated code. We achieve this by returning `LintFix` objects which can then be returned by each rule wanting to use this logic. * Provide a consistent way of *configuring* layout requirements. For more details on configuration see :ref:`layoutconfig`. To support this, the module provides a :code:`ReflowSequence` class which allows access to all of the relevant operations which can be used to reformat sections of code, or even a whole file. Unless there is a very good reason, all rules should use this same approach to ensure consistent treatment of layout. .. autoclass:: sqlfluff.utils.reflow.ReflowSequence :members: .. autoclass:: sqlfluff.utils.reflow.elements.ReflowPoint :members: :inherited-members: .. autoclass:: sqlfluff.utils.reflow.elements.ReflowBlock :members: :inherited-members: sqlfluff-2.3.5/docs/source/inthewild.rst000066400000000000000000000116771451700765000203350ustar00rootroot00000000000000.. _inthewildref: SQLFluff in the Wild ==================== Want to find other people who are using SQLFluff in production use cases? Want to brag about how you're using it? Just want to show solidarity with the project and provide a testimonial for it? Just add a section below by raising a PR on GitHub by `editing this file ✏️ `_. - SQLFluff in production `dbt `_ projects at `tails.com `_. We use the SQLFluff cli as part of our CI pipeline in `codeship `_ to enforce certain styles in our SQL codebase (with over 650 models) and keep code quality high. Contact `@alanmcruickshank `_. - `Netlify `_'s data team uses SQLFluff with `dbt `_ to keep code quality in more than 350 models (and growing). Previously, we had our SQL Guidelines defined in a site hosted with Netlify, and now we're enforcing these rules in our CI workflow thanks to SQLFluff. - `Drizly's `_ analytics team uses SQLFluff with `dbt `_ for over 700 models as part of our CI checks in GitHub. Before SQLFluff, we had SQL best practices outlined in a google doc and had to manually enforce through PR comments. We're now able to enforce much of our style guide automatically through SQLFluff. - `Petal's `_ data-eng team runs SQLFluff on our 100+ model `dbt `_ project. As a pre-commit hook and as a CI check, SQLFluff helps keep our SQL readable and consistent. - `Surfline `_'s Analytics Engineering team implemented SQLFluff as part of our continuous integration (CI) suite across our entire `dbt `_ project (700+ models). We implement the CI suite using `GitHub Actions and Workflows `_. The benefits of using SQLFluff at Surfline are: - The SQL in our dbt models is consistent and easily readable. - Our style guide is maintained as :code:`code`, not a README that is rarely updated. - Reduced burden on Analytics Engineers to remember every single style rule. - New Analytics Engineers can quickly see and learn what "good SQL" looks like at Surfline and start writing it from day 1. - The `HTTP Archive `_ uses SQLFluff to automatically check for quality and consistency of code submitted by the many contributors to this project. 
In particular, our annual `Web Almanac `_ attracts hundreds of volunteers to help analyse our BigQuery dataset and being able to automatically lint Pull Requests through GitHub Actions is a fantastic way to help us maintain our growing repository of `over a thousand queries `_. - `Brooklyn Data Co `_ has a `dbt_artifacts `_ dbt package which runs SQLFluff in CI to lint pull requests automatically. It uses the `GitHub Actions workflow `_ contributed by Greg Clunies, with annotations on pull requests to make it easy for contributors to see where their SQL has failed any rules. See an `example pull request with SQLFluff annotations `_. - `Markerr `_ has tightly integrated SQLFluff into our CI/CD process for data model changes and process improvements. Since adopting SQLFluff across the organization, the clarity of our SQL code has risen dramatically, freeing up review time to focus on deeper data and process-specific questions. - `Symend `_ has a microservices platform supporting our SaaS product. We use SQLFluff in the CI/CD process of several of our data-oriented microservices. Among other things, it validates our database migration scripts, deployed using `schemachange `_, and we have near-term plans to implement it for our `dbt`_ projects. - At `CarePay `_ we use SQLFluff to lint and fix all our dbt models as well as several other SQL heavy projects. Locally we use SQLFluff with pre-commit and have also integrated it into our CI/CD pipelines. - The Core Analytics Team from `Typeform `_ and `videoask `_ uses SQLFluff in the production `dbt `_ project for building our data warehouse layer for both products: - We use it locally in our day to day work, helping us to write cleaner code. - We added SQLFluff to our CI processes, so during a PR we can check that any new or modified sql file has a consistent and easy-to-read format. sqlfluff-2.3.5/docs/source/jointhecommunity.rst000066400000000000000000000005511451700765000217400ustar00rootroot00000000000000.. _jointhecommunity: SQLFluff Slack ==================== We have a fast-growing `community on Slack `_, come and join us! SQLFluff on Twitter ==================== Follow us on Twitter `@SQLFluff `_ for announcements and other related posts. sqlfluff-2.3.5/docs/source/layout.rst000066400000000000000000000632631451700765000176550ustar00rootroot00000000000000.. _layoutref: Let's talk about whitespace =========================== If there is one part of building a linter that is going to be controversial it's going to be **whitespace** (closely followed by **cApiTaLiSaTiOn** 😁). More specifically, **whitespace** divides into three key themes: #. **Spacing**: The amount of whitespace between elements on the same line. #. **Line Breaks**: The choice of where within the code it is inappropriate, appropriate or even compulsory to have a line break. #. **Indentation**: Given a line break, how much whitespace should precede the first code element on that line. *SQLFluff* aims to be *opinionated* on this theme, but also *configurable* (see :ref:`layoutconfig`). The tool will have a default viewpoint and will aim to have views on all of the important aspects of SQL layout, but if you (or your organisation) don't like those views then we aim to allow enough configuration that you can lint in line with your views, and still use *SQLFluff*. For more information on how to configure rules to your own viewpoint see :ref:`config`. .. note:: This section of the docs handles the intent and reasoning behind how layout is handled by SQLFluff.
For a deeper look at how this is achieved internally see :ref:`reflowinternals`. Spacing ------- Of the different elements of whitespace, spacing is likely the least controversial. By default, all elements are separated by a single space character. Except for very specific circumstances (see section on :ref:`alignedelements`), any additional space between elements is usually unwanted and a distraction for the reader. There are however several common cases where *no whitespace* is more appropriate, which fall into two categories (for more details on where to configure these see :ref:`layoutspacingconfig`). #. *No whitespace but a newline is allowed.* This option is configured using the :code:`touch` option in the :code:`spacing_*` configuration settings. The most common example of this is the spacing around commas. For example :code:`SELECT a , b` would be unusual and would more normally be written :code:`SELECT a, b`. Inserting a newline between the :code:`a` and comma would not cause issues and may even be desired, for example: .. code-block:: sql SELECT col_a , col_b -- Newline present before column , col_c -- When inline, comma should still touch element before. , GREATEST(col_d, col_e) as col_f FROM tbl_a #. *No whitespace and a newline is not allowed.* This option is configured using the :code:`inline` option in the :code:`spacing_*` configuration settings. The most common example of this is spacing within the parts of a qualified identifier e.g. :code:`my_schema.my_table`. If a newline were present between the :code:`.` and either :code:`my_schema` or :code:`my_table`, then the expression would not parse and so no newlines should be allowed. .. _alignedelements: Aligned elements ^^^^^^^^^^^^^^^^ A special case of spacing is where elements are set to be aligned within some limits. This is not enabled by default, but can be configured to achieve layouts like: .. code-block:: sql SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar In this example, the alias expressions are all aligned with each other. To configure this, SQLFluff needs to know what elements to align and how far to search to find elements which should be aligned with each other. The configuration to achieve this layout is: .. code-block:: ini [sqlfluff:layout:type:alias_expression] # We want non-default spacing _before_ the alias expressions. spacing_before = align # We want to align them within the next outer select clause. # This means for example that alias expressions within the FROM # or JOIN clause would _not_ be aligned with them. align_within = select_clause # The point at which to stop searching outward for siblings, which # in this example would likely be the boundary of a CTE. Stopping # when we hit brackets is usually a good rule of thumb for this # configuration. align_scope = bracketed Of these configuration values, the :code:`align_scope` is potentially the least obvious. The following example illustrates the impact it has. .. code-block:: sql -- With -- align_scope = bracketed -- align_within = select_clause WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar; -- With -- align_scope = bracketed -- align_within = statement WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar -- Now the FROM alias is also aligned.
-- With -- align_scope = file -- align_within = select_clause WITH foo as ( SELECT a, b, c AS first_column -- Now the aliases here are aligned d + e AS second_column -- with the outer query. ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar -- With -- align_scope = file -- align_within = statement WITH foo as ( SELECT a, b, c AS first_column d + e AS second_column ) SELECT a AS first_column, (a + b) / 2 AS third_column FROM foo AS bar Line Breaks ----------- When controlling line breaks, we are trying to achieve a few different things: #. Do we have *enough* line breaks that *line length* doesn't become excessive. Long lines are hard to read, especially given that readers may be on varying screen sizes or have multiple windows open. This is (of course) configurable, but the default is 80 characters (in line with the `dbt Labs SQL style guide`_). #. Is the positioning of *blank lines* (i.e. lines with nothing other than whitespace on them) appropriate. There are some circumstances where a blank line is *desired* (e.g. between CTEs). There are others where they are not, in particular *multiple blank lines*, for example at the beginning of a file. #. Where we do have line breaks, are they positioned appropriately and consistently with regard to other elements around them. This is most common when it comes to *commas*, and whether they should be *leading* (e.g. :code:`, my_column`) or *trailing* (e.g. :code:`my_column,`). In less common cases, it may also be desirable for some elements to have both a line break *before and after* (e.g. a set operator such as `UNION`). Indentation ----------- Lastly, given we have multiple lines of SQL, to what extent should we indent some lines to provide visual cues to the structure of that SQL. It's important to note that SQL is *not* whitespace sensitive in its interpretation and that means that any principles we apply here are entirely for the benefit of humans. *Your database doesn't care*. The indentation therefore should be treated as a *hint* to the reader of the structure of the code. This explains the common practice within most languages that nested elements (for example the contents of a set of brackets in a function call) should be indented one step from the outer elements. It's also convention that elements *with the same level* in a nested structure should have *the same indentation*, at least with regard to their local surroundings. As an example: .. code-block:: sql SELECT nested_within_select AS first_column, some_function( nested_within_function, also_nested_within_function ) AS indented_the_same_as_opening_bracket FROM indented_the_same_as_select Comment Indents ^^^^^^^^^^^^^^^ .. note:: The notes here about block comments are not implemented prior to 2.0.x. They should be coming in that release or soon after. **Comments** are dealt with differently, depending on whether they're *block* comments (:code:`/* like this */`), which might optionally include newlines, or *inline* comments (:code:`-- like this`) which are necessarily only on one line. * *Block comments* cannot share a line with any code elements (so in effect they must start on their own new line), and they cannot be followed by any code elements on the same line (and so in effect must be followed by a newline, if we are to avoid trailing whitespace).
None of the lines within the block comment may have an indent less than the first line of the block comment (although additional indentation within a comment is allowed), and that first line should be aligned with the first code element *following* the block comment. .. code-block:: sql SELECT /* This is a block comment starting on a new line which contains a newline (continuing with at least the same indent. - potentially containing greater indents - having no other code following it in the same line - and aligned with the line of code following it */ this_column as what_we_align_the_column_to FROM my_table * *Inline comments* can be on the same line as other code, but are subject to the same line-length restrictions. If they don't fit on the same line (or if it just looks nicer) they can also be the only element on a line. In this latter case, they should be aligned with the first code element *following* the comment. .. code-block:: sql SELECT -- This is fine this_column as what_we_align_to, another_column as something_short, -- Is ok case -- This is aligned correctly with below when indented then take_care else try_harder end as the_general_guidance -- Even here we align with the line below FROM my_table .. note:: When fixing issues with comment indentation, SQLFluff will attempt to keep comments in their original position but if line length concerns make this difficult, it will either abandon the fix, or move *same line* comments up and *before* the line they are currently on. This is in line with the assumption that comments on their own line refer to the elements of code which they come *before*, not *after*. .. _hangingindents: Hanging Indents ^^^^^^^^^^^^^^^ One approach to indenting nested elements is a layout called a *hanging indent*. In this layout, there is no line break before the first nested element, but subsequent elements are indented to match the line position of that first element. Two examples might be: .. code-block:: sql -- A select statement with two hanging indents: SELECT no_line_break_before_me, indented_to_match_the_first, 1 + (a + b) AS another_more_complex_example FROM my_table; -- This TSQL example is also in essence a hanging indent: DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; In some circumstances this layout can be quite neat (the :code:`DECLARE` statement is a good example of this), however once indents are nested or indentation styles are mixed it can rapidly become confusing (as partially shown in the first example). Additionally, unless the leading element of the first line is very short, hanging indents use much *larger indents* than a traditional simple indent where a line break is used before the first element. Hanging indents have been supported in SQLFluff up to the 1.x versions, however **they will no longer be supported from 2.0.0** onwards. This is due to the ambiguity which they bring to fixing poorly formatted SQL. Take the following code: .. code-block:: sql SELECT this_is, badly_formatted, code_and, not_obvious, what_was, intended FROM my_table Given the lack of line break between :code:`SELECT` and :code:`this_is`, it would appear that the user is intending a hanging indent; however, it is also plausible that they did not and they just forgot to add a line break between them. This ambiguity is unhelpful, both for SQLFluff as a tool and for people who write SQL, in that there are two ways of indenting their SQL.
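A quick way to see how this ambiguity is resolved in practice is to run the badly formatted example above through the simple Python API. This is a minimal sketch (restricted to the indentation rule :sqlfluff:ref:`LT02` to keep the demonstration focused; the exact fixed layout will depend on your dialect and configuration):

.. code-block:: python

    import sqlfluff

    ambiguous = (
        "SELECT this_is,\n"
        "       badly_formatted\n"
        "FROM my_table\n"
    )

    # Rather than preserving the apparent hanging indent, the fixed
    # output uses a conventional line break and indent after SELECT.
    print(sqlfluff.fix(ambiguous, dialect="ansi", rules=["LT02"]))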
Given SQLFluff aims to provide consistency in SQL layout and remove some of the burden of needing to make choices like this - and that it would be very unusual to keep *only hanging indents and disable traditional ones* - the only route left to consistency is to **not allow hanging indents**. Starting in 2.0.0, any hanging indents detected will be converted to traditional indents. .. _implicitindents: Implicit Indents ^^^^^^^^^^^^^^^^ A close cousin of the hanging indent is the *implicit indent*. While it does look a little like a hanging indent, it's much more consistent in its behaviour and is supported from SQLFluff 2.0.0 onwards. An implicit indent is exactly like a normal indent, but doesn't have to be actually *taken* to influence the indentation of lines after it - it just needs to be left un-closed before the end of the line. These are normally available in clauses which take the form of :code:`KEYWORD `, like :code:`WHERE` clauses or :code:`CASE` expressions. .. code-block:: sql -- This WHERE clause here takes advantage of an implicit indent. SELECT * FROM my_table WHERE condition_a AND condition_b; -- With implicit indents disabled (which is currently the -- default), the above formulation is not allowed, and instead -- there should be a newline immediately after `WHERE` (which -- is the location of the _implicit_ indent). SELECT * FROM my_table WHERE condition_a AND condition_b; When addressing both indentation and line-length, implicit indents allow a slightly more compact layout, without significant drawbacks in legibility. They also enable a style much closer to some established style guides. They are however not recommended by many of the major style guides at the time of writing (including the `dbt Labs SQL style guide`_ and the `Mozilla SQL style guide`_), and so are disabled by default. To enable them, set the :code:`allow_implicit_indents` flag in :code:`sqlfluff.indentation` to :code:`True`. .. _templatedindents: Templated Indents ^^^^^^^^^^^^^^^^^ SQLFluff supports templated elements in code, such as those offered by jinja2 (or dbt which relies on it). For simple cases, templated elements are handled as you would expect by introducing additional indents into the layout. .. code-block:: SQL+Jinja SELECT a, {% for n in ['b', 'c', 'd'] %} -- This section is indented relative to 'a' because -- it is inside a jinja for loop. {{ n }}, {% endfor %} e FROM my_table This functionality can be turned off if you wish using the :code:`template_blocks_indent` option in your :ref:`config`. It's important to note here that SQLFluff lints the code after it has been rendered, and so only has access to code which is still present after that process. .. code-block:: SQL+Jinja SELECT a, {% if False %} -- This section of the code cannot be linted because -- it is never rendered due to the `if False` condition. my + poorly + spaced - and/indented AS section_of_code {% endif %} e FROM my_table More complex templated cases are usually characterised by templated tags *cutting across the parse tree*. More formally, this is where the opening and closing tags of a templated section exist at different levels in the parsed structure. Starting in version 2.x, these will be treated differently (prior to version 2.x, situations like this were sometimes handled inconsistently or incorrectly). Indentation should act as a visual cue to the structure of the written SQL, and as such, the most important thing is that template tags belonging to the same block structure use the same indentation.
In the example below, this is the opening and closing elements of the second :code:`if` statement. If treated as a simple case, these tags would have different indents, because they are at different levels of the parse tree and so clearly there is a conflict to be resolved. The view SQLFluff takes on how to resolve this conflict is to pull all of the tags in this section down to the indent of the *least indented* (in the example below that would be the closing :code:`endif` tag). This is similar to the treatment of `C Preprocessor Directives`_, which are treated somewhat as being outside the structure of the rest of the file. In these cases, the content is also *not further indented* as in the simple case, because doing so would make it harder to line up elements within the affected section and outside it (in the example below the :code:`SELECT` and :code:`FROM` are a good illustration). .. code-block:: SQL+Jinja SELECT a, {% if True %} -- This is a simple case. The opening and closing tag are -- both at the same level within the SELECT clause. simple_case AS example, {% endif %} b, {% if True %} -- This is a complex case. The opening tag is within the SELECT -- clause, but the closing tag is outside the statement -- entirely. complex_case AS example FROM table_option_one {% else %} complex_case_two AS example FROM table_option_two {% endif %} .. _layoutconfig: Configuring Layout ------------------ Configuration for layout is spread across three places: #. Indent behavior for particular dialect elements is controlled by the parser. This is because in the background SQLFluff inserts :code:`Indent` and :code:`Dedent` tokens into the parse tree where those things are expected. For more detail see :ref:`layoutindentconfig`. #. Configuration for the spacing and line position of particular types of element (such as commas or operators) is set in the :code:`layout` section of the config file. For more detail see :ref:`layoutspacingconfig`. #. Some elements of layout are still controlled by rules directly. These are usually very specific cases, see :ref:`ruleref` for more details. .. _layoutindentconfig: Configuring indent locations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ One of the key areas for this is the indentation of the :code:`JOIN` expression, which we'll use as an example. Semantically, a :code:`JOIN` expression is part of the :code:`FROM` expression and therefore would be expected to be indented. However, according to many of the most common SQL style guides (including the `dbt Labs SQL style guide`_ and the `Mozilla SQL style guide`_) the :code:`JOIN` keyword is expected to be at the same indent as the :code:`FROM` keyword. By default, *SQLFluff* sides with the current consensus, which is to *not* indent the :code:`JOIN` keyword; however, this is one element which is configurable. By setting values in the :code:`sqlfluff:indentation` section of your config file you can control how this is parsed. For example, the default indentation would be as follows: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 By setting your config file to: .. code-block:: cfg [sqlfluff:indentation] indented_joins = True Then the expected indentation will be: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is a similar :code:`indented_using_on` config (defaulted to :code:`True`) which can be set to :code:`False` to prevent the :code:`USING` or :code:`ON` clause from being indented, in which case the original SQL would become: ..
code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 It's worth noting at this point that, for some users, the additional line break after :code:`ON` is unexpected, and this is a good example of an :ref:`implicit indent `. By setting your config to: .. code-block:: cfg [sqlfluff:indentation] indented_using_on = False allow_implicit_indents = True Then the expected indentation will be: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is also a similar :code:`indented_on_contents` config (defaulted to :code:`True`) which can be set to :code:`False` to align any :code:`AND` subsections of an :code:`ON` block with each other. If set to :code:`False` (assuming implicit indents are still enabled) the original SQL would become: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 These can also be combined, so if the :code:`indented_using_on` config is set to :code:`False`, :code:`indented_on_contents` is also set to :code:`False`, and :code:`allow_implicit_indents` is set to :code:`True`, then the SQL would become: .. code-block:: sql SELECT a, b FROM my_table JOIN another_table ON condition1 AND condition2 There is also a similar :code:`indented_ctes` config (defaulted to :code:`False`) which can be set to :code:`True` to enforce CTEs to be indented within the :code:`WITH` clause: .. code-block:: sql WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM some_cte There is also a similar :code:`indented_then` config (defaulted to :code:`True`) which can be set to :code:`False` to allow :code:`THEN` without an indent after :code:`WHEN`: .. code-block:: sql SELECT a, CASE WHEN b >= 42 THEN 1 ELSE 0 END AS c FROM some_table By default, *SQLFluff* aims to follow the most common approach to indentation. However, if you have other versions of indentation which are supported by published style guides, then please submit an issue on GitHub to have that variation supported by *SQLFluff*. .. _layoutspacingconfig: Configuring layout and spacing ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :code:`[sqlfluff:layout]` section of the config controls the treatment of spacing and line breaks across all rules. The syntax of this section is very expressive; however in normal use, only very small alterations should be necessary from the :ref:`defaultconfig`. The section headings here select by *type*, which corresponds to the :code:`type` defined in the dialect. For example the following section applies to elements of the *type* :code:`comma`, i.e. :code:`,`. .. code-block:: cfg [sqlfluff:layout:type:comma] spacing_before = touch line_position = trailing Within these configurable sections there are a few key elements which are available: * **Spacing Elements**: :code:`spacing_before`, :code:`spacing_after` and :code:`spacing_within`. For each of these options, there are a few possible settings: * The default spacing for all elements is :code:`single` unless otherwise specified. In this state, elements will be spaced with a single space character unless there is a line break between them. * The value of :code:`touch` allows line breaks, but if no line break is present, then no space should be present. A great example of this is the spacing before commas (as shown in the config above), where line breaks may be allowed, but if not they should *touch* the element before.
* Both of the above can be qualified with the :code:`:inline` modifier - which prevents newlines within the segment. This is best illustrated by the spacing found in a qualified identifier like :code:`my_schema.my_table` which uses `touch:inline`, or other clauses where we want to force some elements to be on the same line. * **Line Position**: set using the :code:`line_position` option. By default this is unset, which implies no particular line position requirements. The available options are: * :code:`trailing` and :code:`leading`, which are most common in the placement of commas. Both of these settings *also* allow the option of a comma on its own on a line, or in the middle of a line, *but* if there is a line break on *either side* then they make sure it's on the *correct side*. By default we assume *trailing* commas, but if you (or your organisation) have settled on *leading* commas then you should add the following section to your config: .. code-block:: cfg [sqlfluff:layout:type:comma] line_position = leading * :code:`alone`, which means if there is a line break on either side, then there must be a line break on *both sides* (i.e. that it should be the only thing on that line). * All of the above options can be qualified with the :code:`:strict` modifier - which prevents the *inline* case. For example: .. code-block:: sql -- Setting line_position to just `alone` -- within [sqlfluff:layout:type:set_operator] -- would not allow: SELECT a UNION SELECT b; -- ...or... SELECT a UNION SELECT b; -- but *would* allow both of the following: SELECT a UNION SELECT b; SELECT a UNION SELECT b; -- However the default is set to `alone:strict`, -- in which case the *only* acceptable configuration is: SELECT a UNION SELECT b; .. _`C Preprocessor Directives`: https://www.cprogramming.com/reference/preprocessor/ .. _`dbt Labs SQL style guide`: https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md .. _`Mozilla SQL style guide`: https://docs.telemetry.mozilla.org/concepts/sql_style.html#joins sqlfluff-2.3.5/docs/source/partials/000077500000000000000000000000001451700765000174175ustar00rootroot00000000000000sqlfluff-2.3.5/docs/source/partials/.gitignore000066400000000000000000000000421451700765000214030ustar00rootroot00000000000000rule_table.rst rule_summaries.rst sqlfluff-2.3.5/docs/source/partials/README.md000066400000000000000000000005761451700765000207060ustar00rootroot00000000000000This folder is ignored from the main build and intended only for files included in others via the `.. include::` directive. Some of those files are also auto-generated by scripts, in which case they should be included in the `.gitignore` and not edited by hand. See [generate-rule-docs.py](https://github.com/sqlfluff/sqlfluff/blob/main/docs/generate-rule-docs.py) for more info. sqlfluff-2.3.5/docs/source/partials/starter_config.cfg000066400000000000000000000053361451700765000231140ustar00rootroot00000000000000[sqlfluff] # Supported dialects https://docs.sqlfluff.com/en/stable/dialects.html # Or run 'sqlfluff dialects' dialect = snowflake # One of [raw|jinja|python|placeholder] templater = jinja # Comma separated list of rules to exclude, or None # See https://docs.sqlfluff.com/en/stable/configuration.html#enabling-and-disabling-rules # AM04 (ambiguous.column_count) and ST06 (structure.column_order) are # two of the more controversial rules included to illustrate usage.
exclude_rules = ambiguous.column_count, structure.column_order # The standard max_line_length is 80 in line with the convention of # other tools and several style guides. Many projects however prefer # something a little longer. # Set to zero or negative to disable checks. max_line_length = 120 # CPU processes to use while linting. # The default is "single threaded" to allow easy debugging, but this # is often undesirable at scale. # If positive, just implies number of processes. # If negative or zero, implies number_of_cpus - specified_number. # e.g. -1 means use all processors but one. 0 means all cpus. processes = -1 # If using the dbt templater, we recommend setting the project dir. [sqlfluff:templater:dbt] project_dir = ./ [sqlfluff:indentation] # While implicit indents are not enabled by default, many of the # SQLFluff maintainers do use them in their projects. allow_implicit_indents = True # The default configuration for aliasing rules is "consistent" # which will auto-detect the setting from the rest of the file. This # is less desirable in a new project and you may find this (slightly # more strict) setting more useful. [sqlfluff:rules:aliasing.table] aliasing = explicit [sqlfluff:rules:aliasing.column] aliasing = explicit [sqlfluff:rules:aliasing.length] min_alias_length = 3 # The default configuration for capitalisation rules is "consistent" # which will auto-detect the setting from the rest of the file. This # is less desirable in a new project and you may find this (slightly # more strict) setting more useful. # Typically we find users rely on syntax highlighting rather than # capitalisation to distinguish between keywords and identifiers. # Clearly, if your organisation has already settled on uppercase # formatting for any of these syntax elements then set them to "upper". # See https://stackoverflow.com/questions/608196/why-should-i-capitalize-my-sql-keywords-is-there-a-good-reason [sqlfluff:rules:capitalisation.keywords] capitalisation_policy = lower [sqlfluff:rules:capitalisation.identifiers] capitalisation_policy = lower [sqlfluff:rules:capitalisation.functions] extended_capitalisation_policy = lower [sqlfluff:rules:capitalisation.literals] capitalisation_policy = lower [sqlfluff:rules:capitalisation.types] extended_capitalisation_policy = lower sqlfluff-2.3.5/docs/source/production.rst000066400000000000000000000241271451700765000205260ustar00rootroot00000000000000.. _production-use: Production Usage & Security =========================== SQLFluff is designed to be used both as a utility for developers and as part of `CI/CD`_ pipelines. .. _security: Security Considerations ----------------------- A full list of `Security Advisories is available on GitHub `_. Given the context of how SQLFluff is designed to be used, there are three different tiers of access through which users may be able to manipulate how the tool functions in a secure environment. #. *Users may have edit access to the SQL code which is being linted*. While SQLFluff does not execute the SQL itself, in the process of the :ref:`templating step ` (in particular via jinja or dbt), certain macros may have the ability to execute arbitrary SQL code (e.g. the `dbt run_query macro`_). For the Jinja templater, SQLFluff uses the `Jinja2 SandboxedEnvironment`_ to limit the execution of unsafe code. When looking to further secure this situation, see below for ways to limit the ability of users to import other libraries. #. *Users may have edit access to the SQLFluff :ref:`config-files`*.
In some (perhaps, many) environments, the users who can edit SQL files may also be able to access and edit the :ref:`config-files`. It's important to note that, because of :ref:`in_file_config`, users who can edit SQL files which are designed to be linted will also have access to the vast majority of any configuration options available in :ref:`config-files`. This means that there is minimal additional protection from restricting access to :ref:`config-files` for users who already have access to edit the linting target files (as described above). #. *Users may have access to change how SQLFluff is invoked*. SQLFluff can be invoked either as a command line tool or via the python API. Typically the method is fixed for a given application. When thinking about how to restrict the ability of users to call insecure code, SQLFluff aims to provide options at the point of invocation. In particular, as described above, the primary risk vector for SQLFluff is the macro environment as described in :ref:`templateconfig`. To restrict users being able to bring arbitrary python methods into sqlfluff via the ``library_path`` configuration value (see :ref:`jinja_library_templating`), we recommend that for secure environments you override this config value either by providing an ``override`` option to the :class:`FluffConfig` object if using the Python API or via the ``--library-path`` CLI option. To disable this option entirely via the CLI: .. code-block:: bash $ sqlfluff lint my_path --library-path none To disable this option entirely via the python API: .. literalinclude:: ../../examples/04_config_overrides.py :language: python .. _`Jinja2 SandboxedEnvironment`: https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment .. _`dbt run_query macro`: https://docs.getdbt.com/reference/dbt-jinja-functions/run_query Using SQLFluff on a whole SQL codebase -------------------------------------- The `exit code`_ provided by SQLFluff when run as a command line utility is designed to be useful in deployment pipelines. If no violations are found then the `exit code`_ will be 0. If violations are found then a non-zero code will be returned which can be interrogated to find out more. - An error code of ``0`` means *operation success*, *no issues found*. - An error code of ``1`` means *operation success*, *issues found*. For example this might mean that a linting issue was found, or that one file could not be parsed. - An error code of ``2`` means an error occurred and the operation could not be completed. For example a configuration issue or an internal error within SQLFluff. .. _`CI/CD`: https://en.wikipedia.org/wiki/Continuous_integration .. _`exit code`: https://shapeshed.com/unix-exit-codes/ .. _diff-quality: Using SQLFluff on changes using ``diff-quality`` ------------------------------------------------ For projects with large amounts of (potentially imperfect) SQL code, the full SQLFluff output could be very large, which can be distracting -- perhaps the CI build for a one-line SQL change shouldn't encourage the developer to fix lots of unrelated quality issues. To support this use case, SQLFluff integrates with a quality checking tool called ``diff-quality``. By running SQLFluff using ``diff-quality`` (rather than running it directly), you can limit the output to the new or modified SQL in the branch (aka pull request or PR) containing the proposed changes. Currently, ``diff-quality`` requires that you are using ``git`` for version control.
NOTE: Installing SQLFluff automatically installs the ``diff_cover`` package that provides the ``diff-quality`` tool. Adding ``diff-quality`` to your builds ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In your CI build script: 1. Set the current working directory to the ``git`` repository containing the SQL code to be checked. 2. Run ``diff-quality``, specifying SQLFluff as the underlying tool: .. code-block:: text $ diff-quality --violations sqlfluff The output will look something like: .. code-block:: text ------------- Diff Quality Quality Report: sqlfluff Diff: origin/master...HEAD, staged and unstaged changes ------------- sql/audience_size_queries/constraints/_postcondition_check_gdpr_compliance.sql (0.0%): sql/audience_size_queries/constraints/_postcondition_check_gdpr_compliance.sql:5: Unquoted Identifiers must be consistently upper case. ------------- Total: 1 line Violations: 1 line % Quality: 0% ------------- These messages are basically the same as those provided directly by SQLFluff, although the format is a little different. Note that ``diff-quality`` only lists the line *numbers*, not the character position. If you need the character position, you will need to run SQLFluff directly. For more information on ``diff-quality``, see the `documentation `_. It covers topics such as: * Generating HTML reports * Controlling which branch to compare against (i.e. to determine new/changed lines). The default is `origin/master`. * Configuring ``diff-quality`` to return an error code if the quality is too low. * Troubleshooting .. _using-pre-commit: Using `pre-commit`_ ^^^^^^^^^^^^^^^^^^^ `pre-commit`_ is a framework to manage git "hooks" triggered right before a commit is made. A `git hook`_ is a git feature to "fire off custom scripts" when specific actions occur. Using `pre-commit`_ with SQLFluff is a good way to provide automated linting to SQL developers. With `pre-commit`_, you also get the benefit of only linting/fixing the files that changed. SQLFluff comes with two `pre-commit`_ hooks: * sqlfluff-lint: returns linting errors. * sqlfluff-fix: attempts to fix rule violations. .. warning:: For safety reasons, ``sqlfluff-fix`` by default will not make any fixes in files that had templating or parse errors, even if those errors were ignored using ``noqa`` or ``--ignore``. Although it is not advised, you *can* tell SQLFluff to try and fix these files by overriding the ``fix_even_unparsable`` setting in the ``.sqlfluff`` config file or using the ``sqlfluff fix --FIX-EVEN-UNPARSABLE`` command line option. *Overriding this behavior may break your SQL. If you use this override, always be sure to review any fixes applied to files with templating or parse errors to verify they are okay.* You should create a file named `.pre-commit-config.yaml` at the root of your git project, which should look like this: .. code-block:: yaml repos: - repo: https://github.com/sqlfluff/sqlfluff rev: |release| hooks: - id: sqlfluff-lint # For dbt projects, this installs the dbt "extras". # You will need to select the relevant dbt adapter for your dialect # (https://docs.getdbt.com/docs/available-adapters): # additional_dependencies: ['', 'sqlfluff-templater-dbt'] - id: sqlfluff-fix # Arbitrary arguments to show an example # args: [--rules, "LT02,CP02"] # additional_dependencies: ['', 'sqlfluff-templater-dbt'] When trying to use the `dbt templater`_, uncomment the ``additional_dependencies`` to install the extras. This is equivalent to running ``pip install sqlfluff-templater-dbt``.
You can specify the version of ``dbt-adapter`` used in `pre-commit`_, for example: .. code-block:: yaml additional_dependencies : ['dbt-bigquery==1.0.0', 'sqlfluff-templater-dbt'] See the list of available `dbt-adapters`_. Note that you can pass the same arguments available through the CLI using ``args:``. Using `GitHub Actions`_ to Annotate PRs ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There are two ways to utilize SQLFluff to annotate GitHub PRs. 1. When `sqlfluff lint` is run with the `--format github-annotation-native` option, it produces output formatted as `Github workflow commands`_ which are converted into pull request annotations by GitHub. 2. When `sqlfluff lint` is run with the `--format github-annotation` option, it produces output compatible with this `action `_, which uses the GitHub API to annotate the SQL in `GitHub pull requests`_. For more information and examples on using SQLFluff in GitHub Actions, see the `sqlfluff-github-actions repository `_. .. _`pre-commit`: https://pre-commit.com/ .. _`git hook`: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks .. _`dbt templater`: `dbt-project-configuration` .. _`GitHub Actions`: https://github.com/features/actions .. _`GitHub pull requests`: https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests .. _`Github workflow commands`: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message .. _`dbt-adapters`: https://docs.getdbt.com/docs/available-adapters sqlfluff-2.3.5/docs/source/realworld.rst000066400000000000000000000107601451700765000203310ustar00rootroot00000000000000.. _realworldref: SQL in the Wild =============== SQL has been around for a long time, as a language for communicating with databases, like a communication protocol. More recently, with the rise of *data* as a business function, or a domain in its own right, SQL has also become an invaluable tool for defining the *structure* of data and analysis - not just as a one off but as a form of `infrastructure as code`_.
For example in python this would be using the `format string syntax`_: .. code-block:: python "SELECT {foo} FROM {tbl}".format(foo="bar", tbl="mytable") Which would evaluate to: .. code-block:: sql SELECT bar FROM mytable 2. Using a dedicated templating library such as `jinja2`_. This allows a lot more flexibility and more powerful expressions and macros. See the :ref:`templateconfig` section for more detail on how this works. - Often there are tools like `dbt`_ or `apache airflow`_ which allow `templated`_ sql to be used directly, and they will implement a library like `jinja2`_ under the hood themselves. All of these templating tools are great for `modularity`_ but they also mean that the SQL files themselves are no longer valid SQL code, because they now contain these configured *placeholder* values, intended to improve modularity. SQLFluff supports both of the templating methods outlined above, as well as `dbt`_ projects, to allow you to still lint these "dynamic" SQL files as part of your CI/CD pipeline (which is great 🙌), rather than waiting until you're in production (which is bad 🤦, and maybe too late). During the CI/CD pipeline (or any time that we need to handle `templated`_ code), SQLFluff needs additional info in order to interpret your templates as valid SQL code. You do so by providing dummy parameters in SQLFluff configuration files. When substituted into the template, these values should evaluate to valid SQL (so SQLFluff can check its style, formatting, and correctness), but the values don't need to match actual values used in production. This means that you can use *much simpler* dummy values than what you would really use. The recommendation is to use *the simplest* possible dummy value that still allows your code to evaluate to valid SQL so that the configuration values can be as streamlined as possible. .. _`infrastructure as code`: https://en.wikipedia.org/wiki/Infrastructure_as_code .. _`viewpoint section of the docs for the open-source tool dbt`: https://docs.getdbt.com/docs/viewpoint .. _`quality assurance`: https://docs.getdbt.com/docs/viewpoint#quality-assurance .. _`modularity`: https://docs.getdbt.com/docs/viewpoint#modularity .. _`consistent style`: https://www.smashingmagazine.com/2012/10/why-coding-style-matters/ .. _`linters`: https://en.wikipedia.org/wiki/Lint_(software) .. _`flake8`: http://flake8.pycqa.org/ .. _`jslint`: https://www.jslint.com/ .. _`templated`: https://en.wikipedia.org/wiki/Template_processor .. _`format string syntax`: https://docs.python.org/3/library/string.html#formatstrings .. _`jinja2`: https://jinja.palletsprojects.com/ .. _`apache airflow`: https://airflow.apache.org .. _`dbt`: https://getdbt.com sqlfluff-2.3.5/docs/source/releasenotes.rst000066400000000000000000000336411451700765000210320ustar00rootroot00000000000000.. _releasenotes: Release Notes ============= This page aims to act as a guide for migrating between major SQLFluff releases. Necessarily this means that bugfix releases, or releases requiring no change for the user are not mentioned. For full details of each individual release, see the detailed changelog_. .. _changelog: https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md Upgrading to 2.2 ---------------- This release changes some of the interfaces between SQLFluff core and our plugin ecosystem. The only *breaking* change is in the interface between SQLFluff and *templater* plugins (which are not common in the ecosystem, hence why this is only a minor and not a major release). 
For all plugins, we also recommend a different structure for their imports (especially for rule plugins, which are more common in the ecosystem), for performance and stability reasons. Some users had been experiencing very long import times with previous releases as a result of the layout of plugin imports. Users with affected plugins will begin to see a warning from this release onward, which can be resolved for their plugin by updating to a new version of that plugin which follows the guidelines. Templater plugins ^^^^^^^^^^^^^^^^^ Templaters before this version would pass a :code:`make_template()` callable to the slicing methods as part of being able to map the source file. This method would accept a :code:`str` and return a :code:`jinja2.environment.Template` object to allow the templater to render multiple variants of the template to do the slicing operation (which allows linting issues found in templated files to be mapped accurately back to their position in the unrendered source file). This approach is not very generalisable, and did not support templating operations with libraries other than :code:`jinja2`. As a result, we have amended the interface to instead pass a :code:`render_func()` callable, which accepts a :code:`str` and returns a :code:`str`. This works fine for the :code:`jinja` templater (and by extension the :code:`dbt` templater) as they can simply wrap the original callable with a method that calls :code:`render()` on the original :code:`Template` object. It also however opens up the door to other templating engines, and in particular to *remote* templaters which might pass unrendered code over an HTTP connection for rendering. Specifically: * The :code:`slice_file()` method of the base templater classes no longer accepts an optional :code:`make_template` argument or a :code:`templated_str` argument. * Instead a :code:`render_func` callable should be passed which can be called to generate the :code:`templated_str` on demand. * Unlike the optional :code:`make_template` - :code:`render_func` is **not** optional and should always be present. Rule plugins ^^^^^^^^^^^^ We recommend that the module in a plugin which defines all of the hook implementations (anything using the :code:`@hookimpl` decorator) should be able to fully import before any rule implementations are imported. More specifically, SQLFluff must be able to both *import* **and** *run* any implementations of :code:`get_configs_info()` before any plugin rules (i.e. any derivatives of :py:class:`BaseRule `) are *imported*. Because of this, we recommend that rules are defined in a separate module to the root of the plugin and then only imported *within* the :code:`get_rules()` method. Importing in the main body of the module was previously our recommendation and so may be the case for versions of some plugins. If one of your plugins does use imports in this way, a warning will be presented from this version onward, recommending that you update your plugin. See the :ref:`developingpluginsref` section of the docs for an example. .. _upgrading_2_0: Upgrading from 1.x to 2.0 ------------------------- Upgrading to 2.0 brings several important breaking changes: * All bundled rules have been recoded, both from generic :code:`L00X` formats into groups of similar codes (e.g. an *aliasing* group with codes of the format :code:`AL0X`), but also given *names* to allow much clearer referencing (e.g. :code:`aliasing.column`). * :ref:`ruleconfig` now uses the rule *name* rather than the rule *code* to specify the section.
Any unrecognised references in config files (whether they are references which *do* match existing rules by code or alias, or whether they match no rules at all) will raise warnings at runtime. * A complete re-write of layout and whitespace handling rules (see :ref:`layoutref`), and with that a change in how layout is configured (see :ref:`layoutconfig`) and the combination of some rules that were previously separate. One example of this is that the legacy rules :code:`L001`, :code:`L005`, :code:`L006`, :code:`L008`, :code:`L023`, :code:`L024`, :code:`L039`, :code:`L048` & :code:`L071` have been combined simply into :sqlfluff:ref:`LT01`. Recommended upgrade steps ^^^^^^^^^^^^^^^^^^^^^^^^^ To upgrade smoothly between versions, we recommend the following sequence: #. The upgrade path will be simpler if you have a slimmer configuration file. Before upgrading, consider removing any sections from your configuration file (often :code:`.sqlfluff`, see :ref:`config`) which match the current :ref:`defaultconfig`. There is no need to respecify defaults in your local config if they are not different to the stock config. #. In a local (or other *non-production*) environment, upgrade to SQLFluff 2.0.x. We recommend using a `compatible release`_ specifier such as :code:`~=2.0.0`, to ensure any minor bugfix releases are automatically included. #. Examine your configuration file (as mentioned above), and evaluate how rules are currently specified. We recommend primarily using *either* :code:`rules` *or* :code:`exclude_rules` rather than both, as detailed in :ref:`ruleselection`. Using either the :code:`sqlfluff rules` CLI command or the online :ref:`ruleref`, replace *all references* to legacy rule codes (i.e. codes of the form :code:`L0XX`). Specifically: * In the :code:`rules` and :code:`exclude_rules` config values. Here, consider using group specifiers or names to make your config simpler to read and understand (e.g. :code:`capitalisation` is much more understandable than :code:`CP01,CP02,CP03,CP04,CP05`, but the two specifiers will have the same effect). Note that while legacy codes *will still be understood* here (because they remain valid as aliases for those rules) - you may find that some rules no longer exist in isolation and so these references may be misleading. e.g. :code:`L005` is now an alias for :sqlfluff:ref:`layout.spacing` but that rule is much more broad-ranging than the original scope of :code:`L005`, which was only spacing around commas. * In :ref:`ruleconfig`. In particular here, legacy references to rule codes are *no longer valid*, will raise warnings, and until resolved, the configuration in those sections will be ignored. The new section references should include the rule *name* (e.g. :code:`[sqlfluff:rules:capitalisation.keywords]` rather than :code:`[sqlfluff:rules:L010]`). This switch is designed to make configuration files more readable, but we cannot support backward compatibility here without also having to resolve the potential ambiguity of the scenario where both *code-based* and *name-based* references are used. * Review the :ref:`layoutconfig` documentation, and check whether any indentation or layout configuration should be revised. #. Check your project for :ref:`in_file_config` which refer to rule codes. Alter these in the same manner as described above for configuration files. #. Test linting your project for unexpected linting issues.
.. _upgrading_2_0:

Upgrading from 1.x to 2.0
-------------------------

Upgrading to 2.0 brings several important breaking changes:

* All bundled rules have been recoded, both from generic :code:`L00X` formats into groups within similar codes (e.g. an *aliasing* group with codes of the format :code:`AL0X`), but also given *names* to allow much clearer referencing (e.g. :code:`aliasing.column`).
* :ref:`ruleconfig` now uses the rule *name* rather than the rule *code* to specify the section. Any unrecognised references in config files (whether they are references which *do* match existing rules by code or alias, or whether they match no rules at all) will raise warnings at runtime.
* A complete re-write of layout and whitespace handling rules (see :ref:`layoutref`), and with that a change in how layout is configured (see :ref:`layoutconfig`) and the combination of some rules that were previously separate. One example of this is that the legacy rules :code:`L001`, :code:`L005`, :code:`L006`, :code:`L008`, :code:`L023`, :code:`L024`, :code:`L039`, :code:`L048` & :code:`L071` have been combined simply into :sqlfluff:ref:`LT01`.

Recommended upgrade steps
^^^^^^^^^^^^^^^^^^^^^^^^^

To upgrade smoothly between versions, we recommend the following sequence:

#. The upgrade path will be simpler if you have a slimmer configuration file. Before upgrading, consider removing any sections from your configuration file (often :code:`.sqlfluff`, see :ref:`config`) which match the current :ref:`defaultconfig`. There is no need to respecify defaults in your local config if they are not different to the stock config.
#. In a local (or other *non-production*) environment, upgrade to SQLFluff 2.0.x. We recommend using a `compatible release`_ specifier such as :code:`~=2.0.0`, to ensure any minor bugfix releases are automatically included.
#. Examine your configuration file (as mentioned above), and evaluate how rules are currently specified. We recommend primarily using *either* :code:`rules` *or* :code:`exclude_rules`, rather than both, as detailed in :ref:`ruleselection`. Using either the :code:`sqlfluff rules` CLI command or the online :ref:`ruleref`, replace *all references* to legacy rule codes (i.e. codes of the form :code:`L0XX`). Specifically:

   * In the :code:`rules` and :code:`exclude_rules` config values. Here, consider using group specifiers or names to make your config simpler to read and understand (e.g. :code:`capitalisation` is much more understandable than :code:`CP01,CP02,CP03,CP04,CP05`, but the two specifiers will have the same effect). Note that while legacy codes *will still be understood* here (because they remain valid as aliases for those rules), you may find that some rules no longer exist in isolation and so these references may be misleading. e.g. :code:`L005` is now an alias for :sqlfluff:ref:`layout.spacing`, but that rule is much more broad ranging than the original scope of :code:`L005`, which was only spacing around commas.
   * In :ref:`ruleconfig`. In particular here, legacy references to rule codes are *no longer valid*, will raise warnings, and until resolved, the configuration in those sections will be ignored. The new section references should include the rule *name* (e.g. :code:`[sqlfluff:rules:capitalisation.keywords]` rather than :code:`[sqlfluff:rules:L010]`). This switch is designed to make configuration files more readable, but we cannot support backward compatibility here without also having to resolve the potential ambiguity of the scenario where both *code-based* and *name-based* references are used.
   * Review the :ref:`layoutconfig` documentation, and check whether any indentation or layout configuration should be revised.

#. Check your project for :ref:`in_file_config` which refer to rule codes. Alter these in the same manner as described above for configuration files (a short example is included at the end of this section).
#. Test linting your project for unexpected linting issues. Where found, consider whether to use :code:`sqlfluff fix` to repair them in bulk, or (if you disagree with the changes) consider changing which rules you enable or their configuration accordingly. In particular you may notice:

   * The indentation rule (:code:`L003` as was, now :sqlfluff:ref:`LT02`) has had a significant rewrite, and while much more flexible and accurate, it is also more specific. Note that :ref:`hangingindents` are no longer supported, and that while not enabled by default, many users may find that enabling :ref:`implicitindents` fits their organisation's style better.
   * The spacing rule (:sqlfluff:ref:`LT01`: :sqlfluff:ref:`layout.spacing`) has a much wider scope, and so may pick up spacing issues that were not previously enforced.

   If you disagree with any of these, you can override the :code:`sqlfluff:layout` sections of the config with different (or just more liberal) settings, like :code:`any`.

.. _`compatible release`: https://peps.python.org/pep-0440/#compatible-release

Example 2.0 config
^^^^^^^^^^^^^^^^^^

To illustrate the points above, this is an illustrative example config for a 2.0 compatible project. Note that the config is fairly brief and sets only the values which differ from the default config.

.. code-block:: cfg

    [sqlfluff]
    dialect = snowflake
    templater = dbt
    max_line_length = 120

    # Exclude some specific rules based on a mixture of codes and names
    exclude_rules = RF02, RF03, RF04, ST06, ST07, AM05, AM06, convention.left_join, layout.select_targets

    [sqlfluff:indentation]
    # Enabling implicit indents for this project.
    # See https://docs.sqlfluff.com/en/stable/layout.html#configuring-indent-locations
    allow_implicit_indents = True

    # Add a few specific rule configurations, referenced by the rule names
    # and not by the rule codes.
    [sqlfluff:rules:capitalisation.keywords]
    capitalisation_policy = lower

    [sqlfluff:rules:capitalisation.identifiers]
    capitalisation_policy = lower

    [sqlfluff:rules:capitalisation.functions]
    extended_capitalisation_policy = lower

    # An example of setting a custom layout specification which
    # is more lenient than default config.
    [sqlfluff:layout:type:set_operator]
    line_position = alone
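For the in-file configuration mentioned in step 4 above, the migration is usually a one-line change to each directive. Below is a hypothetical before-and-after snippet, using the :code:`L006` to :sqlfluff:ref:`LT01` mapping noted earlier in this section:

.. code-block:: sql

    -- Before: ignoring the legacy spacing rule by its old code.
    a.a*a.b AS bad_1  -- noqa: L006

    -- After: referencing the new combined rule code.
    a.a*a.b AS bad_1  -- noqa: LT01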
Upgrading to 1.4
----------------

This release brings several internal changes, and acts as a prelude to 2.0.0. In particular, the following config values have changed:

* :code:`sqlfluff:rules:L007:operator_new_lines` has been changed to :code:`sqlfluff:layout:type:binary_operator:line_position`.
* :code:`sqlfluff:rules:comma_style` and :code:`sqlfluff:rules:L019:comma_style` have both been consolidated into :code:`sqlfluff:layout:type:comma:line_position`.

If any of these values have been set in your config, they will be automatically translated to the new values at runtime, and a warning will be shown. To silence the warning, update your config file to the new values. For more details on configuring layout see :ref:`layoutconfig`.

Upgrading to 1.3
----------------

This release brings several potentially breaking changes to the underlying parse tree. For users of the cli tool in a linting context you should notice no change. If however your application relies on the structure of the SQLFluff parse tree or the naming of certain elements within the yaml format, then this may not be a drop-in replacement. Specifically:

* The addition of a new :code:`end_of_file` meta segment at the end of the parse structure.
* The addition of a :code:`template_loop` meta segment to signify a jump backward in the source file within a loop structure (e.g. a jinja :code:`for` loop).
* Much more specific types on some raw segments. In particular, :code:`identifier` and :code:`literal` type segments will now appear in the parse tree with their more specific type (which used to be called :code:`name`), e.g. :code:`naked_identifier`, :code:`quoted_identifier`, :code:`numeric_literal` etc. If using the python api, the *parent* type (such as :code:`identifier`) will still register if you call :code:`.is_type("identifier")`, as this function checks all inherited types, as sketched below. However the eventual type returned by :code:`.get_type()` will now be (in most cases) what used to be accessible at :code:`.name`. The :code:`name` attribute will be deprecated in a future release.
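For API users, the type change can be seen directly on parsed segments. Below is a minimal sketch (assuming the :code:`ansi` dialect and the core :code:`Linter` API) of the behaviour described above:

.. code-block:: python

    from sqlfluff.core import Linter

    parsed = Linter(dialect="ansi").parse_string("SELECT foo FROM tbl\n")
    # Find the first identifier segment by its new, more specific type.
    ident = next(parsed.tree.recursive_crawl("naked_identifier"))
    # .get_type() returns the specific type (previously at .name)...
    assert ident.get_type() == "naked_identifier"
    # ...while .is_type() still matches the inherited parent type.
    assert ident.is_type("identifier")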
Upgrading to 1.2
----------------

This release introduces the capability to automatically skip large files, and sets default limits on the maximum file size before a file is skipped. Users should see a performance gain, but may experience warnings associated with these skipped files.

Upgrades pre 1.0
----------------

* **0.13.x** new rule for quoted literals, option to remove hanging indents in rule L003, and introduction of ``ignore_words_regex``.
* **0.12.x** dialect is now mandatory, the ``spark3`` dialect was renamed to ``sparksql`` and datatype capitalisation was extracted from L010 to its own rule L063.
* **0.11.x** rule L030 changed to use ``extended_capitalisation_policy``.
* **0.10.x** removed support for older dbt versions < 0.20 and stopped ``fix`` attempting to fix unparsable SQL.
* **0.9.x** refinement of the Simple API, dbt 1.0.0 compatibility, and the official SQLFluff Docker image.
* **0.8.x** an improvement to the performance of the parser, a rebuild of the Jinja Templater, and a progress bar for the CLI.
* **0.7.x** extracted the dbt templater to a separate plugin and removed the ``exasol_fs`` dialect (now merged in with the main ``exasol``).
* **0.6.x** introduced parallel processing, which necessitated a big re-write of several innards.
* **0.5.x** introduced some breaking changes to the API.
* **0.4.x** dropped python 3.5, added the dbt templater, source mapping and also introduced the python API.
* **0.3.x** drops support for python 2.7 and 3.4, and also reworks the handling of indentation linting in a potentially not backward compatible way.
* **0.2.x** added templating support and a big restructure of rules and changed how users might interact with SQLFluff on templated code.
* **0.1.x** involved a major re-write of the parser, completely changing the behaviour of the tool with respect to complex parsing.

sqlfluff-2.3.5/docs/source/rules.rst000066400000000000000000000062541451700765000174710ustar00rootroot00000000000000
.. _ruleref:

Rules Reference
===============

`Rules` in `SQLFluff` are implemented as `crawlers`. These are entities which work their way through the parsed structure of a query to evaluate a particular rule or set of rules. The intent is that the definition of each specific rule should be really streamlined and only contain the logic for the rule itself, with all the other mechanics abstracted away. To understand how rules are enabled and disabled see :ref:`ruleselection`.

Core Rules
----------

Certain rules belong to the :code:`core` rule group. In order for a rule to be designated as :code:`core`, it must meet the following criteria:

* Stable
* Applies to most dialects
* Could detect a syntax issue
* Isn't too opinionated toward one style (e.g. the :code:`dbt` style guide)

Core rules can also make it easier to roll out SQLFluff to a team by only needing to follow a 'common sense' subset of rules initially, rather than spending time understanding and configuring all the rules, some of which your team may not necessarily agree with. We believe teams will eventually want to enforce more than just the core rules, and we encourage everyone to explore all the rules and customize a rule set that best suits their organization. See the :ref:`config` section for more information on how to enable only :code:`core` rules by default.

Inline Ignoring Errors
----------------------

`SQLFluff` features inline error ignoring. For example, the following will ignore the lack of whitespace surrounding the ``*`` operator.

.. code-block:: sql

    a.a*a.b AS bad_1 -- noqa: LT01

Multiple rules can be ignored by placing them in a comma-delimited list.

.. code-block:: sql

    a.a * a.b AS bad_2, -- noqa: LT01, LT03

It is also possible to ignore non-rule based errors, and instead opt to ignore templating (``TMP``) & parsing (``PRS``) errors.

.. code-block:: sql

    WHERE col1 = 2 AND dt >= DATE_ADD(CURRENT_DATE(), INTERVAL -2 DAY) -- noqa: PRS

.. note:: It should be noted that ignoring ``TMP`` and ``PRS`` errors can lead to incorrect ``sqlfluff lint`` and ``sqlfluff fix`` results as `SQLFluff` can misinterpret the SQL being analysed.

Should the need arise, not specifying specific rules to ignore will ignore all rules on the given line.

.. code-block:: sql

    a.a*a.b AS bad_3 -- noqa

.. _inline_ignoring_errors:

Ignoring line ranges
^^^^^^^^^^^^^^^^^^^^

Similar to `pylint's "pylint" directive"`_, ranges of lines can be ignored by adding :code:`-- noqa:disable=<rule>[,...] | all` to the line. Following this directive, specified rules (or all rules, if "all" was specified) will be ignored until a corresponding `-- noqa:enable=<rule>[,...] | all` directive.

.. code-block:: sql

    -- Ignore rule AL02 from this line forward
    SELECT col_a a FROM foo -- noqa: disable=AL02

    -- Ignore all rules from this line forward
    SELECT col_a a FROM foo -- noqa: disable=all

    -- Enforce all rules from this line forward
    SELECT col_a a FROM foo -- noqa: enable=all

.. _`pylint's "pylint" directive"`: http://pylint.pycqa.org/en/latest/user_guide/message-control.html

Rule Index
----------

.. include:: partials/rule_table.rst

.. include:: partials/rule_summaries.rst

sqlfluff-2.3.5/docs/source/teamrollout.rst000066400000000000000000000126671451700765000207050ustar00rootroot00000000000000
.. _rolloutref:

Rolling out SQLFluff with a new team
====================================

Rolling out SQLFluff, like rolling out any other linter or style guide, is not just about the *technical* rollout, but also how you introduce the tool to the team and organisation around you. *The effect of SQLFluff should be to change your behaviours, not just your SQL*. With that in mind, it's worth reminding ourselves what we're trying to achieve with a tool like this. A set of potential success criteria might be:

#. **Faster comprehension and collaboration** by the team on a shared codebase. This includes more effective (and more enjoyable) code review on top of code which is easy to review and build upon.
#. **Easier and faster onboarding** for new team members. By adopting a style which is clean and *consistent with other organisations* we make it easier for new people to join the team.
#. **Improved adoption of shared SQL** from other sources.
   If the SQL found in open source projects is easy to read and *looks familiar* then you're more likely to use it. This means more reusable code across the industry.

#. **Productive discussions around style**. By defining your organisation's style guide in code, it means you can version control it, discuss changes and ultimately give a concrete output to discussions over style. *You like leading commas? Make a PR to .sqlfluff and let's discuss with the team what the implications would be*.

Consider which of these success measures is most important and most desirable for your team. *Write that down*. The following steps are a guide, which you should adapt to your organisation, and in particular its level of data maturity.

1. Assess the situation
-----------------------

This step is done by you, or a small group of people who *already think that linting is a good idea*.

* Run ``sqlfluff lint`` on your project with the stock configuration to find out how things work *out of the box*.
* Set up your :ref:`config` so that things run and that you can get a readout of the errors which you would want the team to see and *not the ones you don't*. Great tools for this are to use :ref:`sqlfluffignore`, ``--exclude-rules`` or ``--ignore`` in the CLI (see :ref:`cliref`).
* Identify which areas of your project are the worst and which are the tidiest. In particular, any areas which are particularly tidy already will be particularly useful in the next phase.

2. Make a plan
--------------

There are three sensible rollout phases:

#. **Pre CI/CD**.
#. **Soft CI/CD** (warnings but no strict fails).
#. **Hard CI/CD** (violations mean deployments fail).

In each of these phases you have three levers to play with:

#. Areas of the project in which to apply rules.
#. Depth of rules enforced (this might also include whether to ignore parsing errors or not).
#. Whether to just lint changes (:ref:`diff-quality`), or to lint all the existing code as well.

Work out a sensible roadmap of how hard you want to go in each phase. Be clear who is responsible for changes at each phase. An example plan might look like this:

#. **Pre CI/CD** we get the tidiest area of a project to a stage that it fully passes the rules we eventually want to enforce. The core project team will do this. Liberal use of ``sqlfluff fix`` can be a lifesaver in this phase.
#. **Soft CI/CD** is applied to the whole project, team members are encouraged to write tidy SQL, but not *required* to.
#. **Hard CI/CD** is applied to the tidy areas of the project and also to any changes to the whole project. Anyone making changes is *required* to write SQL which passes the checks.
#. **Hard CI/CD** is applied to the whole project, not just changes, with only a few particularly problematic files explicitly ignored using :ref:`sqlfluffignore`.

3. Build the need
-----------------

Bring your team together to introduce both linting as a concept and also SQLFluff as a tool. At this stage it's **really important that the team understand *why* this is a good thing**. Consider whether to discuss the whole plan from step 2, or whether to only talk about the first few steps. Aim to make this an empowering experience that everyone can get involved with rather than *another piece of admin they need to do*. At this stage, you might also want to consider other tools in the SQLFluff ecosystem such as the :ref:`SQLFluff pre-commit hook`, the `SQLFluff VSCode plugin`_ and the `SQLFluff online formatter`_.
.. _`SQLFluff VSCode plugin`: https://github.com/sqlfluff/vscode-sqlfluff
.. _`SQLFluff online formatter`: https://online.sqlfluff.com/

4. Do, Review & Reassess
------------------------

Once the plan is in motion, make sure to start putting in place norms and rituals around how you change the rules. In particular:

* How would someone suggest changing the style guide or enabling/disabling a rule?
* How do we assess whether the changes are working for the team or whether some are creating unnecessary stress?

It's normal for your usage of tools like SQLFluff to change and evolve over time. It's important to expect this change in advance, and welcome it when it happens. Always make sure you're driving toward the success measures you decided up front, rather than just resisting the change.

5. Spread the word 😁
---------------------

Did it work? If so, spread the word. Tell a friend about SQLFluff. If you're lucky they might share your views on comma placement 🤷‍♀️.

sqlfluff-2.3.5/docs/source/vision.rst000066400000000000000000000024011451700765000176440ustar00rootroot00000000000000
.. _vision:

Vision for SQLFluff
===================

SQLFluff has a few components:

1. A generic parser for SQL which aims to be able to unify SQL written in different dialects into a comparable format. The *parser*.
2. A mechanism for measuring written SQL against a set of rules, with the added ability to fix any violations found. The *linter*.
3. An opinionated set of guidelines for how SQL should be structured and formatted. The *rules*.

The core vision [#f1]_ for SQLFluff is to be really good at being the *linter*. The reasoning for this is outlined in :ref:`realworldref`. Most of the codebase for SQLFluff is the *parser*, mostly because at the point of developing SQLFluff, there didn't appear to be a good option for a whitespace-aware parser that could be used instead. With regards to the *rules*, SQLFluff aims to be opinionated, but it also accepts that many organisations and groups have pre-existing strong conventions around how to write SQL, and so ultimately SQLFluff should be flexible enough to support whichever rule set a user wishes to use.

.. rubric:: Notes

.. [#f1] Credit to `this article`_ for highlighting the importance of a good vision.

.. _`this article`: https://opensource.com/business/16/6/bad-practice-foss-projects-management

sqlfluff-2.3.5/examples/000077500000000000000000000000001451700765000151665ustar00rootroot00000000000000sqlfluff-2.3.5/examples/01_basic_api_usage.py000066400000000000000000000054101451700765000211360ustar00rootroot00000000000000
"""This is an example of how to use the simple sqlfluff api."""

from typing import Any, Dict, Iterator, List, Union

import sqlfluff

# -------- LINTING ----------

my_bad_query = "SeLEct *, 1, blah as fOO from mySchema.myTable"

# Lint the given string and return an array of violations in JSON representation.
lint_result = sqlfluff.lint(my_bad_query, dialect="bigquery")
# lint_result =
# [
#     {
#         "code": "CP01",
#         "line_no": 1,
#         "line_pos": 1,
#         "description": "Keywords must be consistently upper case.",
#     }
#     ...
# ]

# -------- FIXING ----------

# Fix the given string and get a string back which has been fixed.
fix_result_1 = sqlfluff.fix(my_bad_query, dialect="bigquery")
# fix_result_1 = 'SELECT *, 1, blah AS foo FROM myschema.mytable\n'

# We can also fix just specific rules.
fix_result_2 = sqlfluff.fix(my_bad_query, rules=["CP01"])
# fix_result_2 = 'SELECT *, 1, blah AS fOO FROM mySchema.myTable'

# Or a subset of rules...
fix_result_3 = sqlfluff.fix(my_bad_query, rules=["CP01", "CP02"])
# fix_result_3 = 'SELECT *, 1, blah AS fOO FROM myschema.mytable'

# -------- PARSING ----------

# Parse the given string and return a JSON representation of the parsed tree.
parse_result = sqlfluff.parse(my_bad_query)
# parse_result = {'file': {'statement': {...}, 'newline': '\n'}}

# This JSON structure can then be parsed as required.
# An example usage is shown below:


def get_json_segment(
    parse_result: Dict[str, Any], segment_type: str
) -> Iterator[Union[str, Dict[str, Any], List[Dict[str, Any]]]]:
    """Recursively search JSON parse result for specified segment type.

    Args:
        parse_result (Dict[str, Any]): JSON parse result from `sqlfluff.parse`.
        segment_type (str): The segment type to search for.

    Yields:
        Iterator[Union[str, Dict[str, Any], List[Dict[str, Any]]]]:
        Retrieves children of specified segment type as either a string for a
        raw segment or as JSON or an array of JSON for non-raw segments.
    """
    for k, v in parse_result.items():
        if k == segment_type:
            yield v
        elif isinstance(v, dict):
            yield from get_json_segment(v, segment_type)
        elif isinstance(v, list):
            for s in v:
                yield from get_json_segment(s, segment_type)


# e.g. Retrieve array of JSON for table references.
table_references = list(get_json_segment(parse_result, "table_reference"))
print(table_references)
# [[{'identifier': 'mySchema'}, {'dot': '.'}, {'identifier': 'myTable'}]]

# Retrieve raw table name from last identifier in the table reference.
for table_reference in table_references:
    table_name = list(get_json_segment(parse_result, "naked_identifier"))[-1]
    print(f"table_name: {table_name}")
# table_name: myTable

sqlfluff-2.3.5/examples/02_timing_api_steps.py000066400000000000000000000021371451700765000214020ustar00rootroot00000000000000
"""Performance testing on parsing and lexing."""

import timeit

from sqlfluff.core import Lexer, Linter, Parser

# Set up and output the query
sql = "SeLEct *, 1, blah as fOO from myTable"
print("Query: ", repr(sql))


def time_function(func, name, iterations=20):
    """A basic timing function."""
    # Do the timing
    time = timeit.timeit(func, number=iterations) / iterations
    # Output the result
    print(
        "{:<35} {:.6}s [{} iterations]".format(
            f"Time to {name}:",
            time,
            iterations,
        )
    )


# Set up some classes to process the data
kwargs = dict(dialect="ansi")
lexer = Lexer(**kwargs)
parser = Parser(**kwargs)
linter = Linter(**kwargs)

# Pre-process the lexing step for the parsing step
tokens, _ = lexer.lex(sql)
# Pre-process the parsing step for the linting and fixing steps
parsed = parser.parse(tokens)

# Time the steps
time_function(lambda: lexer.lex(sql), name="lex")
time_function(lambda: parser.parse(tokens), name="parse")
time_function(lambda: linter.lint(parsed), name="lint")
time_function(lambda: linter.fix(parsed), name="fix")

sqlfluff-2.3.5/examples/03_getting_rules_and_dialects.py000066400000000000000000000011631451700765000234100ustar00rootroot00000000000000
"""This is an example of how to get basic options from sqlfluff."""

import sqlfluff

# -------- DIALECTS ----------

dialects = sqlfluff.list_dialects()
# dialects = [DialectTuple(label='ansi', name='ansi', inherits_from='nothing'), ...]
dialect_names = [dialect.label for dialect in dialects]
# dialect_names = ["ansi", "snowflake", ...]

# -------- RULES ----------

rules = sqlfluff.list_rules()
# rules = [
#     RuleTuple(
#         code='Example_LT01',
#         description='ORDER BY on these columns is forbidden!'
#     ),
#     ...
# ]
rule_codes = [rule.code for rule in rules]
# rule_codes = ["LT01", "LT02", ...]
sqlfluff-2.3.5/examples/04_config_overrides.py000066400000000000000000000007711451700765000213770ustar00rootroot00000000000000
"""This is an example of providing config overrides."""

from sqlfluff.core import FluffConfig, Linter

sql = "SELECT 1\n"

config = FluffConfig(
    overrides={
        "dialect": "snowflake",
        # NOTE: We explicitly set the string "none" here rather
        # than a None literal so that it overrides any config
        # set by any config files in the path.
        "library_path": "none",
    }
)

linted_file = Linter(config=config).lint_string(sql)

assert linted_file.get_violations() == []

sqlfluff-2.3.5/examples/05_simple_api_config.py000066400000000000000000000040531451700765000215150ustar00rootroot00000000000000
"""An example to show a few ways of configuring the API."""

import sqlfluff
from sqlfluff.core import FluffConfig, Linter

# #######################################
# The simple API can be configured in three ways.

# 1. Limited keyword arguments
sqlfluff.fix("SELECT 1", dialect="bigquery")

# 2. Providing the path to a config file
sqlfluff.fix("SELECT 1", config_path="test/fixtures/.sqlfluff")

# 3. Providing a preconfigured FluffConfig object.
# NOTE: This is the way of configuring SQLFluff which will give the most control.

# 3a. FluffConfig objects can be created directly from a dictionary of values.
config = FluffConfig(configs={"core": {"dialect": "bigquery"}})
# 3b. FluffConfig objects can be created from a config file in a string.
config = FluffConfig.from_string("[sqlfluff]\ndialect=bigquery\n")
# 3c. FluffConfig objects can be created from a config file in multiple strings
# to simulate the effect of multiple nested config strings.
config = FluffConfig.from_strings(
    # NOTE: Given these two strings, the resulting dialect would be "mysql"
    # as the later files take precedence.
    "[sqlfluff]\ndialect=bigquery\n",
    "[sqlfluff]\ndialect=mysql\n",
)
# 3d. FluffConfig objects can be created from a path containing a config file.
config = FluffConfig.from_path("test/fixtures/")
# 3e. FluffConfig objects can be created from keyword arguments.
config = FluffConfig.from_kwargs(dialect="bigquery", rules=["LT01"])

# The FluffConfig is then provided via a config argument.
sqlfluff.fix("SELECT 1", config=config)

# #######################################
# The core API is always configured using a FluffConfig object.

# When instantiating a Linter (or Parser), a FluffConfig must be provided
# on instantiation. See above for details on how to create a FluffConfig.
linter = Linter(config=config)

# The provided config will then be used in any operations.
lint_result = linter.lint_string("SELECT 1", fix=True)
fixed_string = lint_result.fix_string()
# NOTE: The "True" element shows that fixing was a success.
assert fixed_string == ("SELECT 1", True)

sqlfluff-2.3.5/images/000077500000000000000000000000001451700765000146155ustar00rootroot00000000000000sqlfluff-2.3.5/images/README.md000066400000000000000000000023241451700765000160750ustar00rootroot00000000000000
# SQLFluff image assets

The logos, both as png and svg files, are available here for use when referencing the project. Contributions are welcome to these files in addition to contributions to the codebase of the project. This is especially useful if you have graphic design skills, but before planning any large scale changes, do raise an issue for discussion on GitHub to ensure that your planned changes are likely to be accepted.
With that in mind, please follow these guidelines:

- Be mindful that these files are linked to in several places, including the main `README.md` file which generates the pypi profile for the project. Don't remove or rename files without being mindful of this.
- If you edit the `svg` files, please re-generate the corresponding `png` files accordingly.
- Please use https://svgcrop.com/ and https://tinypng.com/ to crop and minimise `svg` and `png` files before committing.

## Licensing

Unless otherwise noted, these works are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License. To view a copy of this license, visit http://creativecommons.org/licenses/by-sa/4.0/ or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.

sqlfluff-2.3.5/images/datacoves.png000066400000000000000000001176021451700765000173020ustar00rootroot00000000000000 [binary PNG image data omitted]
sqlfluff-2.3.5/images/sqlfluff-lrg-border.png000066400000000000000000000644161451700765000212130ustar00rootroot00000000000000 [binary PNG image data omitted]
sqlfluff-2.3.5/images/sqlfluff-lrg-border.svg000066400000000000000000001035661451700765000212260ustar00rootroot00000000000000 [SVG vector image data (image/svg+xml) omitted]
sqlfluff-2.3.5/images/sqlfluff-wide-lrg.png000066400000000000000000004631271451700765000206660ustar00rootroot00000000000000 [binary PNG image data omitted]
\莏Qe.5$)_MQꞎ"CuP= )>bpCe'$i{5+"h f}u_Ѧ:uB_&)o.!N*qI'y&W=Y^OګR"+Ȑ6"]6,LzEC!QД4 4YMĻzٴX5&C"ad({OhA81|-*,ºx(` jTUlD[4tٴ鋪bIy@UR( G -(*BPmPH]NJ/E[Ӟ+ٰzA  ncs%fTӧJljxx-,tߺVjE wؖmo\v(xMrk 1'Pww;]4:B~2C#M` 惁LRwv"S.^e5M_~ Y]esUj}d5fgV)ǂ{g.@CikQ 8+]M]oM]^Ogͯ ,u_0Mcz)*N:J V9P5܀5Q>&*TcU\(f[Ya E$̓PսePL N}\0\c Bu^ItKG`jyXodG}4BiۤW?I(@~$MPc/x cf¸zVQO:4@.$4~e<5O9P7㛠OO)l7{wUaaad-ᴱJEϡTCr K"*J%f-X`}CCt?4 edyg+u".#rr>RV^fI_T޲F_Sj/lՓK gy$)L1nYE F jWnoc PcISґZrL|veA$R{ڒm99 iۘFpu{ Nl( npkMCu8'$Yx\H-R ŧg& 9MWQ ݯ1mG;{5c<ƝB73fPKkCh(>YuTp cthƫa !)mgJkwlt!nC΂%y>4 i\cM1f$7G;Ngyc$u:if#f*tR%:Hl-.o3Mv 7JbKz8ˑvEmkFhGm+ Hd鼣 `s GX+tm}..|ϬZ@.}?+^ZPv, aєjݘ.'!!'{tReA7.BN)l Y32%ߜx0|*>\2ER"\>)fH3z;X+L507A1 C ߺv#Bo\yWvFpm8+TQEawFǘO$_HqH>"Ow}o)C_WxA6&eAڎP>D0*'Qf`c:Ay,i=J/p 727"{YrkҦ\[ر?oo(QW /ф-p ` Uњ*}2q:p呋!膇% FK~|R9m}-Ft5[}ҩj *R>?rMKf<0I]? "h P1K&b?BLc/ Hly\kh'XK%ĥw!ܛy>6FS&6D)}U߈NЭCWKELRy3LjEzQf1.#~D4Z<UKLOfA24u:V ˰ѐ%59`L7Q 7cp+6_xV5Bt5H@x)?Դ+>W ׮&\"# Ek"D;[vvh )߳tω.?d 6LY6f Bn$"29c1֫Fa[`~pدSbR{2k^V8di 4xan.kIr ɯޛw 2pVY_/]GONhf&d;J݃ufs4'J+?/brƼJ9?+1bJdeO>~?Wu1>~:x>J-,qY~Y >SO~K !BG)2?U%}4lodZKKS3\t3mlp"ĝly ց{$zsL3C(bɧ0 pབྷKpMRE'J"#h͹qy[N'Ƹ&oDɿUg-5?G8gt V%Z8#jtƉu3+ʼ"LwVcڕ3CgÒ+''dwP d`CelϪ˯|ec_=X_r1q5OZG}D#We6^ "peOѤ q}YЭ%WC%6/<vHc, b/eF)UMPm1rVA5:^|=8o UӒ(pvgڹ05!=23u"Ӿr4=''6|ĐtSDHh,\Rծ8Λep]MRi(*1E8˱Bwi&貦mo2U8|i)"6ncZ0[s6C7*p\;1-*u !',{ʁ X;MtnfA+6>D-%Yē"`nҽtՂ Jy8 հ.$:cbc==@A%tG32ErRSt{VeB QOWIp?%HVIida(ഥ޼q-Goڔ*:Y!L?-|.n~:~ Ir(]΂g8O%Tփ9^)ژH20Tz-p ~0"B?;H{S׮8kŹ ׮&\RC##YZཱnRdtLƭ3?]w ܔRM?bpU p&ߊĸpjM@Mjti 5i/۹d랹(-l}&L1Q9@E>TJ&c5a^KGZ;wk1}my3{~G 3A<%vH^@Kb8y%i/h+ j zr!xyueA 's #WdnL#߉1Oyؚ|W(HW.$O&tPC.EP.IG@^fhgmOR&B @/wYMly p@nӊvNROE[F Sh!邡^*Hab˯\ ׮&\)!-R,!;"0*nCF]k,a_,EXUD&DM@ؚf1kpē&MC܇C'+E{ 9xyE3@.\nKNopBe'~O/sa>,q}p%5BML_i~H}~iξ <˻K%G_@bT9``LB[M  cMͱLi (cOߕ&/ bՔQ]28w@[uW^P/d'$m 3lJp^zPLS@JkPclkU:V D-k$dfs!^ p! ('rJJbMQ Syb'_p_tQp[Q7mA\/;Q@ &ЍYR,,F>MnsTpy(W.\a-Gp;?M{,Us7vϿ_9 n|$٫jde|?'w- ]܁˸n]Ќgu,;7)Ⱥ& q"AK?j/X%+9, qJYTjt$Ǻ#4٤'0.lc9"s#ٖZ:Frp`{$U`:]`=`v_C!H, a}M@-(w Š'E2 ;[ h8H0i3 Ql2e]:!OG"K izRKpF\sPɀ`}C 8UbSIm" [Xu>롵6`ԖjqϪ|gRzNGwqz X̐Lϓ !An}u!w!9Te^_jBЅY_wߖ MR@eu"s qѝO 3>i߾~3j܇㿟NEtTGo c%H5 H4t0f@ͼ4jRWL37LS)ozӛ׍ÿNet5p:Oѿj$`)' HڿK# T7=YͦGrMѵ-=fH6up񏮈M]݀W DGT@M+?B@-@Wp[1N{+{ao@3Zh=܈1)R|ju a$>RL{ ȭۓڿn(Evde-ن!5dIg`3eZv W[a2V8D'kc//+eUR8 G:bUg'6f)bluCQ)wOrY)!z rA˹VGΉC 0P?yornxKL#@NDt+ _mʉfce&I`o5. CQZ n%[,ɟO <{x<341tE[d;50ѻJz :u= !̀)fSr:`|ٕvL$_=G`)'_MC_\"ە myݐf֒(Hn_hTo?Aq%&fȊ9J@YA]3K鮃$ǣSF핉Eqɫ96Zo QT5Ďam-MY5Yf)>//_{pq욖T݁ǘtq5,/Dh]ONK'C fg 7h?R'zv4_c'p 7%7fUmUd`]Mp@x4cYњ1K`DfEaEp箁Ơ;/=RI|.z;B44<:-)=}ǃsQˈa\'l(Cܥ~3߳F10SB߉I@f=9/|v!)=iڔl<>mP_OC.&?c[ǘMO fЪjO xrS;jq`~96> $~ B{-kjEO<9@L 7i& Y!cwoSʥ(}T f OO3p?pyägxK1K;FXَ^{J ta?? 1ZωtJIPx޵r؍qAq~gLeA"f|ȧHteQi=XwRArΑi +A$VS}aER&t =NEd.r+Sl# ?w+BmK%ȲXS=d$K5T]l_J*iH&s6y@qx\}8p8@i/@Qz^~dHwg~Aw!?W5J5w ̌95z 챎}6J紅OjnM!8͠G=d)R|KWJy EV*`-a5|DMԄ Q4Z(6'AGuЮMx&.*4ܿ$)<EuhR$hQuҧTO\c)$" UVA8V%ZWAt:i!' tv]XFPIRPHdu tEd/%zXXs.r<_J [6e,S*G6]OAzwK }j/7L3P3vٔjIw08 f$gT \+tp\\;#l Q6o yvDW]|$^.Xr3M O@KӉwp>ĔA{+.>$Wga`Ovd\1ٔglpb~Cr4O*r@`[PԟM'`fhڵ;=K*)7NY$d FQڍs;o4 ?qz@\C2lh9IV#[= 8u٩F<x(fIuڦ(|q<"p~M+͠}%C̎;6y%Am߲Zmpm0 hJOnudKկ5h?1? 4hC!;gGZP8gN6$5P7t/}ukPб E|)5YBȫɃmK 4F3:#9Q3k_ e==_!}RdBtB1VL,xUx<lUc%З{T;2Qu>4MBPFv CGiII @vA2w}J.)FNrE ʹ@+1kZa3}eF RIImc]s@eO =ֆ~0)6d{;1 ArjD^Nl՚T2DZzux<Tn=n*};y݊E6?{CG4 $CQ~K& GWT~c!I:8M*|p[% Y i09PԠے &K zDL}umg4{O8U eXėA)Y(>7:K5E9MZ:O'6))#. ^8p\Pqf5E(԰R/O˭r_3o74$ ٮ*\O&&CVH#N }8"*S zr@Ea?O"D%6[_ Qqo0r ZpX_m[̈(F95$:07roӯ'`*NI񩸨~l-] #=Gg;RBo/q<8'oXXF۽šU&.]wXlj'N}*LfՆ't R#2U[eptsJO͞ k\}gyT^;%ruGOB|y5r )NՕu͇i:[ Hk5sCc5T_oCkK. 
58=~+|3p]ge'/[ߚb r)5 ALj>H%9 ~IoPrm,`GPs2 Clͺr*M!RtԞήĚa7ۼ43| ^ !M&u";Qu vSd?|sMXH/4o Vq&%t >zxx8<޻7 5wٝae. q䏳? ^0ںz  ˄o/Y:NnG} 46 )W^{K͵f. O TJؐw4. >p@*)T!<+]_ךМ~W4o3F<9O}?G\lx!Q ݁ϲ7^x OP @Q.XYuG`mW)61ܹLdSh%IɯN.&< wy3`Jn״lbW L,f@]A}C&WkՎ F@U~Rg'ga?HZiuqc_Mձ.z:EN'l C;Z>АKLwz_|ctXu!֏e?ǭ>tłO_"xE9<7 k: ; [ b6xt ;tg]îe!>cۼF*md#&7ؠE/iROc(@qdYԄM6gצmo, cz*(C*A ?O?ǵػ0㧥E)'np[qEg12U8+hHa<"C ;|FZT ͂D+ >0}2{t+\& `bo.cӛRՏ^}lPnXR䰚65׫z&(\\k-~ @fn!*:FڇZ%Z(.ï ̸&OG75Я4=ńɫ-eI 9 µP P@B4MuDB0>I_ʂz L&p 6hBiA*^jZ{Y m,(I^m_\><ƆdhfJ9.-H͝?5ٚbBбN#X4;F2*  :a|<^T&\6Әl * ltz4H*GyII6'sVԬfeZg֜:uJ1M Y)fx0?M}{ \5CF+ ᆄJ; (/k8ss\\3jQ`4p]鄏i1) sm-Y\1ŲYyx~ppCo"FF01# DC"!{٘Һ7 `Dooڹy Pim@ݵJc^Sޱqk|L_Tov @+dғ2iwffE[ayyr# $d[)D!Hk& 5s;jšU!U}⛳k<2@LEZ2*GC J0g8"-!*a^B uoI0sxfuuP ( nHdXi9Z6_ɒQPULȴpa&S6U`l u\;kmtǒT ^rq<Ǩ U1$JtB~[tE:tߞi+ ҽ(3H%#,ƥp?&"Ðhyl,Sh+6dkj%Fcg$*M@cP (X A"f$ͰρBeç^>f*i%v/ jUA"ʌmkr Ƈ7orݟ{zzpnwOͮ7?ml}_Qhp>v5j״T[4ֺOWvRo.(q6.gD@^KV-/ɬum7ds,[;@#'D4_3ʷO꾇\cM> v4*\\s}UA$˄\eUK]Cׁq]{}RɌDuFP # dLLD!aǤZ/OBGYse͎7+Fr]yѴ{"aDi/GPu`$zu >ޝ4D )V`7ޢFQAh4x%GmP@CPrxx}AM] ȬμgH? >m A H bqEVs pN LmA+){@R/Of9ߗ'0v%:JXC?75f2z"$ esf3f ިW6]$A"Ƚ(E_@!fJdm:㪛Ty!VXJQeoo;eIUc'E5#NM: !!IF[sPUD эS&?^KXie&u'$ 2񎜌5R#kζ6ًtIe0L3kv24 +E;5;6NL\Fzl,EL@ޣD\ $x DL׈n Y-&'Uoh9y,s2bqH*^URjz4ªlryfRa2%DZB+3GvQa@03JIDT+W+ؒ x}oT#].#9{0!׋ uP<_׃!$7ka=L[ANgtE ٩/0 lR@_ktkUU_$L3m%Yn7{'9B;sM|BP_Q>wW߹3Fl T_}s-e RV&H52 DD hzc("bsX@*kA! L3 (rN[Dﴼx2?yz釠>zhDL@Av׹4(Gyx h޿AcJ+(Df@reJ7ƢfS2D7qc3j\$d uc}fDAUPNgvL quYj$9%i 4:I_@i~KpMP H]օZ~OѮ(PC`A/;\lNHc=ODi3NH{χWZZ}nPCD8$?jﵸ@`;q%&lBafU3xcG(ƚFZgťrqXNZV6N{rɻ8f[/oWP (y6w~w'NIDqH 0%ˢ$M-,Ιj@ME$M$++23ӲrdwA`b~?a+N۳zubbi^^VF8:&j[sC^EVx\.Hgd8TFS! /N:٣0- |%Sy?CW['fA`B tN6&A{J 97Ca͂q(UQ``<昁aB[aI{pX*~`ȦMJ!J,ƚNs0"z54 J_s!cDj_w=}Ȫ Bp 1Tc Þ5 昳GO vΝ2I}/Sк4! ?7Q~}fuijArJ#seT3_ocf,*'rqA`F0/ǭU8[j|@d1!9Ĉ+T`%l0`@a8  ?Q by%@Dl$3un(vk.ڬrbɬO {%R&rh9Č;o48xl H$3HأAY9GҲIDG5?ñ#_a?AP5 8saEOYwM^XGۙ3:C`ZS&z’-V5q!vipp%N0 PXtĘ/NNAx#z5 L$ٿk4Heos.G!,7=DF'Zgjҵ"C Lj^5܅ rD,MRv"fmte˘1ĮpkeN0 PxR%O f[i6Bc7kFvgJH+NH6Qlz5lotu V@Ԭ|k ߻lyލ͖ͥEekMxyGdQiEQEµ8U0`@a N='IJMVj>' XzĺD4NUex:j@*֚s&V3tm˓ƙ"L‰S9YGh[ƃ?nE⒧Ē`'|}cISaצ+a@00[@N+%ݡwmSUD3Gl#cgbPpD-1͋fe9QChNB-,QcT=UK!L >Mr!a[O DÁ<vmR`0`@*z;j@u=@|o̧O\jNCH'n&Hi3Kh.lVqDi3ϐH\v$#ٺPGdt.śXs[R0v >;jH&1Tؾ[ =vMhGqYW9YWeZmԒ$AkAj5S >ωf c#Nb6]UyM5К’ q7Q½d»jYwe5 7pa¦1XEf cj+ܓ&mlX@Էf.փp1QfT hAitrhH5:¼g(ÏϸC;oTa—Cv 'eew2s%) P;k R'W+䋶|tWC0UN_L!kPKl\^o|/JPȑt+=[/J\N!Ask:.pYrsZ|iJy$DSJza[?['B`%٫Sv9a):rh!qAKsiSy7FGm?UݧOEc7so7["'1QRHYYxU1k+GyUs wܶZ}[Ln;Azs$v; W%.< ~-OVILπ _)QJ`gZαeI[ GKjUYAO@F1I{0OL'2 YwFB D+t=W%v&:ҧM$6`#,߫VJRZN(m )i(Vhnf>ݞ3*PJ9+mChc#S<[]{ cmaɩYӈ`=6T ~q9edn)a)  Xy",>u~u$ZA!qwу V[I8 .]^5UOVo}<\Ykvbf g𗒚'ǫ:`-V#!!%NՂd`UcF>п|v W%_r| ~(1Zek_ՍAV&a5T--1~3wRJ~S~i {+aӵn;.$Vg I0J"-bޥsS`BAe`s!UFgmKOz%ys@FsFȋܶE[]U@4%ŁgλTUڀ4 %I Td0M.$Xe&c~J9f_Lz2I[qЃB|Hp3ߣJZp,+7  <?{ n+-KtO.UPs(=$1\:KT K1 ~e`ʐ9DhFlrBK'=)1e MHي&-\( pN L e:yPBvwq $g*I ;Z3*gF`u ) rI@b_J讬D~v^2x[r`*vٸ٥WUZeyjg^gyQYwRkUUonnv:FMH5#c$׈Ja0O0Mbf2MHИ9\nY};x$?TX6* SM&0uR a"?F,6xvPnAМ*Xh<Ϻ)?.w.f\}` Q% <.vҳz")@ XSmw=R%XuaM4lPU* `9I.K99_}FST9h<} dEƜؗFvdURlhVqP>ܐ)պ?KB_}^KI38 JO &4J@bW@Q.EJh *( "b -)fK#w;0ϥXz_Z$ dϩ j{* -l/9,J?kw+'LiQ$͎ t9cT1ѡ I5T0-p%D %^)G`i'ɉX!^>De.x-,Al~Zvp~3AvnT+)df%a[ML(P^ !aukZR,(B#Dem+0 DlS&`$cƺt`9R? 
Ʌz } f)d0$4f!9&*yr(oAZRz}VdBHvmc̵VMsʪ-;UP&(òL.ng.>x5bUn#l:_>r8AZ:Awy>j{,Y=Me8z@ P0 bmSF`%(8]~Q*v)_EVCzY)qxB+_ۉ m-I MFHK3+2z,$n\|j/]i}i[_P x m PHj'`imN?m})O~t9/'RQYXD;AkFΒ?_`^B瘌,X`GTF/2 49G ʰm Q> 8 B!$kv!HnfN"ixyH?+un!:ʕ}η62h QL"'dnz* (!&e֜[c5hD*兜Ij*xY52M#y5" `X  [rE-6$8hoϊ3*k5ES|؛G;5zo M!DHN Qꀑ{gV"c64@ep xn!KO pzalg1HxZXkI B`¬qn攃Ôԏ@寣BJϝU扗f=9/DF((5ʓ n$vK07z렭6T ^)j=ŃВbAjn"J6ph `@(|`&|,m?>ݳSnn7D֟ ދ7dh.+W,PFJ dFUQa4_k ~sěa<%U=x@j 45Ƿ ʰ,O0u|0 T%zmaXY1ৣm+k1.10NQ0doG*<"'K̊ )?M'.eUd_FוKbӰɞĝ!M%~cgqp@Y]=t+:s=vNנ˼usRu5x$eqrg 2,^N @6^8 B?ٻ& t]P">E˳ԽɁ"`PPD |r #>½;ggG]Hc)A.U[)K1(= C⼨Tv#`} njWۼ㉠hsŇ; d^I,M?^ѫpPe~H9NɠXPBg*I !a4 `!I+ -vU'e !FeY~i`9V}O_[+̶ee9GXö`/T%ߘA\p1m$1Ik%.!u)5ǫs "vjFRGX!-V)4>d#L34S-UFy-"8->`$m%1>F %B/Fgd`BeG`>7+!HbP uRvԨ+ؐ~T *#zL2:}H,;H;/vŁu{ -T J<N>mpmYS)2ubArBRR47N$(<#%uqe ZT8A҃dn= 3?fkhrզS oy]jAENQihEXvQڟ5 4NlVp03*M'(X{Y¹;@Va\Bj9 aijb{&0m]R47ƬpTA!!MÔa%lqJTrc[ hqѦl|D%&.n" N[) tOJdj%Fp$wX^cpjJ5XpPXRަ@Ȅ[.AaL5PH JLM/('qs~$Kۈ0a4`F`13`7[0}c<3|ij6k~ܿnF DShRQJH2[2` 9X- pQzNQ7yY[<9 Ϛ)OO;AsX"J(p" aNsxl9I0@A0\/0n8๐T#C *9qE1%YP)īWw]딏reDP+ʴ\#:x"̰|4ls㐌?ܶՒ. #_ k4,#H ըqhR7ciy1ѱAzvb  g*~nhA֮8PTע#smӃW'Gc2?"`!0͗$.!V,0XYYU[kt #$r b޺ zpfj׍*lP||GzId/65F)S+I"8}; |cii.{Di%ED  *vmJ҄@E{E%w! 0d}_>:l2{$xF%Q K/ ؋G]ȵ.>xzk؞a*ikD͂r`RK'hCCL~b{M0֝XrSU0Pʜ$\B|Aɬ h=8 >U>6L04xL D_bL|pB(&¤T W/95ϟ  3%Aaۋ&”GڷuNVaÜ'`'H>5n#;Ekx?2z `Z]TDU_B[W*vEt,\KnR]) 0EJHWSZ/aׂhj[8JHQ1-!ʼnhj?88nY_!w`H_c+;,۲}~e.^~̌y)9-pPQ8F뼦f)%Ě^w8Za%my|&kCWi *'vS4c~М4~v3񩻊JkEd!"Սe^bkr4p9Iso"$lPGNf-*)2s(kiN|[V!R30dt|1پ9)~p#!AʶÖˎ, : u9pj-2= >\/Iy15e^Xd,B#8OߏH䔶cJ:vM ė 1|Q@/1&zQI SRj[eĚd}e-e%ݛ8ƺFD/ؒ_ *G{-{6QڮYSܚZ9M=l~|u7o2Na'@i[7r0'u6tL oomQ봝/*|~|q&d _zn 9DΎ>@fpC** B9y˲OSk=Qxݦ,h*Je7=T +v }`_){}Zuojkc9\3r{}JۛSa&jw~g EL]=z6=yS:6- wmDK0@ \ZQDдR#@^7 jdFH*C:&[jnJ񮧒 #UUv]a*ۍuQ㗼:\ $8%]8EfU8ie $KkW_~"+B'7yzXVuW,ͅxiJgTv,Gcoq̻-J/gC{ T5?GRAH^'ٻrde 4&A<[S7"U}oÔe n9(\eͥDˬWQ9,;#8WM1JPu ɻA |[h]4OM "߼ZgwF4o]bC$HMN,9M19oXde $-6y4#'码jÜjWpՋsGU)@{dQwż`\PB lL1S-x^)@,sF\BBepޱjNJI,=\C5E8)eO>HT̼+TQ 9~z⪹D2o6~J8LJ<"R @r;vX4`j"ADRԈ( v޻3K$M#QQAQ&@M6wvN( be'>EPdUƩa|#ǁey5?0ܙ ofkuRT֐z+TG2 +Y9CSj< W{'Ah)SJ;&WR@ u@H: 0@BzCdJVcsF˜/{Zur4#~ pҡ@̳SueZ1~ʊY V/R.4d̨z\ q5Q(;͙ Ve%i`δWV7]lE{O 4VQ 9# /hԫыG11MR%8-]Ml EP.|Q^mI ! ѥ^|å?MvpR 12])K&ɢ]Lie:@l(1'썾1YY%v@YG26]91,yv*N,>^uKfE=} 1go0G/@ts=6$9Ҿ"1MP)= 8XG 8#i`?dQ#@ue͝Kj!{vene+=prw"Dv0]'!hu3?oHs)v1aS``<-Kr4 Lw w 13sAJr1Y-93TQ`ԩ4R!ߴˤ#ӊbפrwV;D&3wC@6)0@HG 0yDcA:VͶƱH&hYJ ֘y+@ڣ'u{!ҴXPƹ+d9ȴ,ɍH1/rm5'96im[ۋKS:!*RoJ-J- 0A ##` щS*4\k\>B[9YPz0MG_ 5NgD#[ЋtQ=Qy Ie)J!0PREDf g&0N{; X'ZLp'jb0k7Ikix7M-gt+0B_RQ@(OO|f@"a]ˍkUh/ c}Jm"0VE#2 9X_oXYUڿ34Y^ʹϴJW&0n)I:/2 4ELD9ތ/_POhB(Nb>\ P9d U@9hHx_Uc]􅐀Bh*sT^Jc|%%:0\! YDV k^R{.p3 aK?*+t kY`5vf8޳~#x,%> QJ 4.Df"Y->1E~ P*#{Mca? 
3e!n}=-^8}5 QnZځ;d  ܅G.+#UUU -sDSw4.DfL8HkEe2 /^xAI c>4\C~)!F.sWIaH$HyxβrZP@H(Pj8Ay"ҴDT].a ͸I{Dxt ,+vQ,&I@A@( F@Dld6ހ9?3?t\ GO1PՑAbG -8?4r:(;AY3BUR"Sf&xة]WW16wTU\$=Wb ^l+)wݭiR,8F` ̊ &p4 *H)@' FPJUVLM 2ݹv8FQT+LpXj-v`E_LqtrSeWAi/AH(.J!sx@`K0@hBz~24SsSZ68^s~ꦩ_bW P^3`_1YD4V壹*xD,'(*5~(햡{Wp,&L{o2 Xsu2$1Vc {"$ؤ|%$j iGj5VP.*6x-(N7[Qy}05H+6<|8al8ǰ./m*Be \Php q lw k3sh: R kG,dMZV[W ,6ŃG}<"s撰eȞ5S^0B^IAa؜%RH,YA0Ce !X^p3M$,+-oMT5qћ{-ca}_maoruO[ G(oMM_-{,\A:a))m , Dw[Bā;IRʦXiF\2x{EU~Sĕ% W[^6fXlNʞ , r1MEŒ4A})eŜr$k]|^ 3V,x8!$,<VA-1f6H.Gt/86XZ)'֑^.$mE ck\;Ǻz^[S2JUQ >2f+N/e"u[BDn(&odG]S U&̶*/{ |G4[)'ßzNays.(Y4dUk`()foYK  *kk+**F>mnobu V6auՍWw2F#&PHNo[w ,!(cg$>V`L`Ƀ>:A!VYFp;G \^GzYX;hLC5U}I_3ĮRoࠪE049")uex@Zt  $.0@(GgJ  ^ٽ`D'q/k׏ߞajh ver2gkrʤ `٨Z*e[& S0FW~Q8t\l a`s$`P@AaG5q1樿NwQ5x1g^5O L:}qUe.*جe;z 4s>Ŕ-q ƑBJiⴽ)?nD/U]!)9 B۸@k)q@N2!40qi!X 2r l5K2cI״tuS={f?&:`/Ɓ;TM#ml/~=axb?b=#)v7rR>NVB^lC[PiZ_`HvKmWGCB)1{;RU7C 0??7[o>6_*yp|%dM2XO^Xk<V1j3(u|G ~%fј{YE9q&B88flD@DB (mfp{8jlmz쀀A@Umw =_b_ RI*D;"@Nyæt&Stt4ѻry{(?*E%lw}c>oDAJ$DuԱ ]aAE^^&1RDR,QQ]_К`v"4)J aБU%N PpI1 X1l~euTNoa%R`:~;WPaND':bmr6ޘ4Mkمȴ: T%HO抷 ~[ D){sM|wF,~cC2%1VK2`e4GS 6b8f9󢧂.:fNޙKT=Zl?tjq7}/>~~-r5Z򤎊PAB( /t^Za@HE]  US6XAA*otJޘ56=n/cBeiw^(oR* sќ/~g 1\rs SY ;Gtb$y9 1tTC/.ClT?#*PeA .,c{Ndb (4xT JIt/жCu*YW)f@Pݗ'唟kc2C)e_ Q, B:uj$Bٛ+1sTd@.ph 8RGE4 pnZ@&ScPQwZƾK2 >G/s2ve,K6FA!vPu}r#TlEv6:2 e7AH.,D(\G~/k B*D_=R * _jzk[Z:;67w:}㩬{#'ߧC¡^QӠsˊ|FGSIyMiedb Jkә?ʩ(eC]* pu8x\b2" ?JV?7!TzaYkV ?eN/#Tɍ]mLsYp7TQ*_w\E|v݉OT 8B"I(D(LVXRZ A=qm 0$]9~~EoUu7JE&yˣ@FV_[g?d(H'Y#1 q,=5/b9S !O!ff:/$"|G?dg <@( $VGF`Dް+c^LA*x;t^b h늋W2'5:tfp$[-RLc6eO TwQaܒ'03WDԚ{L)_u0SC"؁*1/a˙sҞN= ǾК5 sE$NbЩ}`@HU"*\B{b#SZK(2Dg0rm1M;w)wJنk9(9>]a1Ys~.W$k_Dp5!;rR,ڛ+p 4aRU J}Mz:^없/[QC9DתK/;4V lʍiˌ4^5yK0 9v_vQض% ^r 1o2, /X|]ܣkL1Xwqa`Q@T<͔N-JhLBfMÔ?9ohxF;` ufpēs) e_+;nځMPfCH˛ӔO>^j VLMUwOϻ׽WPB. ?%DO.+Ⱥ)1 !q)%$H86'бK@o <>Bi3׈^dADTum׿Rc}1ѿ%)ӍgRBB lZ1+?0- u~[^ iqXleX z`k0hr9L5cs`!Dtj](ʅISMY[569rᅱ0O'wgOo\ķ8tJZ8tD(f*4xX+ 2k ds2q~־P2 Ԫ|gjz Վo,:^o?W*!Y3x郢](vrl2ӝ3_ z21&_3Ȗa@.4%POO o(S a BbI+qs 攴F( UXUsnrL1=X W`/\F|@WAQѪz6AA?Mcvj3$qo_].R>F AH0{HOXǥ-:XEg|vGi1/&leMߜکy3Q2 zޗ?@ݶr޺Ŗ TKu}~PKɫNX{Ϻ( AH4E>^,to!e BXnh%o-eEo3Ns۰q4_=ZiL{ػ$(@!beieu:-mֱͲLe0@(#|3Z96Cf?Eu'@IJđ5e5 $9;-TAv}_@_("Q-D\.@ަ0]/m)1QS<#b~4- @ΡTdCؓf7P Y8aɒ҄ƞo]~>lT.FOj#^cpp; f^CobPn@U*Dy t|iuti\}ɆX=>'DҖX2aV]@viŐ"vv@C X 4s(.εeeBv)[9 w ZN mt u r'JQ@!Rȸ7>q\/zKXst<sLGrg,(I>EOH~MU݄@h-BX1Nf%pȫzp"FN@l#lvk*LSIwCҪ Ye0@Y  YWr|3Pz)F_|mA$IJvmk{Ȧ}UhVZEO;Ztvv/OO!Rolـ&'}ǃPsnL\KaR0@(LVY8:KZMP R*879G_@B.i#m2ڄ^ w_ħ:_o@)X//BF 4*OQ2 e:Z2f$oW껾xiUܡqY*~y ҵPE,4Gaϑf:dTL;~ZUGDs35#Ȧ%;vjc ү3< C,mq  ?&[mXG?0Ukw=awQf3N嫿x ! 
L4O);cQ7*/"{%7wӫjJ9径1UugVB'ʲ o("KZ9W&2o 54VY@'YZ<8_[͜_!BM$a`ɞ@6rݼJ[zvDH] mKUeҝ< 71ВK/7L3hj'wa&W[o ' Nc޶p5v  WoM+@Z2Na!I}FׇssK<39gZVjmw 4Uh|—(?O.hxC&\0bۮj'{D D$*DEQT콌c{HbBBAD"lHք$4n;3h22 {ZsiI\~}恷7F=6BV9P]KT 8fH{Ie4HaBt+Ԏ)8P6)[C^Bh@Y*?p0 %J[]Q9BU7?լV5CS'W z mΪ;.+sy9DewpJx)%Z 0!BY(Sp`[yJ2  TzRp65I䗛@>/]V"#5@8w}{ݼ$,y^usnL`;&G^m,wywZRY" (>;*x'D'|64uev씱L+*ۣ6[楢5s.owDn{$8젒Ui`Dͮ?c/aom@4k0nHNHpA^=nP*7֎f;#Kol&)'T=WU]>I iOAL9 '-A!WG; %6ĥoc%:fK1o}u 3 "F #kNL]CkOT>bj;oAxکI@R,Sg-IXx*Xs +àh,qB,X㪳 Zrr)WhcXF)ʗrB$^B$rP7 "5zj4ƛ+rM/ahCe:Pr}AySͼZ0L<"TL4ߣk $#)4vc d66N5'DbK%;b1Pa}(3.1yWЭ0cg >3=\Ã7,!y01#׮Wh ܉NX䘎]P#Go%":4_fa x9!Pr` # 0t;*BԼi% =kYO!3C2K-m.9wf a@ o3s7ּwDh(Tz/@HGVc[ ~pVB,7BB&S:T~/7,JRJr1Bzn;JbqX۶@wChm`qz*S`<.ZPDl|ۈ4ge6^6((ra=\cw}V7(JlOL<u6 +ZSjf @nH)Ƭ=(8;A`[,+šlqEZLnR=#Cn[M(%b 9j'1nu>2 z@S \ KxB"BXknhX!HVe).I+eRw!o*^@sڈ>i,a5ݾl ]Q:#Gm:wZViEJϩBNA}5q &QQ{EE}goO$$ZBQPAlbM.x'e>zM}UMyiFN[0 /'$P%<aqbYzh6(!Pwi4:lr &]%- 7uR>jMgQᄠ e إtB$QlMӦ m8;`/jD?d*]X$- #n .fkX!`娮y`**E23LkǷ~3zA ֵB`9 ΑXMao DQOH r/M0CC M96]!~i2HTzsk ’S!-CcD܂#릊 úO_B**(v;F`hgD9jVAS9";D}hT2˯:lkh24ڷ@\FQ,Hx?qR~MbO4ia~&Զ6R?R$rK+Ӌ~P`ӽoRg&Jp撠1q D9 4d-+bI9w$=$ aLfѓX%mv2G8U6e'AY[Xy$Ŭ/γ%/Blm,=(:XYh&Va`$#>' D\[N*C Q)ʜ Q|\5FoE\ֶȫ烜Kߺ`?j{6#ݡ&@ B0kzLR]BW?9lR`hdJΓX9)-akOT)@B7sJWD_\ϭ$Z(g] ,1Cc C˞'UQ]wHH9bZaI C wx@΋R"M,!Yey|d෪f7v aFU߫ހDv2hpl~\M9sC#De RK S-0 r[Һ2Xzo`3eQPjzGTja d+`%4C%\Ft6 sYf]eeҚ[@*96¼6HznP|XqD{kRN0Kz}3]qYmnNkBX]\n:9bp*8Lhfj(l0 HZ r3gˁds?8jƀ1XsBapzE2 DvzIдڊKs02u4THpmIg-ᲂT H&:T ZK`E|im-I a: "JAk3\$}VPIaD<by7m'UOTl&AXQgv*I?<tA@C2z'7imU{ mEVS,DF%KLY}T-6s6o0J18AyعML&xqDҁhw*2[aC|u>k|y$HDnO0#@9* TwF| b,µH6 ] *Tr4 xod}j4~_޸%A[ne~Zp?﬽{0+9,] -}@؜hyT{[@$)'G1 S|{:Gw X!nKv6 mI>ʓ%^hd5Dr4F)g~$UZylZ R$F*,VKkh*Iʂ)QΆ~Hf>]D"DWul8'{Dp|#HURE@@(ElKHӀ@DD(*((XLmBn$wnv?)ZsZa`eFH ViI1R]aﵗ''c2dYýؒT{20a5ITj8ՁT:0ZD1EEA*0 { kĒ Z`-X wQ[ë/lJ8>Bjk5#%>ZR1l\K#^)=G ;_=k\'^v|JyOLZtdrl++r42 !8%HsAp)KI) `<5ߪJCTow:LM/4kG)_M,aHShxq#dž9WOo,Eb?j^;MF% H]. \g62  !^wV9-IKwGdo̵/T=bEg 孺w68L"-w],8d3MTw&2UOg%0$Hu . 29E 59kpܑl7Tns30tEjdeP^~yN2ԏ}< Vv"F o#.y?gY8M9hy`cv9i`5H9#>lg\܌*r !R.HTN $xKN<8g5;1[)d4>><?X~2%& P*$,}Ng,dS9 } S_-&ҝN6DWyv(q.*u`OTϴE*?"wJ)p8 `bNER. <9\h!N ҜF\P}"6[* ĞX:>RhIV?`rW([?z%&g%8vDJsʐy-E^rt?]2~ .Wi`P\J}>~O{cdǭ``>3R/ NtSޞM`/#}T#r A[K-ˣ7xrVbsi./KJB.(~-SHlh5GDR:$MX "~+,OPM#Va0/.wg١΋j'+amSE9\?^*/)sU|qd#Sv?UK/"+w uS%As 49;1l`P!CW wٰ^e(Sx~-9߸nVeY{`!2#Xz(gUo*abJ@Zwwe!X'bb۷KXhê Kbߒ" Ēh`]:;HܼnBg^d{4azGjrj4#wTu0:?cxqT0ٻF+WkJIe|S1xE҉:KcI[Z,1{(UZhX&fVi\.\/ []ÂDe=5I9@'f$o> ޢ]WD! S2=&C3Adwج! $uO}pa( TM<!i ܞX  eWGMO\krTFص{OTL4xa,uj _9$=oyc:ӬB\/jpg_[A)86Oak$QtZ,%MJ 0,B@xR|;Dtܬ\V;妪k<0ZY̓Φ`?B ؞[/-8 !r_E^Ŭ!4IoА$An #!^9( ?e{ @roDe΄QZs[=x 1\d /J3elCrN[Z>heRILެ|o͓k*F`GrNR I=i, .P2?PS@IsAvWhxb7ipؚm:*: }rꝳc l& ij " [$~8PKEx2S)AHWNyp M9Td,`YiZ9-{Y}V|L0QՆ7cuUaAF*o4P*,Shz'X%.j T@fW#YE.][d!Y$ ;H:' B ۈE'MW-*' 㠪=~4v;6406AVr1ųƅJ7)/+9=ނN-U٥jH V́qtR>9A\NҝAW%(^( &$ n(߀ SMC w%eiQ@C F=LyUSMO5R[.MEr-RgdZoWi~A4(Gz%kEȻ *`5GOK,6`пE(4is[ >Y/4P+#vhuP#,d< Ȍ@Z`Zq ܾcސ7fZVoKo-@ d!qZ.Tbs8z("BIX B %Qn `ʹveǩa5O.wHdz *»'ʗwjpI iF͉O6K:*ĕNys&֜:>觼59@f,YXk~2 B:a$ʅX /ہ1hGF*A S:PGi5{uJT4M%g ֓57+ZQqCVr4G ff8£rB|KTYDURgA?ػ϶& E H$(BT,;TPQQ Mc[L4 ^j]'ٛ. 
} %!ն^4N02'/C UrdY\Zb\E d*JceRՓG`1{|Te@،r JU&S<W!Ֆ'"]Tr{Ȥ]x[9/QGN/c:u&:lj1j暦m1j:qyUEq^y{G 6{AȬżp ,([~A!dhn`ƐbȪPpS_vQ\] +D`&Wx[Ic'G">sǍjÛ{.jvM֝0@}wqSB(B|<V`0d~b6{B*U4R05t-Ⱦԟi HX+^^6TRoZ]#2j[qs7< xh, UΗ+[] u߽|Ebd,(S2Pm4GTۭ1 ^=,t_3SAHRGxD8/jӊB(hS[39[ֿRG bKc@ihN.*'jFV RŲ Psr tB.ֱ@(޼:en/*5FKZ~!0:Kjՠ4:; d%&jAUql` !o "h `5QKjߨJمkT7N'eNȆTPe7)ׂ{4ZA!cC'SvY궆:JM0RO{56Pk dz{`15ij%AJ&ق6K Bdg1 106\ij6mP7Z*PA6IIB'Z (|8qؼ yY;S ~@R._4OBwL#k+ Fax!Fb`dM jZ=?A4bnb'q)yB.h \av؄IGsNкV"mXVf߱7VK JBF۩5~vvh`Zdbw)2^y@"28[Q 9*xVX wӻJ6rׂY e4A=qn>SKI J6(bcD"]{Z J/6gnNOC5( +N& hH0:FjJhؚeJPPOuQvYQJ1 ilqtώSKpMZ³`MrTIOB~8+Ff+`sφ(Xd"JD;A+&hee\S4~?rIuino:wB%>Lg!.":<yjqCҿG\֜E\;/wU, `%H|]n/y^f_GtZ |hRHRC| ˴nG+Џt !oe:Au/R{wT00 T Pyw`0LD !b``wAn'ó}}s lqc\JthR" 01+k)7~.V1C3lm.d\ 1׼e)ʑP !O`=R,Y1>4S!cB7Ϟz!ʭ.]mX%m_|@DW0Ց "<]c<$%QV͸0l $\1CUP9]\p2 [5vX|S'l!^4V:^0ʰPC7JoêHgkVE'PT5ipgP/fkEdJ%1s@XEi'ajb(HW+`{n_1MfHP`o^i͸gatj]!ғB{*=Ҩ\ּ2b۞6y A cE+Iƕwy!$:3iqaRJ/'Ats1J>b 7 9|#% jyOxs}^IJllÊ5{%ғDy@$Lݼٴl'D}+01 '){Ū(.jȱ?w|vNj;8Bg@5vt!޵Bjl!> ϣ 4j)⒊dx+:iԱ:NqXX AltaêY#GցH؍kҸGXl!:2i` agm؁r(gb*/6D̷.aU!=Կ!ۼUMَ_63- ßzwM=oDv+&YEz 6| Piֵ dr IqH[R'V"Ű@I[lzQl?LrjxqYAUT!I,ZNA]GbI%=P1nqqVDy]#VR^ YШ\'D[R "20j$L+c 'Ц> ibuhVY b$Dmmq/&"&ཤw,-oav _|A-V[%g/ҵG^ "𒯗bھonY& 'uw=j3c*b;Lc( g]4qSZ\XLԼOk\^18rk&ke%NL ?F+v@(Jmj^{:^-}^{}wr"B@{]p1,\}Ȼ^fv?L HPbNNw%žDląΙPNk?co%Ůsl7[- di`ZBS?zKz\[];h<:ِatAݟ< s?=[+q #UD$RlhDa{Prc#.Rl#[9w"&ZKXFt չ Ņ~/c@:<7t׀m nZM_zU `xz7.j|K7< 4n,w迳3zttJi21txC3#v gACtV-i?H6~Ϛ9i#p4eN+/7inCbdt *k$bi4AuKac)Ik\qaf%RӮC^#@riaY]T:!kP\G82@mgOЌo8eu  CtҵL}̬a6" FAO3%F\U3@I>A/ &ԱF}À)DT+0¯Zf^S,0L RQʥj[KmB%j/m˥x5wdR @&< {QI bdGD:& uů(% hJbIl نA_qe mqhH䕫zpؔ1!qiai5Z8IWehH5kŲqd5mQ3S%D Kx|UzFkwf!FvDig""0`ۿF&S(&yL:z:+"<()wŏ%tE@@ZVAe RFqbpgt7xfslQBvyrvǒpҢ쌱SM3[߱ܡS) de.딘E쒴~mfk! =~>:Wp@ـ [B}X?8Ǻ+ 4re Mk+6We"eeP ^nQmPDӂ#1j^h)pfڰj]am(뾅% °Cґ(ۍ0TS[8{9v&L,ȃ΍eYA-Y?! (zo5a:}Xzo(eat`r"j$J bkOwC N!P+~(@y=d!&̪r#Ha5W7_**Fb'Ҽj+>\Rѭ 8R,,yܝ/07Pi =!\Z,EDӎ EFѶ"1NkhH}) bp2n~}PZ\1{]M*(eqY x}0^ח?B ^j)v{Q"9C0Y" YgZ^d}oaLeUjajz/0E?[?׎̃%@ 3v( n'b4pCc^M--`4gfR!&F*N(\˷qPU&`#l\ފ 4X*"hSl`tBT2XW Ld.@\ON6*'ݮ tg߻TsDUU7{w&@"@"B-.?7a)UjBmZ׺tӶ_ ̙,2'r z==4Q܀Ržz!TTod,@4-k9@t1 7n rrc,\?;4qiJKK7LYʬb(7침!VţY,76Sr9.&n䛺wja\q _s|7&ƪPK SPdY%Z(`,!cLHiO?!Hƞ>ȺA(݌? Ra|<2>/<GYk32mVGF9t4w0أSUd0^;27u:Xaq_'~ۢ췋<3֜n?z0ًCLN:Qq}uw>@=7vmLX/?If,\<6jAa 6Z{тFz9hu?ݸw6KhTJ2 Ao=*YE.(ϓgA{c[v.t" QQ w˶kwx2?1 O۾Jfəf[F5R-mMܼč5t4kE =k@q_eBj))YbIg)׳RG#wD-+s ʙx¦~4@!ȈBc\3B;Vnc44LԒc1$Se:]lEZVN eq)Y 2w]ÿgy $rKXku4vתqC7@#S+ ˒>5 kf)9>YX) P(#ZZ ݁KPjVrTgӓ<W6 gn%,\F&\fb;.d:FՑ|"7E\Yy 4|WS]\gص,rXo GawI.wU1 :g׬&=̈́RRĒ1K| V3g𼐘^@A'T {i?7"$*-f$Oi3:K{7dkJ3Y4#c Doǹ d##Z2*gf3VHl; xWGZ{]3{O8rꫣ'*ojSran^^fk~6ֳ?q'4}bCHH4b@wcRu <"]&q5}ʁ"qf Z&:oq3F( lc@xz;L߬9.A@5 <8R3Xr 7ُPKEA|dbMcH\vY7tą_IF|ノ3^n/EynwOB/s[z?/} 7eh0AHQy(Ib+0?ul#IJ]A4p БLP!AS#S>mMC @z>]ԭ+jNo߷Rx`whUc;W`PįLPd5 v2?@^@洅]8qf7(]`Mu4|p]auV}ICKW?7(,.Mvhkn_k[/$<f3")qD<&f̊{B&iָw΁hdfMbBmKo_ӎh&O\e|*!A'eAVLV(T6(YOWdۮEg:7qݻZAk͋oUzV-98@E{CmN'D(3|{+rKwsqj:7"wUChO~}P|&enG[b&@7m5|I 7*M"7ו| 2rl7]0 ̢4JwG8tUd ۆoRY|ۮ /'G2x4ְw&@Tj 3kH @5dA5Gj64@dQCgՐeh6! 
{U~b_9 VԄ6Uؘ~)/"ڤ&z 0Lb7aP5ۭ <)F>87or0.q"\^E4%JdIJl_Hy7k.hMxW[L ocBLbIpGrפ@MSECyeb ,-%PyPt3cdB|67#|߇q]Vs~Pn+۔'ᗷdvn,Erlܮ;> M#΍ɬ+KDU( "hJ[ ֭v{{fB aqWJ- `gM%`Y&4E!h@νԤ{%xBd_OK꫶h~rQԝfZV%)kg47|5SVgyj~izqSJ*p:曯HG ^00rߵtȲk3np9A\s+AZ&3;G2|ijHZxo7>fsPwTΥ%NG}ui>N 9[,W).Chh#^f9GaʀAC#9p41pEÈ eg8毅-%͍aO5"#aD C~f[Ob|^zd*g.W^Y?ƹuP6g7XFqr}7PY 2u-@\s*$SlˆGo&+}PVI~2^z;-nM4|!Np9Ar7g]pJaʬ S\!+CU9ow x@5LC9A{[E}^GdJ{P0{Js5~⇄,Oi-5WV b ɓdY/Rs{LQS3$iLp*a r`":Aa#M aȁC#:UݮbX &~ʆSxߌNYWt}a?ߤLApt¹oC) ~+ 5"t x6 SШ-.F*%6b67{={y%?ɚ-{݈Y zNO BR䞽PV5-"K>O9W5]`nTWSo.@UArtke>X)n#K= >?0Z+Lj0nSI)O ~^*닪I_-eҋyQMr %v{=E Z:S:v`C!4E ٻ^)u-[ %e阑|\wj76z`@:\(LK̰:Z꟬UzG!{wG 'Mbٖȟ H)mTWl \$s RZ'L3\j]Fa;C( =F9!D"1y'Y..Xmot8 }ޣOQ%)nIzd{:$MFX-o}V[ ({^ci i-r]{/S(-hS ${k y)ª/c)zBv(F "l,0K}+4K( ޵5[wn)r+".¢߇pd]jbb$ CLEO6AXLJvZ*#zkM*WbŻɒs55_'7`)j ?HK򶋲溯wTT6e m .MrA)}od7gp*d8~sCChΡX(\aD~%XFMr;ZdAC31vc CΈlm K:,})"NuN Pmu]@uHzlcQ>pS=7R ;Nakj! .7 n{w_mTQ_۲Z֥ (.E=VOEVmݪV띛 K&!A0(PيP{dd/$2'{ߝ@vSx7ƣ\b0,2\r,Ũ?';,t_{B.Em^gs}jC#$KO"Ǻk,cTٺK!\CFh"{_RM"/Q#\UydU*HMG">~Prz7a b6p>xoO~ȳߤ>v:PXLa=ߤq36ZB#7)/wo+o[O >tk`+0r0@Mې: Kvڝg=s>9գkЂ{NW5Bn Iwz^&z5bMq} j:QsBSNDv !=%wf'?w!d֪[n#,0wr\:v|Ra$I=YM^:__.X6S]3iUXAΝEEkErHz)8ڞA hlڽ򠲱Z®tS_-|as]~NW[P_ $1䌙Ղ""$ڕ}l4= 3e/X-i.m-yg-@IªYi^w@4P=[щ+!1&!@Bqw&=H5Jys8pΏsiU *'#,z'!,vW$?^%t@tBd-2;l +:q{՗{n74zFrYM?X+t7ptyyQ=5Ew"P۴GPkP-/R0QE{̃]hŀ#}~rh r*+\7=MN^z~;hò,a7+_m#4^]>c$els`Y'gB ,~Wp8 ֠uwԂUHRQFxFXr$IP;i!7`M}0)PR 97UAjg7uRATV!f_~x ?8˭ud#8)4'!\ F':g접&J>'19 [< `[m}oz @3X9^=i"=heG4~+CBZnzr5,e?y^vȬ:&$$`t*>Ax٪ibq~z‰mi8 ۊ%X(9 `t6 Ux.=J/"Z ]c$HgRJd"; }ady FDI`0Ob1~-. hE7FJ%fD+pW~ujn8\׻~X2 Q eAAt 8ys;"95f"xܒѧRfuyg($zXFaJBЗe*>[Urȁ &A7="UD0* @kDH[;i+#~T-KnPcpԾOt%N'J>@?}3DP{Q j\uV u P5*H|2_0vH1XVd;̗+  ԵRUBe{*ץwe ۑ9$4e*;d6n-Q B\^}QR DS׾$I_R$!M kԜ|DT!jׯy) 8hnG怸8@zrxBi)v i-5-OO/c-Gi# kH̐ۊ mnhYԅ*`ځVq/q~ERr^oU"m) yIm/!AA֬9=$J.#9N)#BgaPÿo*ܠ>o%n)%:s՞^H4fFp 8!²3%5IHZQYaAXZ(ju8 l4aPk$rK@2QyvwNII4w`H)uI!AdTK; 8Bf֏"eWSsǷW.rT`"g8n:8lcsktբDW2iiq:@}30DCϰTֿA{J8vA؝NWJ꺡}񋤶Jht@07%՝i(Y1 rBλD`--9H' 4W$Z` ^3]Xz7H7[@Ĥ $z<=[=#𕏈 F:^RMtA0-~#.4,̉O@/YiWAMA$"J@ RTcY&= V5bm\K$!ٙO|/Fݴ`~49K2VF7р,}2T= H{aTU1ax&+s g %]lBm^U8Xq T/S@zb@1 b.è{8^ua~Mfi JQ~9|Ʋb}c+(| ]maMؗb4eG#hafmQ)UBHX-_11,w1،'vĩA 9>b4?@&a:adJ" r|h -zʽ_w8|lSt^a֝r/=a㰻蜲RAaW $fr"h{@C1TE@D`kI7@Qb60v^hzj Y֤X$6+ЌVa?I=nLX8\:caZ Ry0ALA@?x$'sJU"_f4fUO1#`?9|EUTc$}@]3>HC'j7<*o1@9/" `oo\X&+6Pz 零L$ ;#{'d- %#x$ԞU~hnQBa&0|9ڞaeO[,6Х0OHa@í|X J[B ^l/^ xtV"{1 HĦd5o2Ќ)i T9GѴ4=x{?aGċW8^"ڻpuQ`*bo?2a=gKxKvyj́z $*_&{n@;ҝvm]r#/roX@M# SV  yiFedjKKs7g[@nۉ8N6O t_ce.`b)ϟ-E;x1 1`O"f30ĊfY "ݦV7Zrp-&- jŔX:ݽVw G:082Q+Ρ:#гGSdYc]+lQZ 2 HFX uxl~(|^IhKbauͯT9n2P?k҄ Yrܨ3ł7y.KTA*_a'*ʖ`iL+$H7i:h wF̹Fcba줌sggހuhI0^̱_qdr7`zϺ 5%Z |tbzܹwB_dbŽhʩDQaBt;_r΄8cs0D:e@#7ـ2b9 ERfoO޺0ǚ?MX _ߥQ ^PƢoT(zLvL3%3v-SBz|0W`0Ɠ.W؊ H5Id"gfHIhjSzoih4KR usli~se+6 m u.-THV>K#I%4,4ʪ*;vJja_szu3qbψʶjL`ǘJʒ*Z-QJjTEĩWJFLdN?@HtR*5X KQԁ0 냶Z1A #j0-;ƤGcfz-:ۚ1 xdNݩʏۉ ShjiS%fJ17{wCZjX*"*r#δС$ D\ɑ#׫PL=c?mwI<"g1f 6L!AނR¨rjd,&o&*IMP*wl ׯ}^-H9f&'mLIm !;eT#>ˌhbk%HiV&3uE'3dJ,Af@>E,#R _/ Bl0ʶQjkk,F6nW)~g݊<5Ȥt海,uWGg!&Ƹ 18pMOL2Y~S6QÙx@21SS|&Ddy*h %usZ!`oA*ÔbIvjyTD 1DIff{q r ;ݠ.p2BDGToeߝHV,23'6ť{ ͿeY ' E`I(NKʱޔr$3Z]y;A"b$E} jXRzd7}>h %qwH_?Q=-u6`$iGZ{ =o.^Tq7 $!"| Ru&/E\0<$̀Q @N4)tE|z,m[:}|{ "ehz{<8M/(_oDDl|ء8?uu#n׻Wƭy%gyfgddGse+ՉZL)qXФw[r 0=wIq?E4P"^-9!5uxL(.,a׶(0_9f~Y,!WF@\܁Er厭+]/JoMkqhXw2d*"Ha,vPY&)2ۿr<2&OG&^A[AbXk.<m0EݖHJuΊI>{+^ [~_==5'nOOHHge[mo;Z;ߊтsVDδER%C͝m6isTb%E o>2ex[9ȹu'dia>hD9>- wM, 4 @j AR VKԪ޻@*v1z3TJ #7p- nbg/[e#Tjz#8r' \ U\܇/`b񉠣H?3]"}%X R]F* u˜#v$n/V{j[TZ|JJ Pb,s9E|}tvbr4x7/o)\0|v ͍!ր f?x!% [ Y̅Ř#hl42{RJ OԒ:Y$`n]75%>*bp"zJ3t}$T.O\F,fx~dL5[USVn+$ր )‡ :F~^Ǽ]Uf`2@wu @SR jb<휗>*Rğ[DP @!x8_!Q` uc{U 
|P}^P[%ft hQj(gj^!qP<az=WQM/?WkŻ8M ez©9BÞWe  <a(^5nZ 94ݢ*Z&z2/*ÀjjS>^@&1*У@6׾O5 *'d?!{$Xgl$%-.x ҁ(Bݏw-]MWEWC ]XȲ>!ý Twu{@%)sB" Ѐy ǰ HlNHF:a D'0d4#?BRA\y}ВQ2}i"-TWAo߂3,pWwqzjoATOz@eT2!k.0@8 A>(iQ+ep1‘MȦ^gy3 $>cYAi'>8{".Ȳ S !I{6LH(m._:-8#1@v!HZ]p꤄xUP WlF`gؚ esWAįEwZ:Oh'S`kef6Dq߹BR!c+ڈ0@HQ$~ӻF_p|8U#$ZDA w:Y0rS.o^P*ET 4:Iűv[iD^?EB2H(F#kpм_]ptU^nMV5SAxI_wKg=l'(CS~_hh O}(ȁ?\?}W;/7?=9WG ZRaLK Q:tB n^OgUD_NKk|v`\MX Jq \©YZ݁mBz <5a)T x~^ )('nPyA38? "4.W8qa~;X#EoW^u_p`xW@JqE6A "f*'ͭ^W-+I\3 ;(:8+`2"J#R"]j ()+ rEnQWL̓~ݺ~t"$*ܭsp5 |G*cF͖ 3Y-'\Y LkAMG;2 Z)9mcH@IXju%AP5HwBl!zBة8F8rbZɄ )x?Uq&yQ}]2 tRF~w]M\ao"){bl+ԢmRzRS׺EglKY"@AH]e233M2'!<'dM.̴1ͅa4԰mq7ZVQ P>=X_{-d,G7(iOAPpARӠU[ ,e%9D-u+(.,EK/m0Ze)' D |`5FK%F&,m.b, PovQ6eo2Xso+K%f(.퍴_ Z=]"GR<.E\oE&0IRDXT!mA0A5"ݡ$u {O`?FBi[Vv)y)wCo3O!92?+eAPѕEF="#RP#uT~+*FnOH}C$U OWWo2rZD\b- Q_[\͠~"D*9*EwttuSF:1,[䯃 5lB|#HՁFn=SJ"˙7P9 } 52",HÐ)0 RDHB[sh'5#2qqfm҉[P1 I0ЎK3ˈ699(A֮ Q1z @8g`7Q2WWeF/ 7!}{ *;DNw׊k/%uZZe MU5ߡw[S;2M(c7=cppuZ\}[G!W'RIŒ0썴M_f~Gt1ߺz σr1-fqt3a&JZb#.}t$ NvէavJ1 cb%K Pfy'!pw260D`NTjf} 1 "DV47a ONwtᷣ"j|(*^j%1PEAg!:`=c*n5B,:橃2d`Mh:ՆTK(V5/"_E8M _K# HL5?zyu@c RiM_wdsE$C.;+41 [Pc3A̲߮?|iwbQb_oPhFA 6ԟϩAj2)Y$tD-9by=uxc(KX6dGqTx*r)RJjX 8:R c2FxZ#ƑzBI_TU3Ȣ(e/hfVjfeiYYpgaٕMv9w9EP~  9y绿9}l<1zG$ۍL$X\N2w.w7+:Cm_ X +a7M|c]}!LdҤ&sO-дM ՈFj N4LQdofƊ 5 5 w#U'PcԷ?[WdOEK [b) Rso_u[g##0uCd7KRkaX0Mjru72aT.OvI7Lqgt$w7D ƵMK3'J.B6JR'Z&$4DW!*H6lW@-T;z )4"4 XY3m53 p$qqgr Z26RmKZzuvFFƦM_IxtcHuW܂z/oK nJmgsI7;^c.iŋ`L3$Z6!̰1ߥFk%)܊dBa3Mtbz3KTtt!%bKI/QBOQftXПQN24Tրl)~6+[ V%\,]"Rtˁ Q)!nOsOT8?'+V,X`eff6[c wFn^n$F<ljW )r?lD:g=M70ծ79%-S: -5 z3oj}ߧ&&JЏ^&Zzx@5.3iVׂ]ʦzPۡ>!ePҤz5*QԨnq@aLMRt÷:tz` 8*!Ҍaʃp{,e %V@Р?@9k85CE|غEdn 0!dU7 R fCu` R'\x?u07HiHP`u( sk:o|f2½dum@|Zp!3`*[NhK֯}!-`B @4p䬛ʿ+$Dnzb}|!S4W +_[p SH 7 ?@ _; %wVJ'=P#21KHk$f@ؼsSj $h-To4EM$EὥK [sc^WJ;Kuv'sHrumty")/(r`*A^M~?Oнqq{#AmFLUT |cYzve/.+=l|YFcBB :9F6-Z6E6p=vPN7FI3A3Śl=ރ=@uz*+}5X)77AWQU~0&M v4kzú\|ܫ_ ;)KU;FRuJHb # 8u~5"YVQPKD]lFyaJ[XrLA65P%u H5Qݬ;Ll[(Ir%PMBؙnEʀMS{\[(UR3?H]1Uo.k(gܯ\j[Ù;CA~"A1~-keB$&©dzjJز.NF|7n-Y KP rŧRJqcJlʂyn ^ny4k=1sS#0sokR'STNRФ).TwIk$a] Յia/D:y,DCm!pEmeY%!{>IaZÇk p:񱷹$WüI5XR@;FP"-Z,' m.J%fTU- G9V鴴P9D:?,2[T^ )5G T^ @y%5tŘTHCS>1\{ ͘ʵ̕s } .3\HP!E/ 9)檌CfІY<+|un0ylf~F_^:H?ww<dJOLGn?urN?PahkUs(A(;7I$u+ɷ\ C|AB57IɌ/"8w^{k7D.e&DjpNQ̭Q24[7eBi1݌6J!PA w uD)RV'8j.@r 0X%ƛU8MFnt ptި xoxN_CPiTU<`IiY3DݕPbvbY.V9 #Sbo.Ygwuih,8 @hk^R)`AG)l4#Y׈"t5B¨c\ltsUKKr4rk1㋜Y fayWju[$ݐw¨yr ITXWÚSrÇ=~T'OlE:4Ц/WtBZLv);dS6Ɠ33`ʛHʌ"%vC.ѭr Utӈ2Ƹ"EДJoކ?A{%漞}G.ٳ0`nJ5z9<0o.J̀3NXgKV2SZWC}1?a[\@aCd] _B_z|4)R8 >f|=rwz&ۜ n+TdKIܶ%+Q" UXwWص}NW4e)pT{b72S!s ͚*PAvjM=l: wu'7>Ԩ#n]# E%125\V뗬f JpGuUK_mbgPolJ5bFن yj*SӐ(k@Uv*hka;qc °w TM\׿M_k۟zr7>NϽ:wdxsx EpzMqg] /r+F v$a;=nڷqwj^kphΑeuiO'/4V :O2hjqi8UWf7@-z f(B;֐S.+je~qEDim/6h \"+˭QH[d҉NhrĭRGO:-/Me/C"=0\ֺ”uӇv~r$ӂ . !DW$ܐ-ϖ#QĴ.,t4U aAu&7>ی]oQr;>6cT Lq%pԎ=?Ɠ;$O<>s-4^hP#uס M$,,+#$-Ʋ/3ſ P\ ՗lF| -LUw &.x??餿}[ƿhx.ym2P+wqPT~X~aA*J" Bmo@pl2bc S}j( 獝mۀA}i8< uMFW1l@E8 ρ0a>0{ PXZդk5! 
ըg#2JL1P],KpmN;`:HaemMc \;&WA?>EQٺH1]UɁ6-,LN9e#|W}\yNK=w*j9J65,^>7sYSZcY3Ȓw ;߷6(s0Z\߱~~R;H=4븎*e qGwz·:z'ß XEGxz xnsJw΍;O/~po^=;7` &Fm[" w2g(baH mV]u$_jzTBWȓa:e#}%cs@ Gm72}0Di+P% hprx eqķ?42@:4Ja!EW[be.'Эȟl#smuCBI#_Xtbü |yʕp/P)7oE'tB}?hq|U%k[n)pGWHaߔ!y`񝸟EZl@4xd]_AuS7|#etzuL Il u5!%rT+ z2o"_6~!mfdz6mﱽ`ֿ\w0MvWM*1FPr"[&_RIok9@qW3plpTeS~tيS _4(v\ L`:E=f8F[T ׉ f.RCvy1'7X}jH0Siyˆ?E͕PrSG.ЂpCb&]5 hF&EeFFL@rFR ls@fܟ%rd6=,ltNkp43Tπ`TrCz2rR1:]RyC }ŷ j&@Iͽ9{nc;6q8o׎LgNu.ٻ vֿeorDNݖA\o(JsONmar/Fp]6 d$%:AF Y6~`+xALk;'s?zR-+ulfXiTjψ˓~1LG&,s@>C1]0OkOޤe#l`zY~d0%c((S6:LKy]^IZN 1V#{YRһK@JtBEʼ= Q®OK2o Gv@Jeo.9*\PI \ce6Vq7uMo\׬ٷ~6[ ܀fUn&y+Ȥ.Lͺ*& vu_0ڳ_0;H$3"Yj\=nLmW|H]D(^N>.-N/ Rkӥ H[pR'Lx` r( u篢zt`R=Lc$@J7N>Dg7B6Wu(柹^PK5ì+\!r q|H |?\Fr$XCWTWYt=W4nrʜWB ;|y>eESl;B@"ʼIHm+3SF:h"5\KP5Hz9}:`J4 B >}zdq_΂V#ꛬy˝G~d(ӂ4]@4%j2ZU?~.dcX오#|w=iQYXe*u|C(1& ?@Am9ϧ&ZSYi<3gD3=d1gvUZAKلBMDk@à8yo%vNVъ5>S v$P[+oQ'Ye >͞r7j= VꞅXyq}%sNQ,A " DZhN &> ^g_TFPyEԔ^`P 8 D(e[ xT%RQ &r4%߅,%Cfx1 `{I3cȢ_ _أ҅y}at?Dֱwl- %nҁ IWw<ר 7 Dua6z>t[_1rX~w[/ɏx)V&,;F)a "1~koU`~8Rm(Xt"K^wL v0qry-Le"njFJpeGN؈ԉ0c}VΠsR>Yj7'}7:4WI|VF64^jYGB_O D\*ʱYELNcx8'e =<ބ \RJx5u|z;Q)NҡrvB$<vo~_Zr<նɗ xH?kaT/O$!S茆;"G\jN Bfb6! )x@uc1^#~#8jFsʴٗ4E hEvΔ x]B0Aޒ31>>V6.s6Wd5W gO8@@\]G ~4I2&2q׳4kTԆ/>h7M\r5ELO*XlIs=j-%N\ѸIcDhfԋ]S| ;N %\0c19fC-b9m냺"Fp1H^vP%DL'0Wy㚑u@uMYf)VΞ_YG(1eE(!Kw=?(H,]jsheb; F-DOYts(e怾ߎr c1>Df\,]L}xl tWW]90[+%2=16!l`. 2.Gdy[B/a!gDc xZjr 7٩rA#(NvdZZNH :Z6̮f%=bylD&%_DdJ(#^lfwSzx '>H[9G3MG,8uLZA7c1cg6eʸ0ג OŰ hq+/03DIg6 ΓD{XLӍw`$rDXɗ+ hK%m.Sm`4yi؉1-IL1c?2Dsޜ ;XA:Qkz'q hF~P2 (P!Lrt5L5hT}삆1=8;W]1cmE,g' F*nQ+#Җ㋽v@*b<~Wi a /#3F60m'ԙ`1clc@MeX.{FXQ~,n`]/o%:>w1#4O^*;`9-?d1c`H 5}np/<4&VG6,%[h|b}?u{)sp,c uLO@Ǭ? >6>_J~H_wY /a{g|f†jw @% $7};| -e}?7wpkQ%>p幭>5xi[v)M54MwLf˝cG%J7w:Eool6t1'<{r+V_ƚeݷ=<3wTCs;v6 51u4ዛb O`39$b1 ioi~+6H]R&A}gJAƲ= e}Uv oyԟK(xzD \up`u4I<Ӽdq}zܴ+91shhM}:ΎKf/vjql-¬N)+ܢ$%3/ ϱb4JZ4n h2nX)Ĵ@qvsӲڐ4xz, .;uHd7=Ռ'TJALV6fﱂ@y]X yho3Yhhg˳LfXVrcEzfs%z纟o90xo'|)] u0!9L$0ݚ՜N$#d5 ^mΰa)-9*+ĆWBbX=Ó֖]UƈDB޾m۪:/!EB#Fw lǩ>R:\NO …tM֨RmIxUMc-dZOw k}I=@n2I(wgcC FɦE$iU/(@kۥe5 q g.uM$'G3lӧ@NV9h8I1!Ȥz4hDɥS*ظSH+n(lQ*c`hTX;]4Y¦ł,X0)#$[!(H$2tv q JmX]yJ&nr5\0|*,73癝ytާ5Uľ?1ܰ[z&̿w@/= _iCCPICC profilex}=H@_[*+8dh,q*Bi+`r4iHR\ׂUg]\AIEJ_Rhq?{ܽ S(DŽlnUCFaZ"w~/ x9&̦q'(OtAGKq.A=' :tx8$+*{˜8+kݓ0WW\98@$PF&"H~?jU#P ~?ݭQt1Ų>€h-ز'Rj$BG6pq֤=ryD]%Mo7[o魵 C]-x]ۿgZr iTXtXML:com.adobe.xmp $:|PLTEMMMMMMMMMMMMMMMMMMMMMLLLLLLNNNMMMKKKNNNMMMLLLMMMMMMMMMMMMMMMLLLMMMOOOMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM%%% ...)))<<l0 l7qmAm^l?4 lh@f٦Pu8њ{O&;XFjץt98؉uXY? vl%oi0[Rf/ A}MҀ/@?^黅ނ-&@X}9/_y*@C߆`K UYeNQGt@Ud`Bxߒ&?/_w4ނ$迟Vpsc:7$˛_flBn0Wztޛg pFj0c_+9w04}޿0%w?U6m 7'1R5{>}K/v_y1T0>{5qzM]s__%:sgy]Ŏ3???uo_veY~N?y] s5xC5o2/b~Ngk5/D|"cҚ0U5wPO:a6$OM 7!7S) NfbA]ոj <~s;"_˲uye;xIOa f Dy$ǽ5k'g3>jΪf?Mc컠 ȓGW' ~#2}w,GwW⛝ \r\|7,E>B\trүyޅ["+ůK5Dߋ,'w4/c_&({7x&BpG1[D{1wvz#t) m5sGؐԆķH{)gT%Sd8!># e=2M̸[?}@[D6ESC3Ly_vqqO~9˅PM=n"$HPpr GhM, 3SD#*˹ٗe7^*P5oR?=㪰gB@ {MW];~WIۄKQ&d$BLFҬT_k6 5NC^!/%A#cPI}&}x}f縶.9z?siM]tI/åw Itsu9E[p6$FH"{<=f߲#j kf}'mE $HյhN.3Fu xpMc1B^6 tȫuϵ|<=emYF_6v_s7s8YC$J>Up=]u,%h̞{\w8ܜ68y4!q1r?$*Lb=r<]A8N?js~('pTXOp1+Tr]B9ЯNP`JR_srVN)*b}2׶=psznBD~P$z0T/%,z.7 ,Zs|(9ATK*D ڣ7}JpX=k-K~q|o(Z-anN_ S~]\ԭ QvߎA<!>sgV=yL9فW7pJ 8c7Dq*>eckzCktӟܓ?=דӑnJ}`yx$yyQkkn *d7XM.L~w8jq'?+m6wA]"5D+͞5i/ Ǔ4nEa7glѷ៺, _x$G#s$G`ncݭP˯x ;+0I&adQ@u/}x\ߩgamȋR&|Pb7- @(pb5m'L[}ĺ*O? `Oג;gK6 EY?aK̇jD4p3 'P)>'D%/{M xg+MFq'g.T3~ysÒǀȣ6p}ИN?4Y+RbBj# 2`)ؙӉoo. 
P[`v%>c~@]^+FT5,G]&?rY; tE$y(#U"wjK[@ Q3 k.ަo0bl6fh[8ݵ9_)@9ޜN@ GOSM\ܬffR3Q\",︮ {,#l#psbbҬ F} *1.QnM26I<1`< "vl/;.lc\0^kLX6(B -@Cdjipr9kn=|## b̺ b/ߚUt\t6:663]qY}'p^ZPiFp/~`*g#$0^\Bɖ,hvqd:|g{G Owp1dVnKRmR lY9\|K2>P(@I6镳~3#-9dKv4_^me0BeR57} |! h, )l1MNX]2'(cZi$ ɧ]­/]bU(@Lo/!(ԽRJ-i+uE!8&yo#A YN|QM55|`{[*XbE95zJ$HN,dRG*_ eYjl+X rfDǓ% xWW=r}X=L_*iC7N ߇ NH'yQ iFiZHKfymqZ|LpG":qg~H-"e` AL7}_<• 8 _Jϖb B1IV/$Ԑ ;M^qy]v@8(@X|gA!ɟ k9 >RK1PC`"F{G .[%9ؒs±zf=.oV 4*¿?> -]`V/fX -iB|$M!8ty).V Az~b1LڞTzb?i.R4;; ɫM&ĴD\? <($Э0\#ߏ9ɴD}c.p|D=`j904 i|H*/k2/ 'Wh3UABJ6ӛMڻΟ, A8XrC!̣5{;?^_  ^.PK0?0n/͇ Tm3񡵥N;B}lxo l)C4{ovW3ȯ_hOHvwWYMQx;"@ּF"&>޵ott+=` NRc!W}MK ܂7ϽMx̨3$].Oxc^3vJWGp\!6~8a5ηF(qҎsq]`d^j0߈G gOHdaɓT9L& poK#g؁]{}빖3 Jl@g4fƿQ1ѬgwF$)'iS+̫mXVlFjOF4]_ 򈭠Ϩo!*EFV ЄD6[>"^ 8EW"1ʡEr̗9L. >mc\Sۻ"V;nDZn_W4 7kΦ πk/>40 .|X}.}:Ոf`kڲv~:<5JA%'އm@ߞn\u=,5F~XhM /c"KaO\7eh V u7 uǕ9(0`%LFn3zuA6܀()if05e^`VRѨ|0Έ(7(6 pr~Et倜\ KYy '~Ƿ<\xقè!ĬMoQizZ js_}9Lo36YmoH2SrӉ%}ayZݥ߸` \Kó߅=4 p'IwWThkzލp͸OKz!@#v/m&I;?Z&x*| h^2` "]dXHoǹ.f~~87DPI NwN=!=Lw`[\/Sօqݿ{%(Lo}<'1:BueLa@pDz?I{xT>\L 0}> {.7Aiʃ>tq(2u>kYFepGH; SOtџu3u&z ]GKؑ4_$]T%"d yRn'9Զ/~ ])>VtI3?!N]g;@'cv."^0DV{^rCѐ!U->X-Vm_TAB^z-;&>Cb`)C6GQ5$@3w'Th1@kr/ 5xHoyߕJ&vyBQZM"A/9nM S燮zCD|Eԝ 8% ÃH.噈s e %d,+"3A^Iĺ"Tg4p$vrDRIUa'B6L(uƤ;HnG2r?ȵi %pd4I! &<bԄ.c>6d'm:-'iP&)r[ IDATu4dt DI_$Opf+Or@/ 2 IU\4B0$@\]D38 U[đF/bǑ$Bg$TC_QXt!^KURkY?#\y:+{+x4۩g% ڋ㸼NsVo֫;*p+ \c?hBbj3Mv8 ^oҠaᱛך;ZW" &rB#r='*2?3X9msGINe|~GjMD'#GؒM<3Y&bh;I}eJ=Σvw8 F;awLՖ'+VYhqnm>רi%ƌE`!iO"p 9! rAwGj2X^tАa(p Ge%HӋx{'=(Sᩋ=Q.K9|rװ@FLԊ)Pq@z]EV{uA힜 UnD8=̾篾sݢ@w`'Ș%s]:,6i :S+3 XU\X|őbt /EX~P+@}X:D糎ŵsL[gCс=B0ҝO"Y$K]);Iٱ$@Z-xT˝C=6 }U¥tzK=0<-џ:@#r h9x߬Tnv7 VEg:v3E+nVH_nHcf Z`"Uc-V\jxcXwH>UVy\,e;ޢKg%^cX3b %>]bT1oQ;\|WZrkڋ`|)rÕ0{j.`3|~#t ~0 pa`2ZH2!`/ycz4 ,NUb91#֕(skȿv$[Z}@z hOK+ŀb>L݊~|Zh6,&p*Ny=s pV[}~岞UB(1%u;Υ# V6t!BY :JJ]/qDMA[Tfϲ;E)m׬ O n9 FDw8HVW s ~P[/@ڌ -]X-eA!V7y4&⌢B;szJ|:G6t%b⿆^u a DK`sU2ZkPC]d.4Uu<8y" f DM5V$k׭_ҝy_ @wd]Vkq'(|8{#?1 vO<3gu8~yqB%61W"6ݪ#,. G$%t'=/ăź*c=oME:Nh.|(0M s!v4Du<M,Dlǐt|V$wjN> լqc4b[*94HH fMfvt!5lҷ wyt+J^8{4 tyI! 5i "fM]z~JY:7H(uoV]V~ڪMy,ҋiĀB5H[kȯ bqKX^$1{ЌSh!>-q=8Bc[<#P!Źl(=GaB+KѢH!&\HouWPUjZX-Ӥ6c;mX!=@_j[hiaMQ p4OUZۉw{g%&C׽A*JٮB?{Ti0K7Qěͳj-р$j1`0KΚ+VUX t@a ˚/칉_\1D68/0R&Bc"z/;켻 ((U}oNhi4wС%Ci]W\sW 8nN,_ޣ%?{tihJ'{gi &vtD(|t ` 9S5t)5hp-x$mL*0xBZ eKw+} Ɯi]"`{c"$ҝviPkӾ'á0-B/H{M-$ڡ8p1.GWm&әLb'f,qPAC9߀]#BS% b41Ύ߸ }FOw,2~X>ZE'yྣ못&B/p /?|BFBOߜtgqZӣ//!ͻ^S&%Kȃ4 F޳^Qk/ثqIy_!LO&E`e" ڡa xO?HcM90=+65A|xvX:<ШC]ع-f"tBNHqʤJ5| 1@zg^vlj,,t'c"2sV_3w^u$0I⫴H!K6FfEk)G0} ozSФݷΚ{4?u7qu:J1T7g7y Y[IVVR-tq.nDH?IOlN\'v h|B!zZw:o#PY델tfvᕫ/V +v}ƇW^4g*8'7)0tN"љ_V15Ɂߎo7 _#+}^t/goT p#A>P(E#t-J'nLrMCl:MZ$X8k !%.GHa2ʮIW']4}ҋxfOO`Ǣ$\nUֈc1Pͧ1 4j|uR;:+R9{.Wj,,)I0^3=oT+.N9@C|+(U+asV/B͜rqv@۵{>n(ݥg:r-Y4<L CG]8ef^)B$=w4[c(gԾ(oj܌o݊<@ngigys9lC?8)l@g"@*@swN~{o? 
|>Bkq[1 <3C GlS^mEV/z澦@SQ38'DE`9x4_po6_­j4[.τNt{F9xX9'4 kFI7׷F5 14=I.gYPki(`?]; ||.'J #:n̯\av43՛" W1` lC*k,[Nk+;|-rZƨęZV/*pN(ETDuC4B> ShcG9n_93"3O6_c}ɰY\<,a6Үb3 qxY(FmQ*@󋪹yfq'Z@0։¾yGF'I* Jux .{uȐ@zq#~|W3Э ^.ƫ:l+T]/Ӱ ]rH:0_ c$.̀(gtk9|πu0CȔX`҃ڣhKang|eHި;LBe\[DP@F 0iLHbF{uYl+R&ڟ Pr&AԉbY)oH3xNwk5aYG bo 0&o: =3B@YR"We&AΙ901&m fܼNQT/&ڮ0(yKO3[- ,2;%s!BQ1f6Nhtf(U5,c0n%]fo[ހgyH~ǥ=ih_V* lU}o_cP hqt-OO;c Gt7NK!@W+rOǡUE~x:@KIYX+:όX7/@Cӣ/@uEh~Fj)*iMS#a%XD<{'5zxb8rnW1XwM<P*who ꕑ|O+-RtIiak-d؅J Wo|mK<7v ,(@lfòl 7oEW |M1BKg՛*f( _n({C\ 4Jpn>c \TܺΆ2VyzfAs1ϰ͋8+8ur_`&Wq1u+Rco+gD@9q*pC-~|Np;zQ.+p2X-@ڲ}ABv*'2 L,WkVf69[q%AOܾU:PnCC375fi34Tk|̅S <0Cbp"U^H(jo\ӥ*v1oZK5 "M;oM tB_ Q{i W<5d (꫹~dOhg 8 q'favF_[-Pfixg|kn3`3VA4W3GOh3ecZ*8b6f8 \oYD`9J|: MFT|<(U%`3Ngv9т@awR|4G TNHҀ׻ 8ymyI.} aL7Lzml~T/@O,3n`+AyH_֕ OXӪ寘j1%5uVFa!{i~ Sn >`L̜S9^w.\C#ٴSދ..w? ݬFQֲE6]Rgd#Ս믅4|_ |HU{JGKwסFY`n}[jBn0oH'vr0q~1 䃕0Wu QF ӴRm0"HtɊ/$f!'r {K:q ~bGKk3kAєghTI ,2ZSβI妮_ ?PXR<}<+hwQT7~^ayZ±M _z~U^Bhᐝc9]rSdm*+5@{Y>6D/@ s; ΄ vko*㤼0I".nV^/q$uf`>,g\ X7܀kSwn*j) rYV*@W s^V84L3 +FWݫnrڎ ~up'H,oĞwc#0zǬ5-˭NS;!b wO vwVgZ8 z4M^gڡz>`΅\,SiaW鸊!tZ usg \S*)k qo7W ] %?P̭zط4|T^2XVPj{KX"4iWa?k[((r|ɴ+% /E%f/s϶VN=A - ϘQ̗>4Ա]&@K;N-% N[_l3R_Uf6tu0LM QHi4; p s\ڀpM`r=`*F)6"siJ᯦ϯ?d;KZ>#W؎Qˠ4hUwGW4. KUVEn>iSU,xC{Qޥ7SB򡪗_r8t`Vum.t׳ 00ضT YsV8ΟuIDATF]f%1(m*wy.w<Yx(:ֵIo&A^KbVRP9X:5 !MxpOVI8 KNV']襫Ff4[rb/C aO D^B?=qEzzB^Es,UJѰdXv -o wX QmD=ٚ'~q>i_FnQ@k"tcpVZ 0 `K75m-tOQ(U |Ħ~.Rfp(, 5Ki Qjh,%Cnͷghhd*n+WNVit_vBMưKK̀<6Sȑ{㶩Q&7pE,vֱ 25< {}`|:P>yuD2Z2$cW#+tQ+pGPe 0e0Y2wF sY>^9'sӸ1@G_QXM%Dh s|eY4p)pk+^۰{UFOW6hn"*}/a32+ǜ( )g:hRdIWBhUYFPmT TW^yYUҷʰ'Fl]=i`ػ.GU ʇ|[`򴆥3r܁=!;,{Gt{5.*ub \ր-lc!OJ.NOP4tٖ_vߒwbJWx{O: v*wR4h4x y޵΍ XYӠiC;%tiy`c- CW/58K}pQ5YNOuq>$m8qCO{2:<6Y vQpH[df?a2d.QF,ZJ>yW٩s7w\ f闒#f(=*a -L Pۀ;*@tǵ*{ IWLVrZ Qg(dBTWE#VDW=VQ̔Hv=GU bt E|Qɣ .ƿRC"X޶i~f~g $k;Z*TԖ459sO#įÖv8)bNF`div Hj-3c:BY\jz*|v_L`LzQ2IötJPAacA np +x rlh{QDU*oVQ%H J/ Hoa|N% YԸ>*hu<ˍ] 7^[J,Ǟv&Ft ,+%Е:-?y`+e=Q tltO<cST߾4|) !j2=UVN#ek+/3hwC5 .2}Cf@}Os3_,w 1çA_ 1$I6ѥ;[Zz@miSR$$H8Ґ+tbL7oLDnzx @ a)Q;0 U 8kr&\暚_+/=:;k8ĵZC]fAifр(@ ynsuj?|97H/vC= gffl)AE9QpY+b>?[樤 \S4B՟ t0.e3l] ivk[ COj4S\"לl{ELZGك}<8YY{Div'BYf/:nf pܱ+⇉gƦEZ 1k"Lvjxf>z->U %?6 iU$IXv+@tq00t'ee>0K?o]s!߸WV] N3V+«"?ҬL̫ҰUsmN2_lq!?l:Nu5ڭQ`F}!քC@#O SX}撛?xΈ4 L'SFLZ/d;x. & Х@ӜZ”&-q8JS r il؄B"tĄ1V00865|;2#qH7@>;4 MaA@Db ;(g|0/Ita# vZ.ЮKK]=XFXwɯޛkt^RhOD3QO7'9.^53qHc$OT8UŏLoPiYLČ?qfJV˨)S L&@jЁ ,@Bs! 8|,L X9<{S@!HMkx>qC^ƀVA@V׌&3Ӫ. xXL9}w0l;vҀ `3Mh4Hk3p^!(5 S2,pjd7 7I^PnNR=ڹS)<8>T':j9!\bƕ;Ro!@[QW-Z*Vtppka6ۀ)n 4xL.^Yڧ3yhq $Q?B3EFND%'{i9պ<Aun- e  a8r28\ܧjdiiF%8O1 eI7G##3"likYlY޿b:K9e!h~cz _{H~(r >U^H1LQL9?ILXu"1YUKyL`EIf^[1n pF=, 3l;tD8h pfKiTBe-&W=%ù5euAo8[po 0 tmvHЈ޾?~?tmi *zi3oܡ+-Fp>̬1u-)t.$*k>Y=?p3 J]'qi(MnD XZӅVVbچm;tf>dO 0҇Vis_2F"2jxR{Yk$)e܃ug6E%>+q?S=u)I1;vizdICW_O+e90+֗O8 5n:FnH b0`98?gf`ǃ<ÀF*Bv.oNk~T+H]ŻuλVf8XUMXcz|_5 | Wit0I!ؐN0Fpsh췪B׽y"NCɦDTt0Qww=X.*唞 h=VoSZ臅oQ<xBl}  u^RKwMn. 
˯0Ƚpl~kxal5oJfS?oIfv ^so?fm]`[j5[gL޽p[迍4򃓛,w 櫃IWĀnI[42gݣ_qe0= |hdJp1t~oFmp降fEf;'Qϫ}קQп=Nk&Q74/fcEu ՆrV0߶G֣  @\?upjF3z{ҔYUsG8Mx~t>5 As]T׳ $)<C [2<Ϙ7 FN6Im&\rWbY30<.˳:ed({Zӷ8/V{!5TfA/͕YP60|vh>81与4<.2/}U?43<|a9g3y u3WJ(8^)\/V?Z٠ Uki%*MjCH8O4^_9!Kfm*MTcͶyEdH.1^EC8~bߏ>.ꇟr &̚ >kDjBfRel!,c)%:Ocӵ!ޱN,4ej:\Vor-꡶ĉmUIIBM!juE^+[` ,mK'ܞXEN%CR'ĵ*RN4:%jىU$[Pet.R0 jFřކ/#}w Y <8OB]gNٓBs9\KLt% ǥL k JNC[GZzg- +U˷{!q%AlW~qsiJ>Q֏۪y:GDz_̎F6/\=e\˹g[ aVm(ZS8_ރN ݏמA"Hs |TKbza?W )B/Q#$&tHQ5%s $ہ/xEfL^#9M& ɇvzV"y!$I>wT@8&17PRu cg-,ԋF&FZq.Q(pu/IG ēouW/QD]bN2i 4Ϟ# E0xf""r5Yƹ~{٨u ۛ{×o2cx4-uāxv3A>Q:O4 Q̧]DMd5 p ӯߟoiziW ~hMy0-u`B"\ImtgyӢ_}czk_9QM8% oWRsjM9^* A~βsw u.NFqf_6[x!0}wJs*tl hb %Ϸ1tk|P\b3FAAAAAo  B!GEi`IENDB`sqlfluff-2.3.5/images/sqlfluff-wide.png000066400000000000000000000400651451700765000201000ustar00rootroot00000000000000PNG  IHDRig{PLTE!!!MMMMMMMMMNNNMMMLLLMMMMMM MMM MMMMMMMMMKKKLLLyyy &&&sssMMM555~~~ UUU"""OOOZZZnnn///FFFMMM>>>999,,,LLLkkk iiiCCCfff ???bbbBBB MMMMMM666888ZZZ ***QQQMMMxxx%%%MMMLLL>>>MMM LLL^^^MMMMMMMMMMMMppp///$$$MMMMMMEEEMMM333MMM``` MMMMMMLLLNNNHHH LLLRRRMMMMMMMMMMMM(((MMMLLLMMMܹƽ³խ222ttt;;;bbb VVV{{{fffQQQHHH}}}---^^^qtRNS 0"w,5>(9aO$ۥKFCS!쬗xd]睗 ~rи~äl)njѽ˥xgpᡑYȭﺷW}3¬jԲ:qS؃B_MFgZWQJ!{<IDATxw\g$S(e=(DP@D\,( :ZVֶj]ބ=DPpOKbmbj?| $#{q|"Vңݞ{@ik>84{A~V 4{,],Qxz;ΎiÇS:4Jx6W  siɷpѿ2@gE݆٬#q6+[lƠ"'ŗDZ=$kI`3Uws00>I}5LEIMnUd ܯQH9iCA"Dͭx0Y6Ӽ(\>Sݏ*fw#r?;Hz>(Ҷ޸wI{1\":qQW.͞-0j6@+p4X1Ppp69?9GmR?FXw굽( 8>C,0z:z{ZJ/(iN; U[>ڊ3%-w'+ou\6Jٹ欣jb16CPxp $,7bk]5ANC9 \N{y&Z,@{xhH5<% 'Hr%j/0*z"tϛ0k M!|0=$w,ˋ7TzF/y;|;g}ρo^gI bޖVu'=f[eYۭU"7M.=9w"?5|V載6ff$͈ez^Kpeig-M`z7 |˱K p/0WH KsgLyTBX\?Ên?),^6U]bjkYS~ 9 }yaO'&hne?$ [fl׊驗-ZzO|S,/fw ߷aSP <~Zƭ56 HA'¿C&R{FISѰEcᩋn5!bYâHyLXf̻-mW7E)vkB *2-ٯ=:-C@ hst4uPZiA*~F j@o;\=Ua~4bH7Ym^NN_3Б Nj `cN/t3ԶUǸSư\m~YeZjw0&쇆΄z3tȋav|(RŲv֡\G5jfV/]엹3~ۼB`8<V}C,RzHISf\kD﹨u@_x y1FBOQruA><چ^n ]~)֋s:=ZנDZV뱞zuoֺՊ]z^9왁boQψ]y|j:ZCۤe]J&/iAr z7'Gþ]Icht :.5(8FcL`U ::Oz5^INɰ T^CMQlT7R/Q2F`iUsI-Rʮ0(Pq(r^h-5uWߠ4bb H^ K3ѦH'&maOqXu_(k;^:#FY0@`K7V?p\a|&._#kܘvG[OrRkE1wwiu ݪ[d9DW# :_O=:M2df&:IN[_8#l<';Z 2Rݶ]vVdSSdO1P?.H6fטM=5#VJ&3" !( bsiIk5jyy䲭s$6}X2 d@5Xtf:h.&`I b썸#Y*D,[? |ҦKk(Kؔ[bK 'A_x;P묲+ #7w!,< xYnqFfX}Ԍj\LӍrRLTgкMQgتέμ:Ap& [8ޕNUΨ{&r#f=aʳtO8)4gMT;6bXЗ|cbib輒 #< QnQ2^.ٜ*Bc⟈* K& w ўS 73]{F[#!{A>ýJ_f\ЂwSsRYM? [xB/;%%W:JSn5lBf/i.3Nڀ9b! 
X_h9"5f 6`賞If2/(&t~| ,rCm9'j2PHZOّxwmP"0 \."XyرYMh90JaU)Qi9-AA!F8BOC~nCCCN3O;E aSIfjV]ٌoǹ-/J$̷_Elrpi*k 3UIHoWswx#,lEMd׫.!)kl-2Վۀ-XV*t3zۯb95eؙ~D@hcfy,^!Fa`fpl`ր4V/3\lcLb~ 1l)TjZB 4~L`n:˥ɤՅy w|uPHck)ʕfaiyaxwj7p6bTMaBߦ%X T7lzhX.hDي^nwvMs!b5:i{=hߘuE[%qci-.ɯo *g~8滆lUWfoF9Nn}QˢPЄ?zZ|S'62GI?-sR&!]}IL_}}Hɠ{R04#.Ω [Mm@u$9TqXj4=E=%nD}0R;rbn٠p5]jp Ё串*AϐYC?XÜ*hUVPUc+x@g'yl]ZR+>\ 8} =x1@ֶR 3{)E\a0ZԆ@z(vB ],e.F+q& cgl\c<&‚tah̚r"'8ޚ2wuBlƌzY-uyflaZ 7){r֐l3VpH·|6'% / 5z>0a|mfި"I1;5iIiRAs߉1[>G Y&e)hPjȪ .xyh0f:KѾf'%sWJD }]ܧ!6W#g=݀aiN<f/44!A!X )e=8,j3+s"𴰘@5JUKݍpܨ+]1?aohocB)Bք>5#]wɮY3*nʏ~(N>W媼Pzf֓$$=J[ۥHwj6 +LQv[g3Bp@I0U6t/}6ߵZVBbdItfp;1ƨOu}k@TkDg yQl{); i?>8̶* CZ@QS>F;u,`&T*R5l,m3¦Jeu.Y%dQty^J%:g)qm휲{7[w٥S%vc$/3{L&c2IhPrHDulߤ޽dv֭Q;zh³_qƗ]3OƆ[ۨɝqS$'isdH#V3_;YՓf֤M>fWmwk5u39x# TYO"~Rk%&[ͯ hj=}@TBL`j財=IyxlR{BA2q(w +Y$4O[=],UK7B1Ihd珄pa~1L5܇wUfݾ[b1TX\[M6K΢o^*"$ʼnc8AFW| jQP᤽~ϒFٌͱvUwRvtBdFērܐYhfUwwh3hL̊?BcfC,7߃?;0y50C@;H ")b(*.EኄYڂJiTڢ=T фLs4̻g,ұ9z87pj|4`SM#]s i۹K+&Li=;4{Ui\rr44}VLUnٷ'[;|J 5/af=f*,r-#CC|d$6 L]ejOO+8m˥"CB_$ /Iac!K4_Hr#8#'gUp0;xX5x|JkEOi kɰŵyDCIKF6$W؋)e>=\PP/ؖv>BTM[5Zi3q@еB:>[@QM8@dHj7^x,s9;uv RiSg7Nq7tEI7n9ЗdI{kTQD)J#SJ8u%#D<{1\ĕW.ܪڜ*Tv Y/r fHԬ!I i4#^^=*a7tdP 7]LT[eqFx eq.D]xȌE/5e<< #F(rK#_]=Ym~~mۉy1,Tm<٣еl5Q,J"T@o `Msu*,I1CK,xqۨXgfb}rd&KaǂeggDY[ Hx•FgÇ3]{+yz֓m7ۮ/l/OS[A։QpqJ39[ 1 {&Vy!nvfhZn|ӟ#%&yeIҽ3ְQ;Țr3ޚ# ,ň񺚧=w6iQB%Sq-#OWPHr7dh4G t q=:duK_G}Z=ZC 킑_$"AhshZMGmxp{ciJx*3ZS1p#r8cRс`ֳRqp6BXp$ S6UG;Ii M]V>zz:+j l?2ZRmDѻkܜMִP6;,yJ)ɘ{&¹DJCMaЇu]ݶDGu'cሺת4h/ۉ\ӑ"ض8Z3prиɜw3nt,+^C 0zvϗ}8^"R"ɰ n ƒ1gPww]5,΂Jp0;(ZZ{.[ ǰf&*@sA|${ ]Xc]#8}dRC=4}%D駱Ka)Rhbne2?Pq]`/ePfvċ!)6,+ȭ$Rfb,E=ck]Xy /8KJ3ELŁ v"YEnI徠`.k.zSs>JvSj}Ri):7z7a~l}Mi]gBTY#x3j)+3ח iM尃g +8,bﵢN"l< M';*t<ks"? ԓ|d]+ 0_6J>4T'H0"+ۄu5eZȗ)!A~6,I!s,2X؟ M'$8U3hN7i]$㯇z;[%!Cx ǔ?,+|VJWK£-x7QXqP)\JCm2V-!V&!&R_Q90D$v20OxuK4@ EA}ĝQM5-bcQsGK:PDNpK %ჭy\,7'hgGǚJ3 +tEt]T~@/)8P1)i2٤#x-IV\b uFcO6.ܐ gJ К7$reqC-*O4'TZn'B=.iӴH8CԔr# sc'*l+r|h3XkOД!,ͰqV%rq0u>;JC[T #`awaa3E+&ܪ L|I$ٓ+jf$atuRHV !}טC=6А7;}Gic:`<YTʜa =(1!vDD8rݭL3'+z\9>Hd%LbTb6L?4ۈL3J˃65!ሂs KUrgjA -9 ,4ÌP2=XYaHJ7e^d<&G4$zC G6CI H8ig<1-zCo4CWQ vIY`Xi%n ̇(`+M`~pH㋦M7|ٙa 9^*c"Q {ͧ֎s1yfrЍ#X;P 0ڶJ{=qȸrp0 M &]gtǑ, O(&G%ɣ+ikϊ$Ù?#JO0Ĝl8imqv"5*幵HFl `~azϘ$nPaTD'0Jp[ `(PFev ǟuI;Sn230/V[8P8|vϑ,ӡJ[ 78WO+fNqDxEJ(&r)*v*gc:FRD pVha}Ի|1P*My[iS;EipS.rw[Fw1ri:/WrTc*~'Mr엒Ϛ`’Տج p+0#旅2DOu)}VX+ڞV~s^]M"] Giu1\ 3'I2w"!XҌ DN6@ˋ|mG<]{dKi`a8cx[̦ !䥕!׳Q<0\=;YU֓o#Act(~e`f9bT.١zO=A &zEܟɴm[ABUqYn+ovSxqfs=z.0Aˌ|b.-5 < WC]W>Qi [g&`+ۉ"i]-y u8QYJb[W$c:(/ct!v?n($;R02 OJKPAJA=jX{927o3E61~ڒ$ؒ/12`9;.W/8لͰ_&wⒷzDxz (K9:m-vDkڭݡ4|>!Gݏ# w nޙGEUq00Cƌ3@,AȮ $& )%B, .$fai{.;,RKYe]M<5|KDxX @{3Dh0U8`TW*5sceBv\G@E{II|OF2GĮN0]U8` %7@c}?/'ޟYW5[sV,Xj|ӭsm|XEgZm'S"vp2MΦ2o&IN@3݊x׬1D4N^=IwԌpB·?(fH5Fz%޲ұ ;]l*%0'X|ey-9(Y{; Nb{&ksYPpE'0y< [d(>{a'4 QZ5mWm^|B 6#a50Љ\ >btH8%7(WN$#:JS$9V\ul.Iύv]{smjb؛.W5Y ћxXr31bUS>`d\ZVF>h !{K;/C +ͭXn~MB]]s *+ Czx|&74]*u#[JZFZ1YB8vw+`+YNwlKO7>,f^tj-S)UlMvGi@܅mE4h- pDjHH@h o@>dkpDzV>7װ'˯r6ݮ(yuT9ظB朖ss֜~g25*z9&6g\,aE*Q #LNJj c]4}2#DfvAx@66=&pKY|a'lћy\dDP%_Mih$,w)g@o<[dV҉o4,~WO\i4JPR׏>1 'Wi+V逺hF*;@ZNx>S7cl܍2NdJ=: }v2.|ѺF\cg߱24PdGLi!q|?h̯PJx6=U:%{1KR5O8Z(f_j@/7bՇ'pl޿H7|dA hyxPK:D˚bU |R\iAa4{SM@pV$rCdK6SObGK8Gj؞],,7`Kqx{Hנ>/;NQuc AϻOUEwKcnl;)yͧoQd[XǞ]Pa)G Lua1J.l.һ|MwdWo,6=%^_i؟FwftI8M!XOn.i}d"=Є~db2mJcJ[>omOcy:C[i ? 
[binary PNG image data omitted; a stray "image/svg+xml" fragment suggests a related SVG image entry was also garbled at this point in the archive]
sqlfluff-2.3.5/mypy.ini
[mypy]
warn_unused_configs = True
warn_unused_ignores = True

[mypy-sqlfluff.*]
implicit_reexport = True

# skip type checking for 3rd party packages for which stubs are not available
[mypy-pathspec.*]
ignore_missing_imports = True

[mypy-diff_cover.*]
ignore_missing_imports = True

[mypy-dbt.*]
ignore_missing_imports = True

[mypy-pluggy.*]
ignore_missing_imports = True

[mypy-tqdm.*]
ignore_missing_imports = True

[mypy-importlib_metadata.*]
ignore_missing_imports = True

[mypy-tblib.*]
ignore_missing_imports = True
sqlfluff-2.3.5/plugins/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/MANIFEST.in
include src/sqlfluff_plugin_example/plugin_default_config.cfg
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/README.md
# Example rules plugin

This example plugin showcases the ability to set up installable rule plugins. This interface is supported from version `0.4.0` of SQLFluff onwards.
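Once installed (for example with `pip install -e .` from this plugin's directory), the rule runs alongside SQLFluff's core rules. The snippet below is a minimal, illustrative sketch (not part of the packaged plugin) of one way to confirm the rule fires. It assumes the plugin and SQLFluff are installed in the same environment and relies on the plugin's default forbidden columns (`bar`, `baaz`):

```python
from sqlfluff.core import Linter

# "bar" is one of the plugin's default forbidden ORDER BY columns.
linter = Linter(dialect="ansi")
linted = linter.lint_string("SELECT * FROM foo ORDER BY bar\n")

# With the plugin registered, "Example_L001" should appear among the codes.
print([violation.rule_code() for violation in linted.violations])
```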
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/setup.py
"""Setup file for an example rules plugin."""
from setuptools import find_packages, setup

# Change these names in your plugin, e.g. company name or plugin purpose.
PLUGIN_LOGICAL_NAME = "example"
PLUGIN_ROOT_MODULE = "sqlfluff_plugin_example"

setup(
    name=f"sqlfluff-plugin-{PLUGIN_LOGICAL_NAME}",
    version="1.0.0",
    include_package_data=True,
    package_dir={"": "src"},
    packages=find_packages(where="src"),
    install_requires="sqlfluff>=0.4.0",
    entry_points={
        "sqlfluff": [f"sqlfluff_{PLUGIN_LOGICAL_NAME} = {PLUGIN_ROOT_MODULE}"]
    },
)
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/src/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/__init__.py
"""An example of a custom rule implemented through the plugin system.

This uses the rules API supported from 0.4.0 onwards.
"""
from typing import List, Type

from sqlfluff.core.config import ConfigLoader
from sqlfluff.core.plugin import hookimpl
from sqlfluff.core.rules import BaseRule

# For backward compatibility we still support importing
# rules within the body of the root plugin module. This is included
# here for illustration, but also such that support for this import
# order can be tested in the test suite (and that the associated
# warning is triggered).
# See note below in `get_rules()` for more details.
# i.e. we DO NOT recommend importing here:
from sqlfluff_plugin_example.rules import Rule_Example_L001  # noqa: F401


@hookimpl
def get_rules() -> List[Type[BaseRule]]:
    """Get plugin rules.

    NOTE: It is much better that we only import the rule on demand.
    The root module of the plugin (i.e. this file which contains all
    of the hook implementations) should have fully loaded before we
    try and import the rules. This is partly for performance reasons
    - but more because the definition of a BaseRule requires that all
    of the get_configs_info() methods have both been defined _and
    have run_ before so all the validation information is available
    for the validation steps in the meta class.
    """
    # i.e. we DO recommend importing here:
    from sqlfluff_plugin_example.rules import Rule_Example_L001  # noqa: F811

    return [Rule_Example_L001]


@hookimpl
def load_default_config() -> dict:
    """Loads the default configuration for the plugin."""
    return ConfigLoader.get_global().load_config_resource(
        package="sqlfluff_plugin_example",
        file_name="plugin_default_config.cfg",
    )


@hookimpl
def get_configs_info() -> dict:
    """Get rule config validations and descriptions."""
    return {
        "forbidden_columns": {"definition": "A list of columns to forbid"},
    }
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/plugin_default_config.cfg
[sqlfluff:rules:Example_L001]
forbidden_columns = bar, baaz
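For illustration only (this is not part of the packaged plugin): because SQLFluff layers user configuration over plugin defaults, a project consuming the plugin could override this default in its own `.sqlfluff` file. The column names below are hypothetical:

```ini
[sqlfluff:rules:Example_L001]
forbidden_columns = created_at, updated_at
```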
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/src/sqlfluff_plugin_example/rules.py
"""An example of a custom rule implemented through the plugin system.

This uses the rules API supported from 0.4.0 onwards.
"""
from sqlfluff.core.rules import (
    BaseRule,
    LintResult,
    RuleContext,
)
from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler


# These two decorators allow plugins
# to be displayed in the sqlfluff docs
class Rule_Example_L001(BaseRule):
    """ORDER BY on these columns is forbidden!

    **Anti-pattern**

    Using ``ORDER BY`` on some forbidden columns.

    .. code-block:: sql

        SELECT *
        FROM foo
        ORDER BY bar, baz

    **Best practice**

    Do not order by these columns.

    .. code-block:: sql

        SELECT *
        FROM foo
        ORDER BY bar
    """

    groups = ("all",)
    config_keywords = ["forbidden_columns"]
    crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"})
    is_fix_compatible = True

    def __init__(self, *args, **kwargs):
        """Overwrite __init__ to set config."""
        super().__init__(*args, **kwargs)
        self.forbidden_columns = [
            col.strip() for col in self.forbidden_columns.split(",")
        ]

    def _eval(self, context: RuleContext):
        """We should not ORDER BY forbidden_columns."""
        for seg in context.segment.segments:
            col_name = seg.raw.lower()
            if col_name in self.forbidden_columns:
                return LintResult(
                    anchor=seg,
                    description=f"Column `{col_name}` not allowed in ORDER BY.",
                )
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/test/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/test/rules/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/test/rules/rule_test_cases_test.py
"""Runs the rule test cases."""
import os

import pytest

from sqlfluff.utils.testing.rules import load_test_cases, rules__test_helper

ids, test_cases = load_test_cases(
    test_cases_path=os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "test_cases", "*.yml"
    )
)


@pytest.mark.parametrize("test_case", test_cases, ids=ids)
def test__rule_test_case(test_case):
    """Run the tests."""
    rules__test_helper(test_case)
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/test/rules/test_cases/
sqlfluff-2.3.5/plugins/sqlfluff-plugin-example/test/rules/test_cases/Rule_Example_L001.yml
rule: Example_L001

no_forbidden_col_used:
  pass_str: |
    select a, sum(b)
    from tbl
    group by a
    order by a

no_order_by_used:
  pass_str: |
    select a, b, c
    from tbl

forbidden_col_used:
  fail_str: |
    select bar, baz
    from tbl
    order by bar
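As a purely hypothetical illustration of the harness above (not one of the plugin's shipped test cases), a case exercising a non-default column list via a per-case `configs:` override might look like the following; the nested schema is assumed to mirror the convention used in SQLFluff's own rule test fixtures:

```yaml
custom_forbidden_col:
  fail_str: |
    select a from tbl order by created_at
  configs:
    rules:
      Example_L001:
        forbidden_columns: created_at
```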
sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/
sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/LICENSE.md
MIT License

Copyright (c) 2018 Alan Cruickshank

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/README.md
# dbt plugin for SQLFluff

This plugin works with [SQLFluff](https://pypi.org/project/sqlfluff/), the SQL linter for humans, to correctly parse and compile SQL projects using [dbt](https://pypi.org/project/dbt/).

For more details on how to use this plugin, [see the documentation](https://docs.sqlfluff.com/en/stable/configuration.html#dbt-project-configuration).
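A minimal, illustrative `.sqlfluff` configuration for a dbt project might look like the following. The paths shown are placeholder assumptions and should point at your own dbt project and profiles directories:

```ini
[sqlfluff]
templater = dbt

[sqlfluff:templater:dbt]
project_dir = ./
profiles_dir = ~/.dbt
```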
-e plugins/sqlfluff-templater-dbt pushd plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project dbt deps popd sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/docker/shell000077500000000000000000000001751451700765000237240ustar00rootroot00000000000000#!/usr/bin/env bash my_path="$( cd "$(dirname "$0")"; pwd -P)" docker compose -f ${my_path}/docker-compose.yml exec app bash sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/docker/shutdown000077500000000000000000000002101451700765000244560ustar00rootroot00000000000000#!/usr/bin/env bash my_path="$( cd "$(dirname "$0")"; pwd -P)" docker compose -f ${my_path}/docker-compose.yml down -v --remove-orphans sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/docker/startup000077500000000000000000000005551451700765000243210ustar00rootroot00000000000000#!/usr/bin/env bash set -ex export COMPOSE_DOCKER_CLI_BUILD=1 export DOCKER_BUILDKIT=1 my_path="$( cd "$(dirname "$0")"; pwd -P)" ${my_path}/shutdown docker compose -f ${my_path}/docker-compose.yml build docker compose -f ${my_path}/docker-compose.yml up -d docker compose -f ${my_path}/docker-compose.yml exec app "/app/plugins/sqlfluff-templater-dbt/docker/init" sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/setup.cfg000066400000000000000000000041171451700765000232410ustar00rootroot00000000000000[metadata] name = sqlfluff-templater-dbt version = 2.3.5 description = Lint your dbt project SQL long_description = file: README.md long_description_content_type = text/markdown url = https://github.com/sqlfluff/sqlfluff author = Alan Cruickshank author_email = alan@designingoverload.com license = MIT License license_files = LICENSE.md project_urls = Homepage = https://www.sqlfluff.com Documentation = https://docs.sqlfluff.com Changes = https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md Source = https://github.com/sqlfluff/sqlfluff Issue Tracker = https://github.com/sqlfluff/sqlfluff/issues Twitter = https://twitter.com/SQLFluff Chat = https://github.com/sqlfluff/sqlfluff#sqlfluff-on-slack classifiers = Development Status :: 5 - Production/Stable Environment :: Console Intended Audience :: Developers License :: OSI Approved :: MIT License Operating System :: Unix Operating System :: POSIX Operating System :: MacOS Operating System :: Microsoft :: Windows Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: Implementation :: CPython Topic :: Utilities Topic :: Software Development :: Quality Assurance keywords = sqlfluff sql linter formatter bigquery clickhouse databricks db2 duckdb exasol greenplum hive materialize mysql postgres redshift snowflake soql sparksql sqlite teradata tsql dbt [options] packages = find: python_requires = >=3.7 install_requires = sqlfluff==2.3.5 dbt-core>=1.0.0 jinja2-simple-tags>=0.3.1 markupsafe pydantic rich ruamel.yaml [options.packages.find] include = sqlfluff_templater_dbt sqlfluff_templater_dbt.osmosis [options.entry_points] sqlfluff = sqlfluff_templater_dbt = sqlfluff_templater_dbt sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/setup.py000066400000000000000000000001131451700765000231220ustar00rootroot00000000000000"""Setup file for example plugin.""" from setuptools import setup setup() 
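With the packaging above in place, the `[options.entry_points]` section is what lets SQLFluff discover the dbt templater at runtime. A possible usage sketch (assuming the package is installed into the same environment as SQLFluff and run from a dbt project root; the model path is illustrative):

    pip install sqlfluff-templater-dbt
    sqlfluff lint models/my_model.sql --templater dbt --dialect postgres

Equivalently, `templater = dbt` can be set under `[sqlfluff]` in a `.sqlfluff` file, as the test fixtures later in this archive do.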
sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/000077500000000000000000000000001451700765000261455ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/__init__.py000066400000000000000000000003721451700765000302600ustar00rootroot00000000000000"""Defines the hook endpoints for the dbt templater plugin.""" from sqlfluff.core.plugin import hookimpl from sqlfluff_templater_dbt.templater import DbtTemplater @hookimpl def get_templaters(): """Get templaters.""" return [DbtTemplater] sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py000066400000000000000000001005571451700765000305240ustar00rootroot00000000000000"""Defines the dbt templater. NOTE: The dbt python package adds a significant overhead to import. This module is also loaded on every run of SQLFluff regardless of whether the dbt templater is selected in the configuration. The templater is however only _instantiated_ when selected, and as such, all imports of the dbt libraries are contained within the DbtTemplater class and so are only imported when necessary. """ import logging import os import os.path from collections import deque from contextlib import contextmanager from dataclasses import dataclass from typing import ( TYPE_CHECKING, Any, Callable, Deque, Dict, Iterator, List, Optional, Tuple, Union, ) from jinja2 import Environment from jinja2_simple_tags import StandaloneTag from sqlfluff.core.cached_property import cached_property from sqlfluff.core.errors import SQLFluffSkipFile, SQLFluffUserError, SQLTemplaterError from sqlfluff.core.templaters.base import TemplatedFile, large_file_check from sqlfluff.core.templaters.jinja import JinjaTemplater if TYPE_CHECKING: # pragma: no cover from dbt.semver import VersionSpecifier from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.core import FluffConfig # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") @dataclass class DbtConfigArgs: """Arguments to load dbt runtime config.""" project_dir: Optional[str] = None profiles_dir: Optional[str] = None profile: Optional[str] = None target: Optional[str] = None threads: int = 1 single_threaded: bool = False # dict in 1.5.x onwards, json string before. # NOTE: We always set this value when instantiating this # class. If we rely on defaults, this should default to # an empty string pre 1.5.x vars: Optional[Union[Dict, str]] = None # NOTE: The `which` argument here isn't covered in tests, but many # dbt packages assume that it will have been set. # https://github.com/sqlfluff/sqlfluff/issues/4861 # https://github.com/sqlfluff/sqlfluff/issues/4965 which: Optional[str] = "compile" class DbtTemplater(JinjaTemplater): """A templater using dbt.""" name = "dbt" sequential_fail_limit = 3 adapters = {} def __init__(self, **kwargs): self.sqlfluff_config = None self.formatter = None self.project_dir = None self.profiles_dir = None self.working_dir = os.getcwd() self._sequential_fails = 0 super().__init__(**kwargs) def config_pairs(self): """Returns info about the given templater for output by the cli.""" return [("templater", self.name), ("dbt", self.dbt_version)] @cached_property def _dbt_version(self) -> "VersionSpecifier": """Fetches the installed dbt version. This is cached in the raw dbt format. NOTE: We do this only on demand to reduce the amount of loading required to discover the templater. 
""" from dbt.version import get_installed_version return get_installed_version() @cached_property def dbt_version(self): """Gets the dbt version.""" return self._dbt_version.to_version_string() @cached_property def dbt_version_tuple(self): """Gets the dbt version.""" return int(self._dbt_version.major), int(self._dbt_version.minor) def try_silence_dbt_logs(self) -> None: """Attempt to silence dbt logs. During normal operation dbt is likely to log output such as: .. code-block:: 14:13:10 Registered adapter: snowflake=1.6.0 This is emitted by dbt directly to stdout/stderr, and so for us to silence it (e.g. when outputting to json or yaml) we need to reach into the internals of dbt and silence it directly. https://github.com/sqlfluff/sqlfluff/issues/5054 NOTE: We wrap this in a try clause so that if the API changes within dbt that we don't get a direct fail. This was tested on dbt-code==1.6.0. """ # First check whether we need to silence the logs. If a formatter # is present then assume that it's not a problem if not self.formatter: try: from dbt.events.functions import cleanup_event_logger cleanup_event_logger() except ImportError: pass @cached_property def dbt_config(self): """Loads the dbt config.""" from dbt import flags from dbt.adapters.factory import register_adapter from dbt.config import read_user_config from dbt.config.runtime import RuntimeConfig as DbtRuntimeConfig # Attempt to silence internal logging at this point. # https://github.com/sqlfluff/sqlfluff/issues/5054 self.try_silence_dbt_logs() if self.dbt_version_tuple >= (1, 5): user_config = None # 1.5.x+ this is a dict. cli_vars = self._get_cli_vars() else: # Here, we read flags.PROFILE_DIR directly, prior to calling # set_from_args(). Apparently, set_from_args() sets PROFILES_DIR # to a lowercase version of the value, and the profile wouldn't be # found if the directory name contained uppercase letters. This fix # was suggested and described here: # https://github.com/sqlfluff/sqlfluff/issues/2253#issuecomment-1018722979 user_config = read_user_config(flags.PROFILES_DIR) # Pre 1.5.x this is a string. cli_vars = str(self._get_cli_vars()) flags.set_from_args( DbtConfigArgs( project_dir=self.project_dir, profiles_dir=self.profiles_dir, profile=self._get_profile(), vars=cli_vars, threads=1, ), user_config, ) self.dbt_config = DbtRuntimeConfig.from_args( DbtConfigArgs( project_dir=self.project_dir, profiles_dir=self.profiles_dir, profile=self._get_profile(), target=self._get_target(), vars=cli_vars, threads=1, ) ) register_adapter(self.dbt_config) return self.dbt_config @cached_property def dbt_compiler(self): """Loads the dbt compiler.""" from dbt.compilation import Compiler as DbtCompiler self.dbt_compiler = DbtCompiler(self.dbt_config) return self.dbt_compiler @cached_property def dbt_manifest(self): """Loads the dbt manifest.""" from dbt.exceptions import DbtProjectError # NOTE: The uninstalled packages error only exists from around # dbt 1.4 onwards. Before that we'll just get a slightly uglier # error - not a breaking issue. try: from dbt.exceptions import UninstalledPackagesFoundError summary_errors = (DbtProjectError, UninstalledPackagesFoundError) except ImportError: summary_errors = (DbtProjectError,) # Set dbt not to run tracking. We don't load # a full project and so some tracking routines # may fail. 
from dbt.tracking import do_not_track do_not_track() # dbt 0.20.* and onward from dbt.parser.manifest import ManifestLoader old_cwd = os.getcwd() try: # Changing cwd temporarily as dbt is not using project_dir to # read/write `target/partial_parse.msgpack`. This can be undone when # https://github.com/dbt-labs/dbt-core/issues/6055 is solved. # For dbt 1.4+ this isn't necessary, but it is required for 1.3 # and before. if self.dbt_version_tuple < (1, 4): os.chdir(self.project_dir) self.dbt_manifest = ManifestLoader.get_full_manifest(self.dbt_config) except summary_errors as err: # pragma: no cover raise SQLFluffUserError(f"{err.__class__.__name__}: {err}") finally: if self.dbt_version_tuple < (1, 4): os.chdir(old_cwd) return self.dbt_manifest @cached_property def dbt_selector_method(self): """Loads the dbt selector method.""" if self.formatter: # pragma: no cover TODO? self.formatter.dispatch_compilation_header( "dbt templater", "Compiling dbt project..." ) from dbt.graph.selector_methods import ( MethodManager as DbtSelectorMethodManager, ) from dbt.graph.selector_methods import ( MethodName as DbtMethodName, ) selector_methods_manager = DbtSelectorMethodManager( self.dbt_manifest, previous_state=None ) self.dbt_selector_method = selector_methods_manager.get_method( DbtMethodName.Path, method_arguments=[] ) if self.formatter: # pragma: no cover TODO? self.formatter.dispatch_compilation_header( "dbt templater", "Project Compiled." ) return self.dbt_selector_method def _get_profiles_dir(self): """Get the dbt profiles directory from the configuration. The default is `~/.dbt` but we use the default_profiles_dir from the dbt library to support a change of default in the future, as well as to support the same overwriting mechanism as dbt (currently an environment variable). """ # Where default_profiles_dir is available, use it. For dbt 1.2 and # earlier, it is not, so fall back to the flags option which should # still be available in those versions. from dbt import flags # From dbt 1.3 onwards, the default_profiles_dir resolver is # available. Before that version we use the flags module try: from dbt.cli.resolvers import default_profiles_dir except ImportError: default_profiles_dir = None default_dir = ( default_profiles_dir() if default_profiles_dir is not None else flags.PROFILES_DIR ) dbt_profiles_dir = os.path.abspath( os.path.expanduser( self.sqlfluff_config.get_section( (self.templater_selector, self.name, "profiles_dir") ) or (os.getenv("DBT_PROFILES_DIR") or default_dir) ) ) if not os.path.exists(dbt_profiles_dir): templater_logger.error( f"dbt_profiles_dir: {dbt_profiles_dir} could not be accessed. " "Check it exists." ) return dbt_profiles_dir def _get_project_dir(self): """Get the dbt project directory from the configuration. Defaults to the working directory. """ dbt_project_dir = os.path.abspath( os.path.expanduser( self.sqlfluff_config.get_section( (self.templater_selector, self.name, "project_dir") ) or os.getcwd() ) ) if not os.path.exists(dbt_project_dir): templater_logger.error( f"dbt_project_dir: {dbt_project_dir} could not be accessed. " "Check it exists." 
) return dbt_project_dir def _get_profile(self): """Get a dbt profile name from the configuration.""" return self.sqlfluff_config.get_section( (self.templater_selector, self.name, "profile") ) def _get_target(self): """Get a dbt target name from the configuration.""" return self.sqlfluff_config.get_section( (self.templater_selector, self.name, "target") ) def _get_cli_vars(self) -> dict: cli_vars = self.sqlfluff_config.get_section( (self.templater_selector, self.name, "context") ) return cli_vars if cli_vars else {} def sequence_files( self, fnames: List[str], config=None, formatter=None ) -> Iterator[str]: """Reorder fnames to process dependent files first. This avoids errors when an ephemeral model is processed before use. """ if formatter: # pragma: no cover formatter.dispatch_compilation_header("dbt templater", "Sorting Nodes...") # Initialise config if not already done self.sqlfluff_config = config if not self.project_dir: self.project_dir = self._get_project_dir() if not self.profiles_dir: self.profiles_dir = self._get_profiles_dir() # Populate full paths for selected files full_paths: Dict[str, str] = {} selected_files = set() for fname in fnames: fpath = os.path.join(self.working_dir, fname) full_paths[fpath] = fname selected_files.add(fpath) ephemeral_nodes: Dict[str, Tuple[str, Any]] = {} # Extract the ephemeral models for key, node in self.dbt_manifest.nodes.items(): if node.config.materialized == "ephemeral": # The key is the full filepath. # The value tuple, with the filepath and a list of dependent keys ephemeral_nodes[key] = ( os.path.join(self.project_dir, node.original_file_path), node.depends_on.nodes, ) # Yield ephemeral nodes first. We use a deque for efficient re-queuing. # We iterate through the deque, yielding any nodes without dependents, # or where those dependents have already yielded, first. The original # mapping is still used to hold the metadata on each key. already_yielded = set() ephemeral_buffer: Deque[str] = deque(ephemeral_nodes.keys()) while ephemeral_buffer: key = ephemeral_buffer.popleft() fpath, dependents = ephemeral_nodes[key] # If it's not in our selection, skip it if fpath not in selected_files: templater_logger.debug("- Purging unselected ephemeral: %r", fpath) # If there are dependent nodes in the set, don't process it yet. elif any( dependent in ephemeral_buffer for dependent in dependents ): # pragma: no cover templater_logger.debug( "- Requeuing ephemeral with dependents: %r", fpath ) # Requeue it for later ephemeral_buffer.append(key) # Otherwise yield it. else: templater_logger.debug("- Yielding Ephemeral: %r", fpath) yield full_paths[fpath] already_yielded.add(full_paths[fpath]) for fname in fnames: if fname not in already_yielded: yield fname # Dedupe here so we don't yield twice already_yielded.add(fname) else: templater_logger.debug( "- Skipping yield of previously sequenced file: %r", fname ) @large_file_check def process( self, *, fname: str, in_str: Optional[str] = None, config: Optional["FluffConfig"] = None, formatter: Optional["OutputStreamFormatter"] = None, ): """Compile a dbt model and return the compiled SQL. Args: fname: Path to dbt model(s) in_str: fname contents using configured encoding config: A specific config to use for this templating operation. Only necessary for some templaters. formatter: Optional object for output. """ # Stash the formatter if provided to use in cached methods. 
self.formatter = formatter self.sqlfluff_config = config self.project_dir = self._get_project_dir() self.profiles_dir = self._get_profiles_dir() fname_absolute_path = os.path.abspath(fname) try: # These are the names in dbt-core 1.4.1+ # https://github.com/dbt-labs/dbt-core/pull/6539 from dbt.exceptions import CompilationError, FailedToConnectError except ImportError: # These are the historic names for older dbt-core versions from dbt.exceptions import CompilationException as CompilationError from dbt.exceptions import ( FailedToConnectException as FailedToConnectError, ) try: os.chdir(self.project_dir) processed_result = self._unsafe_process(fname_absolute_path, in_str, config) # Reset the fail counter self._sequential_fails = 0 return processed_result except FailedToConnectError as e: return None, [ SQLTemplaterError( "dbt tried to connect to the database and failed: you could use " "'execute' to skip the database calls. See " "https://docs.getdbt.com/reference/dbt-jinja-functions/execute/ " f"Error: {e.msg}", fatal=True, ) ] except CompilationError as e: # Increment the counter self._sequential_fails += 1 if e.node: _msg = ( f"dbt compilation error on file '{e.node.original_file_path}'" f", {e.msg}" ) else: _msg = f"dbt compilation error: {e.msg}" return None, [ SQLTemplaterError( _msg, # It's fatal if we're over the limit fatal=self._sequential_fails > self.sequential_fail_limit, ) ] # If a SQLFluff error is raised, just pass it through except SQLTemplaterError as e: # pragma: no cover return None, [e] finally: os.chdir(self.working_dir) def _find_node(self, fname, config=None): if not config: # pragma: no cover raise ValueError( "For the dbt templater, the `process()` method " "requires a config object." ) if not fname: # pragma: no cover raise ValueError( "For the dbt templater, the `process()` method requires a file name" ) elif fname == "stdin": # pragma: no cover raise ValueError( "The dbt templater does not support stdin input, provide a path instead" ) selected = self.dbt_selector_method.search( included_nodes=self.dbt_manifest.nodes, # Selector needs to be a relative path selector=os.path.relpath(fname, start=os.getcwd()), ) results = [self.dbt_manifest.expect(uid) for uid in selected] if not results: skip_reason = self._find_skip_reason(fname) if skip_reason: raise SQLFluffSkipFile( f"Skipped file {fname} because it is {skip_reason}" ) raise SQLFluffSkipFile( "File %s was not found in dbt project" % fname ) # pragma: no cover return results[0] def _find_skip_reason(self, fname) -> Optional[str]: """Return string reason if model okay to skip, otherwise None.""" # Scan macros. abspath = os.path.abspath(fname) for macro in self.dbt_manifest.macros.values(): if os.path.abspath(macro.original_file_path) == abspath: return "a macro" # Scan disabled nodes. for nodes in self.dbt_manifest.disabled.values(): for node in nodes: if os.path.abspath(node.original_file_path) == abspath: return "disabled" return None # pragma: no cover def _unsafe_process(self, fname, in_str=None, config=None): original_file_path = os.path.relpath(fname, start=os.getcwd()) # Below, we monkeypatch Environment.from_string() to intercept when dbt # compiles (i.e. runs Jinja) to expand the "node" corresponding to fname. # We do this to capture the Jinja context at the time of compilation, i.e.: # - Jinja Environment object # - Jinja "globals" dictionary # # This info is captured by the "make_template()" function, which in # turn is used by our parent class' (JinjaTemplater) slice_file() # function. 
old_from_string = Environment.from_string # Start with render_func undefined. We need to know whether it has been # overwritten. render_func: Optional[Callable[[str], str]] = None if self.dbt_version_tuple >= (1, 3): compiled_sql_attribute = "compiled_code" raw_sql_attribute = "raw_code" else: # pragma: no cover compiled_sql_attribute = "compiled_sql" raw_sql_attribute = "raw_sql" def from_string(*args, **kwargs): """Replaces (via monkeypatch) the jinja2.Environment function.""" nonlocal render_func # Is it processing the node corresponding to fname? globals = kwargs.get("globals") if globals: model = globals.get("model") if model: if model.get("original_file_path") == original_file_path: # Yes. Capture the important arguments and create # a render_func() closure which overwrites the variable # from within _unsafe_process when from_string is run. env = args[0] globals = args[2] if len(args) >= 3 else kwargs["globals"] # Overwrite the outer render_func def render_func(in_str): env.add_extension(SnapshotExtension) template = env.from_string(in_str, globals=globals) return template.render() return old_from_string(*args, **kwargs) # NOTE: We need to inject the project root here in reaction to the # breaking change upstream with dbt. Coverage works in 1.5.2, but # appears to no longer be covered in 1.5.3. # This change was backported and so exists in some versions # but not others. When not present, no additional action is needed. # https://github.com/dbt-labs/dbt-core/pull/7949 # On the 1.5.x branch this was between 1.5.1 and 1.5.2 try: from dbt.task.contextvars import cv_project_root cv_project_root.set(self.project_dir) # pragma: no cover except ImportError: cv_project_root = None # NOTE: _find_node will raise a compilation exception if the project # fails to compile, and we catch that in the outer `.process()` method. node = self._find_node(fname, config) templater_logger.debug( "_find_node for path %r returned object of type %s.", fname, type(node) ) save_ephemeral_nodes = dict( (k, v) for k, v in self.dbt_manifest.nodes.items() if v.config.materialized == "ephemeral" and not getattr(v, "compiled", False) ) try: # These are the names in dbt-core 1.4.1+ # https://github.com/dbt-labs/dbt-core/pull/6539 from dbt.exceptions import UndefinedMacroError except ImportError: # These are the historic names for older dbt-core versions from dbt.exceptions import UndefinedMacroException as UndefinedMacroError with self.connection(): # Apply the monkeypatch. Environment.from_string = from_string try: node = self.dbt_compiler.compile_node( node=node, manifest=self.dbt_manifest, ) except UndefinedMacroError as err: # The explanation on the undefined macro error is already fairly # explanatory, so just pass it straight through. raise SQLTemplaterError(str(err)) except Exception as err: # pragma: no cover # NOTE: We use .error() here rather than .exception() because # for most users, the trace which accompanies the latter isn't # particularly helpful. templater_logger.error( "Fatal dbt compilation error on %s. This occurs most often " "during incorrect sorting of ephemeral models before linting. " "Please report this error on github at " "https://github.com/sqlfluff/sqlfluff/issues, including " "both the raw and compiled sql for the model affected.", fname, ) # Additional error logging in case we get a fatal dbt error. raise SQLFluffSkipFile( # pragma: no cover f"Skipped file {fname} because dbt raised a fatal " f"exception during compilation: {err!s}" ) # NOTE: We don't do a `raise ...
from err` here because the # full trace is not useful for most users. In debugging # issues here it may be valuable to add the `from err` part # after the above `raise` statement. finally: # Undo the monkeypatch. Environment.from_string = old_from_string if hasattr(node, "injected_sql"): # If injected SQL is present, it contains a better picture # of what will actually hit the database (e.g. with tests). # However it's not always present. compiled_sql = node.injected_sql # pragma: no cover else: compiled_sql = getattr(node, compiled_sql_attribute) raw_sql = getattr(node, raw_sql_attribute) if not compiled_sql: # pragma: no cover raise SQLTemplaterError( "dbt templater compilation failed silently, check your " "configuration by running `dbt compile` directly." ) source_dbt_sql = in_str if not source_dbt_sql.rstrip().endswith("-%}"): n_trailing_newlines = len(source_dbt_sql) - len( source_dbt_sql.rstrip("\n") ) else: # Source file ends with right whitespace stripping, so there's # no need to preserve/restore trailing newlines, as they would # have been removed regardless of dbt's # keep_trailing_newline=False behavior. n_trailing_newlines = 0 templater_logger.debug( " Trailing newline count in source dbt model: %r", n_trailing_newlines, ) templater_logger.debug(" Raw SQL before compile: %r", source_dbt_sql) templater_logger.debug(" Node raw SQL: %r", raw_sql) templater_logger.debug(" Node compiled SQL: %r", compiled_sql) # When using dbt-templater, trailing newlines are ALWAYS REMOVED during # compiling. Unless fixed (like below), this will cause: # 1. Assertion errors in TemplatedFile, when it sanity checks the # contents of the sliced_file array. # 2. LT12 linting errors when running "sqlfluff lint foo_bar.sql" # since the linter will use the compiled code with the newlines # removed. # 3. "No newline at end of file" warnings in Git/GitHub since # sqlfluff uses the compiled SQL to write fixes back to the # source SQL in the dbt model. # # The solution is (note that both the raw and compiled files have # had trailing newline(s) removed by the dbt-templater): # 1. Check for trailing newlines before compiling by looking at the # raw SQL in the source dbt file. Remember the count of trailing # newlines. # 2. Set node.raw_sql/node.raw_code to the original source file contents. # 3. Append the count from #1 above to compiled_sql. (In # production, slice_file() does not usually use this string, # but some test scenarios do.) setattr(node, raw_sql_attribute, source_dbt_sql) # So for files that have no templated elements in them, render_func # will still be null at this point. If so, we replace it with a function # which just directly returns the input, but _also_ resets the trailing # newlines counter because they also won't have been stripped. if render_func is None: # NOTE: In this case, we shouldn't re-add newlines, because they # were never taken away. n_trailing_newlines = 0 # Overwrite the render_func placeholder. def render_func(in_str): """A render function which just returns the input.""" return in_str # At this point assert that we _have_ a render_func assert render_func is not None # TRICKY: dbt configures Jinja2 with keep_trailing_newline=False. # As documented (https://jinja.palletsprojects.com/en/3.0.x/api/), # this flag's behavior is: "Preserve the trailing newline when # rendering templates. The default is False, which causes a single # newline, if present, to be stripped from the end of the template." # # Below, we use "append_to_templated" to effectively "undo" this.
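        # Illustrative walk-through (an assumed example, not part of the
        # original code): a source of 'select 1\n' renders to 'select 1'
        # because dbt's Jinja environment strips the single trailing
        # newline; n_trailing_newlines is 1, so append_to_templated='\n'
        # below re-appends it, keeping the templated output aligned with
        # the raw source during slicing.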
raw_sliced, sliced_file, templated_sql = self.slice_file( source_dbt_sql, render_func=render_func, config=config, append_to_templated="\n" if n_trailing_newlines else "", ) # :HACK: If calling compile_node() compiled any ephemeral nodes, # restore them to their earlier state. This prevents a runtime error # in the dbt "_inject_ctes_into_sql()" function that occurs with # 2nd-level ephemeral model dependencies (e.g. A -> B -> C, where # both B and C are ephemeral). Perhaps there is a better way to do # this, but this seems good enough for now. for k, v in save_ephemeral_nodes.items(): if getattr(self.dbt_manifest.nodes[k], "compiled", False): self.dbt_manifest.nodes[k] = v return ( TemplatedFile( source_str=source_dbt_sql, templated_str=templated_sql, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), # No violations returned in this way. [], ) @contextmanager def connection(self): """Context manager that manages a dbt connection, if needed.""" from dbt.adapters.factory import get_adapter # We have to register the connection in dbt >= 1.0.0 ourselves # In previous versions, we relied on the functionality removed in # https://github.com/dbt-labs/dbt-core/pull/4062. adapter = self.adapters.get(self.project_dir) if adapter is None: adapter = get_adapter(self.dbt_config) self.adapters[self.project_dir] = adapter adapter.acquire_connection("master") adapter.set_relations_cache(self.dbt_manifest) yield # :TRICKY: Once connected, we never disconnect. Making multiple # connections during linting has proven to cause major performance # issues. class SnapshotExtension(StandaloneTag): """Dummy "snapshot" tags so raw dbt templates will parse. Context: dbt snapshots (https://docs.getdbt.com/docs/building-a-dbt-project/snapshots/#example) use custom Jinja "snapshot" and "endsnapshot" tags. However, dbt does not actually register those tags with Jinja. Instead, it finds and removes these tags during a preprocessing step. However, DbtTemplater needs those tags to actually parse, because JinjaTracer creates and uses Jinja to process another template similar to the original one. """ tags = {"snapshot", "endsnapshot"} def render(self, format_string=None): """Dummy method that renders the tag.""" return "" sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/000077500000000000000000000000001451700765000223745ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/__init__.py000066400000000000000000000000311451700765000244770ustar00rootroot00000000000000"""Init PY for tests.""" sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/conftest.py000066400000000000000000000007511451700765000245760ustar00rootroot00000000000000"""pytest fixtures.""" import os import pytest @pytest.fixture(scope="session", autouse=True) def dbt_flags(): """Set dbt flags for dbt templater tests.""" # Setting this to True disables some code in dbt-core that randomly runs # some test code in core/dbt/parser/models.py, ModelParser. render_update(). 
# We've seen occasional runtime errors from that code: # TypeError: cannot pickle '_thread.RLock' object os.environ["DBT_USE_EXPERIMENTAL_PARSER"] = "True" sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/000077500000000000000000000000001451700765000242455ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/000077500000000000000000000000001451700765000250165ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/.sqlfluff000066400000000000000000000002251451700765000266400ustar00rootroot00000000000000[sqlfluff] templater = dbt dialect = postgres # exclude_rules = LT12 [sqlfluff:templater:dbt] profiles_dir = profiles_yml project_dir = dbt_project sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/000077500000000000000000000000001451700765000273155ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/.gitignore000066400000000000000000000001041451700765000313000ustar00rootroot00000000000000target/ # dbt <1.0.0 dbt_modules/ # dbt >=1.0.0 dbt_packages/ logs/ sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/.sqlfluffignore000066400000000000000000000000761451700765000323470ustar00rootroot00000000000000# dbt <1.0.0 dbt_modules/ # dbt >=1.0.0 dbt_packages/ target/ sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/dbt_project.yml000066400000000000000000000005141451700765000323370ustar00rootroot00000000000000name: 'my_new_project' version: '1.0.0' config-version: 2 profile: 'default' test-paths: ["tests"] models: my_new_project: materialized: view vars: my_new_project: # Default date stamp of run ds: "2020-01-01" # passed_through_cli: testing for vars passed through cli('--vars' option) rather than dbt_project sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/000077500000000000000000000000001451700765000306015ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/echo.sql000066400000000000000000000000651451700765000322410ustar00rootroot00000000000000{% macro echo(colname) %} {{colname}} {% endmacro %} my_default_config.sql000066400000000000000000000001471451700765000347230ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros-- Issue #335 {% macro my_default_config(type) %} {{ config(materialized="view") }} {% endmacro %} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/macros/my_headers.sql000066400000000000000000000001621451700765000334410ustar00rootroot00000000000000-- Issue #516 {% macro my_headers() %} -- Materialization: {{ config.get('materialization') }} {% endmacro %} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/000077500000000000000000000000001451700765000306005ustar00rootroot00000000000000depends_on_ephemeral/000077500000000000000000000000001451700765000346615ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/modelsa.sql000066400000000000000000000000641451700765000356220ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralselect 1 from {{ source('jaffle_shop', 'orders') }} 
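The depends_on_ephemeral fixtures continue below: b.sql selects from the ephemeral model c.sql, while a.sql and d.sql have no ephemeral dependency. Together they exercise the sequence_files() ordering in templater.py above, which compiles ephemeral models before the models that ref() them. A simplified sketch (the CTE name is an assumption; dbt injects compiled ephemeral models as CTEs) of what b.sql compiles to:

    with __dbt__cte__c as (
        select 1 as id
    )
    select * from __dbt__cte__c where id = 1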
b.sql000066400000000000000000000000521451700765000356200ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralselect * from {{ ref('c') }} where id = 1 c.sql000066400000000000000000000000711451700765000356220ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeral{{ config(materialized='ephemeral') }} select 1 as id d.sql000066400000000000000000000000221451700765000356170ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/depends_on_ephemeralSELECT 1 FROM bar ephemeral_3_level/000077500000000000000000000000001451700765000340745ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/modelsfact_product_contract_values.sql000066400000000000000000000001561451700765000425500ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT * FROM {{ ref('stg_creditview_products') }} stg_creditview_products.sql000066400000000000000000000001651451700765000415640ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT * FROM {{ ref('stg_max_product_contract_seats') }} stg_max_product_contract_seats.sql000066400000000000000000000002261451700765000431130ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ephemeral_3_level{{ config( materialized='ephemeral', ) }} SELECT 'Zaphod Breedlebrox' as numero_uno, 'Ford Prefect' as two, 'Vogon poetry' as trois sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/000077500000000000000000000000001451700765000336245ustar00rootroot00000000000000AM03_test.sql000066400000000000000000000001261451700765000357640ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect birth_date, name from cows order by birth_date asc, name desc ST06_test.sql000066400000000000000000000003111451700765000360140ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- ST06 should ignore this as one of the select targets uses a macro select {{ dbt_utils.surrogate_key(['spots', 'moos']) }} as spot_moo_id, date(birth_date) as birth_date, name from cows access_graph_nodes.sql000066400000000000000000000004371451700765000401040ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Verify that the dbt context variable graph is accessible {% set graph_node = graph.nodes.values() | selectattr('name', 'equalto', 'fact_product_contract_values') | first -%} {%- set num_parents = graph_node.depends_on.nodes | length -%} select {{ num_parents }} as number_of_parents call_statement.sql000066400000000000000000000002761451700765000372720ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{% call statement('unique_keys', fetch_result=True) %} select 'tests' as key_name {% endcall %} {% set unique_keys = load_result('unique_keys') %} select 1, '{{ unique_keys.data[0][0] }}' 
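The call_statement.sql model above runs a real query at compile time via dbt's `{% call statement('unique_keys', fetch_result=True) %}` block, which is one reason DbtTemplater wraps compilation in its `connection()` context manager. Its compiled form (as captured in the templated_output fixtures later in this archive) is simply:

    select 1, 'tests'

If no database is reachable, models like this fail to compile, and the templater surfaces the "dbt tried to connect to the database" error handled in `process()`.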
disabled_model.sql000066400000000000000000000000461451700765000372150ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(enabled=false) }} select 1 ends_with_whitespace_stripping.sql000066400000000000000000000001221451700765000425600ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect 1 {% if target.database == 'test' -%} union all select 2 {%- endif -%} incremental.sql000066400000000000000000000007141451700765000365710ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- https://github.com/sqlfluff/sqlfluff/issues/780 {{ config( materialized = 'incremental', unique_key='product_id' ) }} select {#- Attributes #} products.product_id, products.valid_date_local, products._fivetran_deleted from products inner join dispensaries where not products._fivetran_deleted {% if is_incremental() -%} and products.valid_date_local >= ( select max(valid_date_local) from {{ this }}) {% endif %} issue_1608.sql000066400000000000000000000003411451700765000360720ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialized='view') }} with cte_example as ( select 1 as col_name ), final as ( select col_name, {{- echo('col_name') -}} as col_name2 from cte_example ) select * from final issue_1608.sql.after000066400000000000000000000003401451700765000371710ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialized='view') }} with cte_example as ( select 1 as col_name ), final as ( select col_name, {{- echo('col_name') -}} as col_name2 from cte_example ) select * from final last_day.sql000066400000000000000000000001661451700765000360710ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectwith last_day_macro as ( select {{ dbt_utils.last_day('2021-11-05', 'month') }} ) select * from last_day_macro macro_in_macro.sql000066400000000000000000000002141451700765000372330ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Issue #335 {{ my_default_config("table") }} with source_data as ( select "hello_world" as hello_world ) select * from source_data multiple_trailing_newline.sql000066400000000000000000000000271451700765000415320ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect a from table_a operator_errors.sql000066400000000000000000000000331451700765000375110ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectSELECT 1+2 FROM some_table select_distinct_group_by.sql000066400000000000000000000001151451700765000413510ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect distinct a, b, c from table_a {{ dbt_utils.group_by(3) }} single_trailing_newline.sql000066400000000000000000000000261451700765000411570ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectselect a from table_a 
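Several of the fixtures above target the trailing-newline handling in templater.py: single_trailing_newline.sql and multiple_trailing_newline.sql cover newline restoration (the latter also backs the LT12 rule test), while ends_with_whitespace_stripping.sql covers the opposite branch, where a source ending in `-%}` disables restoration entirely. A condensed sketch of that branch from `_unsafe_process`:

    if not source_dbt_sql.rstrip().endswith("-%}"):
        n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n"))
    else:
        n_trailing_newlines = 0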
src_jaffle_shop.yml000066400000000000000000000001441451700765000374160ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_projectversion: 2 sources: - name: jaffle_shop tables: - name: orders - name: customers templated_inside_comment.sql000066400000000000000000000000331451700765000413160ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{# {{ 1 + 2 }} #} select 1 trailing_newlines.sql000066400000000000000000000000341451700765000400000ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ "" }} select 1 use_dbt_utils.sql000066400000000000000000000007411451700765000371350ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- get_query_results_as_dict() verifies SQLFluff can successfully use dbt_utils -- functions that require a database connection. -- https://github.com/sqlfluff/sqlfluff/issues/2297 {% set saved_var = dbt_utils.get_query_results_as_dict( "SELECT schema_name FROM information_schema.schemata" ) %} with orders as ( select * from {{ source("jaffle_shop", "orders") }} ) select a, b, c, count(*) as occurrences from orders {{ dbt_utils.group_by(3) }} use_headers.sql000066400000000000000000000001201451700765000365460ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project{{ config(materialization="view") }} {{ my_headers() }} select * from table_a use_var.sql000066400000000000000000000001011451700765000357220ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project-- Issue #333 select * from table_a where ds = '{{ var("ds") }}' utf8/000077500000000000000000000000001451700765000344335ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project.sqlfluff000066400000000000000000000000701451700765000362530ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8[sqlfluff] dialect = ansi rules = CP01 encoding = utf-8 test.sql000066400000000000000000000002111451700765000361250ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8{{ config(materialized='table') }} SELECT FIRST_COLUMN, SECOND_COLUMN FROM TABLE_TO_TEST where TYPE_OF_TEST = 'TESTING ÅÄÖ' test.sql.fixed000066400000000000000000000002111451700765000372230ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/my_new_project/utf8{{ config(materialized='table') }} SELECT FIRST_COLUMN, SECOND_COLUMN FROM TABLE_TO_TEST WHERE TYPE_OF_TEST = 'TESTING ÅÄÖ' sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/vars_from_cli.sql000066400000000000000000000000661451700765000341500ustar00rootroot00000000000000-- Issue #1262 SELECT {{ var('passed_through_cli') }} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/packages.yml000066400000000000000000000003331451700765000316150ustar00rootroot00000000000000packages: # Reference: dbt_utils compatibility matrix: # https://docs.google.com/spreadsheets/d/1RoDdC69auAtrwiqmkRsgcFdZ3MdNpeKcJrWkmEpXVIs/edit#gid=0 - package: dbt-labs/dbt_utils version: ["0.8.0"] 
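The dbt_utils package pinned in packages.yml above is installed into this fixture project by the docker/init script shown earlier, which runs `dbt deps` from the project directory. A manual equivalent for local test runs (assuming dbt is installed in the active environment):

    cd plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project
    dbt deps

Without the packages installed, manifest loading can raise the `UninstalledPackagesFoundError` handled in the templater's `dbt_manifest` property.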
sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots/000077500000000000000000000000001451700765000313375ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots/issue_1771.sql000066400000000000000000000003461451700765000336720ustar00rootroot00000000000000{% snapshot dim_aggregated_brand_hierarchy_snapshot %} {{ config( strategy='check', unique_key='c1', target_schema='snapshots', check_cols='all' ) }} select c1 from foo {% endsnapshot %} issue_1771.sql.after000066400000000000000000000003561451700765000347140ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/snapshots{% snapshot dim_aggregated_brand_hierarchy_snapshot %} {{ config( strategy='check', unique_key='c1', target_schema='snapshots', check_cols='all' ) }} select c1 from foo {% endsnapshot %} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/tests/000077500000000000000000000000001451700765000304575ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/tests/test.sql000066400000000000000000000000261451700765000321550ustar00rootroot00000000000000select a from table_a sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/000077500000000000000000000000001451700765000275125ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/compiler_error.sql000066400000000000000000000001361451700765000332560ustar00rootroot00000000000000{% set cols = ["a", "b", "b"] %} select {% for col in cols %} {{ col }} from table_a exception_connect_database.sql000066400000000000000000000003141451700765000355050ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models select {%- for col in dbt_utils.get_column_values( table=ref("select_distinct_group_by"), column="ids" ) %} {{ col }}{{ "," if not loop.last }} {%- endfor %} from table_a sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/unknown_macro.sql000066400000000000000000000002001451700765000331030ustar00rootroot00000000000000-- Refer to a macro which doesn't exist -- https://github.com/sqlfluff/sqlfluff/issues/3849 select * from {{ invalid_macro() }} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/unknown_ref.sql000066400000000000000000000002111451700765000325600ustar00rootroot00000000000000-- Refer to a relation which doesn't exist -- https://github.com/sqlfluff/sqlfluff/issues/3849 select * from {{ ref("i_do_not_exist") }} sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml/000077500000000000000000000000001451700765000275225ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml/profiles.yml000066400000000000000000000004421451700765000320700ustar00rootroot00000000000000default: target: dev outputs: dev: type: postgres host: "{{ env_var('POSTGRES_HOST', 'localhost') }}" user: postgres pass: password port: 5432 dbname: postgres schema: dbt_alice threads: 4 config: send_anonymous_usage_stats: false sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail/000077500000000000000000000000001451700765000305155ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail/profiles.yml000066400000000000000000000003761451700765000330710ustar00rootroot00000000000000default: target: 
dev outputs: dev: type: postgres host: localhost user: postgres pass: password port: 2345 dbname: postgres schema: dbt_alice threads: 4 config: send_anonymous_usage_stats: false sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/000077500000000000000000000000001451700765000304155ustar00rootroot00000000000000access_graph_nodes.sql000066400000000000000000000001321451700765000346650ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output-- Verify that the dbt context variable graph is accessible select 1 as number_of_parents sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/call_statement.sql000066400000000000000000000000241451700765000341310ustar00rootroot00000000000000 select 1, 'tests' sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0/000077500000000000000000000000001451700765000331315ustar00rootroot00000000000000last_day.sql000066400000000000000000000003301451700765000353670ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/dbt_utils_0.8.0with last_day_macro as ( select cast( date_trunc('month', 2021-11-05) + ((interval '1 month') * (1)) + ((interval '1 day') * (-1)) as date) ) select * from last_day_macro ends_with_whitespace_stripping.sql000066400000000000000000000000111451700765000373460ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_outputselect 1 sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/last_day.sql000066400000000000000000000003441451700765000327370ustar00rootroot00000000000000with last_day_macro as ( select cast( date_trunc('month', 2021-11-05) + ((interval '1 month') * (1)) + ((interval '1 day') * (-1)) as date) ) select * from last_day_macro sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/macro_in_macro.sql000066400000000000000000000001621451700765000341050ustar00rootroot00000000000000-- Issue #335 with source_data as ( select "hello_world" as hello_world ) select * from source_data templated_inside_comment.sql000066400000000000000000000000121451700765000361040ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output select 1 trailing_newlines.sql000066400000000000000000000000241451700765000345700ustar00rootroot00000000000000sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output select 1 sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_dbt_utils.sql000066400000000000000000000005261451700765000340060ustar00rootroot00000000000000-- get_query_results_as_dict() verifies SQLFluff can successfully use dbt_utils -- functions that require a database connection. 
-- https://github.com/sqlfluff/sqlfluff/issues/2297 with orders as ( select * from "postgres"."jaffle_shop"."orders" ) select a, b, c, count(*) as occurrences from orders group by 1,2,3 sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_headers.sql000066400000000000000000000000701451700765000334220ustar00rootroot00000000000000 -- Materialization: view select * from table_a sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/use_var.sql000066400000000000000000000000741451700765000326030ustar00rootroot00000000000000-- Issue #333 select * from table_a where ds = '2020-01-01' sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templater.py000066400000000000000000000014571451700765000273740ustar00rootroot00000000000000"""Fixtures for dbt templating tests.""" import pytest from sqlfluff.core import FluffConfig DBT_FLUFF_CONFIG = { "core": { "templater": "dbt", "dialect": "postgres", }, "templater": { "dbt": { "profiles_dir": ( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml" ), "project_dir": ( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project" ), }, }, } @pytest.fixture() def project_dir(): """Returns the dbt project directory.""" return DBT_FLUFF_CONFIG["templater"]["dbt"]["project_dir"] @pytest.fixture() def dbt_templater(): """Returns an instance of the DbtTemplater.""" return FluffConfig(overrides={"dialect": "ansi"}).get_templater("dbt") sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/test.sql000066400000000000000000000001411451700765000265120ustar00rootroot00000000000000 with dbt__CTE__INTERNAL_test as ( select * from a )select count(*) from dbt__CTE__INTERNAL_test sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/linter_test.py000066400000000000000000000073151451700765000253100ustar00rootroot00000000000000"""The Test file for the linter class.""" import os import os.path import shutil import sys import pytest from sqlfluff.cli.commands import lint from sqlfluff.core import FluffConfig, Linter from sqlfluff.utils.testing.cli import invoke_assert_code from test.fixtures.dbt.templater import DBT_FLUFF_CONFIG, project_dir # noqa: F401 @pytest.mark.parametrize( "path", ["models/my_new_project/disabled_model.sql", "macros/echo.sql"] ) def test__linter__skip_file(path, project_dir): # noqa """Test that the linter skips disabled dbt models and macros.""" conf = FluffConfig(configs=DBT_FLUFF_CONFIG) lntr = Linter(config=conf) model_file_path = os.path.join(project_dir, path) linted_path = lntr.lint_path(path=model_file_path) # Check that the file is still there assert len(linted_path.files) == 1 linted_file = linted_path.files[0] # Normalise paths to control for OS variance assert os.path.normpath(linted_file.path) == os.path.normpath(model_file_path) assert not linted_file.templated_file assert not linted_file.tree def test__linter__lint_ephemeral_3_level(project_dir): # noqa """Test linter can lint a project with 3-level ephemeral dependencies.""" # This was previously crashing inside dbt, in a function named # inject_ctes_into_sql(). (issue 2671). 
conf = FluffConfig(configs=DBT_FLUFF_CONFIG) lntr = Linter(config=conf) model_file_path = os.path.join(project_dir, "models/ephemeral_3_level") lntr.lint_path(path=model_file_path) def test__linter__config_pairs(project_dir): # noqa """Test that the dbt templater returns version information in its config.""" conf = FluffConfig(configs=DBT_FLUFF_CONFIG) lntr = Linter(config=conf) # NOTE: This method is called within the config readout. assert lntr.templater.config_pairs() == [ ("templater", "dbt"), ("dbt", lntr.templater.dbt_version), ] @pytest.mark.skipif( sys.platform.startswith("win"), reason="Fails on GitHub Windows with: Paths don't have the same drive", ) def test_dbt_target_dir(tmpdir): """Test with dbt project in subdir that target/ is created in the correct place. https://github.com/sqlfluff/sqlfluff/issues/2895 """ tmp_base_dir = str(tmpdir) tmp_dbt_dir = os.path.join(tmp_base_dir, "dir1", "dir2", "dbt") # tmp_project_dir = os.path.join(tmp_dbt_dir, "dbt_project") os.makedirs(os.path.dirname(tmp_dbt_dir)) shutil.copytree( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt", tmp_dbt_dir, ) os.unlink(os.path.join(tmp_dbt_dir, ".sqlfluff")) old_cwd = os.getcwd() # Invoke SQLFluff from <>, linting a file in the dbt project at # <>/dir1/dir2/dbt/dbt_project. Prior to the bug fix, a # "target" directory would incorrectly be created in <>. # (It should be created in <>/dir1/dir2/dbt/dbt_project.) os.chdir(tmp_base_dir) with open(".sqlfluff", "w") as f: print( """[sqlfluff] templater = dbt dialect = postgres [sqlfluff:templater:dbt] project_dir = {tmp_base_dir}/dir1/dir2/dbt/dbt_project profiles_dir = {old_cwd}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml """.format( old_cwd=old_cwd, tmp_base_dir=tmp_base_dir ), file=f, ) try: invoke_assert_code( ret_code=0, args=[ lint, [ "dir1/dir2/dbt/dbt_project/models/my_new_project/use_dbt_utils.sql", ], ], ) assert not os.path.exists("target") assert os.path.exists("dir1/dir2/dbt/dbt_project/target") finally: os.chdir(old_cwd) sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/rules_test.py000066400000000000000000000055471451700765000251500ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import os import os.path from pathlib import Path import pytest from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.utils.testing.rules import assert_rule_raises_violations_in_file from test.fixtures.dbt.templater import ( # noqa DBT_FLUFF_CONFIG, dbt_templater, project_dir, ) @pytest.mark.parametrize( "rule,path,violations", [ # Group By ("AM01", "models/my_new_project/select_distinct_group_by.sql", [(1, 8)]), # Multiple trailing newline ("LT12", "models/my_new_project/multiple_trailing_newline.sql", [(3, 1)]), ], ) def test__rules__std_file_dbt(rule, path, violations, project_dir): # noqa """Test linter finds the given errors in (and only in) the right places (DBT).""" assert_rule_raises_violations_in_file( rule=rule, fpath=os.path.join(project_dir, path), violations=violations, fluff_config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides=dict(rules=rule)), ) def test__rules__fix_utf8(project_dir): # noqa """Verify that non-ASCII characters are preserved by 'fix'.""" rule = "CP01" path = "models/my_new_project/utf8/test.sql" lntr = Linter( config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides=dict(rules=rule)) ) lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True) # Check that we did actually find issues.
# NOTE: This test is mostly useful to distinguish between whether there's # a problem with the rule - or a problem with the file. violations_dict = lnt.violation_dict() print("Violations Dict: ", violations_dict) qual_path = os.path.normpath(Path(project_dir) / path) assert qual_path in violations_dict, f"{path} not in violations dict." assert violations_dict[qual_path], f"No issues found for {qual_path}." lnt.persist_changes(fixed_file_suffix="FIXED") # TODO: Check contents of file: # ./plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project/models/ # my_new_project/utf8/testFIXED.sql # Against a git file, similar to the autofix tests fixed_path = Path(project_dir) / "models/my_new_project/utf8/testFIXED.sql" cmp_filepath = Path(project_dir) / "models/my_new_project/utf8/test.sql.fixed" fixed_buff = fixed_path.read_text("utf8") comp_buff = cmp_filepath.read_text("utf8") # Assert that we fixed as expected assert fixed_buff == comp_buff os.unlink(fixed_path) def test__rules__order_by(project_dir): # noqa """Verify that rule AM03 works with dbt.""" rule = "AM03" path = "models/my_new_project/AM03_test.sql" lntr = Linter( config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides=dict(rules=rule)) ) lnt = lntr.lint_path(os.path.join(project_dir, path)) violations = lnt.check_tuples() assert len(violations) == 0 sqlfluff-2.3.5/plugins/sqlfluff-templater-dbt/test/templater_test.py000066400000000000000000000466021451700765000260120ustar00rootroot00000000000000"""Tests for the dbt templater.""" import glob import json import logging import os import shutil from copy import deepcopy from pathlib import Path from unittest import mock import pytest from sqlfluff.cli.commands import lint from sqlfluff.core import FluffConfig, Lexer, Linter from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.utils.testing.cli import invoke_assert_code from sqlfluff.utils.testing.logging import fluff_log_catcher from sqlfluff_templater_dbt.templater import DbtTemplater from test.fixtures.dbt.templater import ( # noqa: F401 DBT_FLUFF_CONFIG, dbt_templater, project_dir, ) def test__templater_dbt_missing(dbt_templater, project_dir): # noqa: F811 """Check that a nice error is returned when dbt module is missing.""" try: import dbt # noqa: F401 pytest.skip(reason="dbt is installed") except ModuleNotFoundError: pass with pytest.raises(ModuleNotFoundError, match=r"pip install sqlfluff\[dbt\]"): dbt_templater.process( in_str="", fname=os.path.join(project_dir, "models/my_new_project/test.sql"), config=FluffConfig(configs=DBT_FLUFF_CONFIG), ) def test__templater_dbt_profiles_dir_expanded(dbt_templater): # noqa: F811 """Check that the profiles_dir is expanded.""" dbt_templater.sqlfluff_config = FluffConfig( configs={ "core": {"dialect": "ansi"}, "templater": { "dbt": { "profiles_dir": "~/.dbt", "profile": "default", "target": "dev", } }, }, ) profiles_dir = dbt_templater._get_profiles_dir() # Normalise paths to control for OS variance assert os.path.normpath(profiles_dir) == os.path.normpath( os.path.expanduser("~/.dbt") ) assert dbt_templater._get_profile() == "default" assert dbt_templater._get_target() == "dev" @pytest.mark.parametrize( "fname", [ # dbt_utils "use_dbt_utils.sql", # macro calling another macro "macro_in_macro.sql", # config.get(...) "use_headers.sql", # var(...) 
"use_var.sql", # {# {{ 1 + 2 }} #} "templated_inside_comment.sql", # {{ dbt_utils.last_day( "last_day.sql", # Many newlines at end, tests templater newline handling "trailing_newlines.sql", # Ends with whitespace stripping, so trailing newline handling should # be disabled "ends_with_whitespace_stripping.sql", # Access dbt graph nodes "access_graph_nodes.sql", # Call statements "call_statement.sql", ], ) def test__templater_dbt_templating_result( project_dir, dbt_templater, fname # noqa: F811 ): """Test that input sql file gets templated into output sql file.""" _run_templater_and_verify_result(dbt_templater, project_dir, fname) def test_dbt_profiles_dir_env_var_uppercase( project_dir, dbt_templater, tmpdir, monkeypatch # noqa: F811 ): """Tests specifying the dbt profile dir with env var.""" profiles_dir = tmpdir.mkdir("SUBDIR") # Use uppercase to test issue 2253 monkeypatch.setenv("DBT_PROFILES_DIR", str(profiles_dir)) shutil.copy( os.path.join(project_dir, "../profiles_yml/profiles.yml"), str(profiles_dir) ) _run_templater_and_verify_result(dbt_templater, project_dir, "use_dbt_utils.sql") def _run_templater_and_verify_result(dbt_templater, project_dir, fname): # noqa: F811 path = Path(project_dir) / "models/my_new_project" / fname config = FluffConfig(configs=DBT_FLUFF_CONFIG) templated_file, _ = dbt_templater.process( in_str=path.read_text(), fname=str(path), config=config, ) template_output_folder_path = Path( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/templated_output/" ) fixture_path = _get_fixture_path(template_output_folder_path, fname) assert str(templated_file) == fixture_path.read_text() # Check we can lex the output too. # https://github.com/sqlfluff/sqlfluff/issues/4013 lexer = Lexer(config=config) _, lexing_violations = lexer.lex(templated_file) assert not lexing_violations def _get_fixture_path(template_output_folder_path, fname): fixture_path: Path = template_output_folder_path / fname # Default fixture location dbt_version_specific_fixture_folder = "dbt_utils_0.8.0" # Determine where it would exist. version_specific_path = ( Path(template_output_folder_path) / dbt_version_specific_fixture_folder / fname ) if version_specific_path.is_file(): # Ok, it exists. Use this path instead. fixture_path = version_specific_path return fixture_path @pytest.mark.parametrize( "fnames_input, fnames_expected_sequence", [ [ ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), # c.sql is not present in the original list and should not appear here, # even though b.sql depends on it. This test ensures that "out of scope" # files, e.g. those ignored using ".sqlfluffignore" or in directories # outside what was specified, are not inadvertently processed. ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), ], [ ( Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "c.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), # c.sql should come before b.sql because b.sql depends on c.sql. # It also comes first overall because ephemeral models come first. 
( Path("models") / "depends_on_ephemeral" / "c.sql", Path("models") / "depends_on_ephemeral" / "a.sql", Path("models") / "depends_on_ephemeral" / "b.sql", Path("models") / "depends_on_ephemeral" / "d.sql", ), ], ], ) def test__templater_dbt_sequence_files_ephemeral_dependency( project_dir, dbt_templater, fnames_input, fnames_expected_sequence # noqa: F811 ): """Test that dbt templater sequences files based on dependencies.""" result = dbt_templater.sequence_files( [str(Path(project_dir) / fn) for fn in fnames_input], config=FluffConfig(configs=DBT_FLUFF_CONFIG), ) pd = Path(project_dir) expected = [str(pd / fn) for fn in fnames_expected_sequence] assert list(result) == expected @pytest.mark.parametrize( "raw_file,templated_file,result", [ ( "select * from a", """ with dbt__CTE__INTERNAL_test as ( select * from a )select count(*) from dbt__CTE__INTERNAL_test """, # The unwrapper should trim the ends. [ ("literal", slice(0, 15, None), slice(0, 15, None)), ], ) ], ) def test__templater_dbt_slice_file_wrapped_test( raw_file, templated_file, result, dbt_templater, caplog # noqa: F811 ): """Test that wrapped queries are sliced safely using _check_for_wrapped().""" def _render_func(in_str) -> str: """Create a dummy render func. Importantly one that does actually allow different content to be added. """ # Find the raw location in the template for the test case. loc = templated_file.find(raw_file) # Replace the new content at the previous position. # NOTE: Doing this allows the tracer logic to do what it needs to do. return templated_file[:loc] + in_str + templated_file[loc + len(raw_file) :] with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): _, resp, _ = dbt_templater.slice_file( raw_file, render_func=_render_func, ) assert resp == result @pytest.mark.parametrize( "fname", [ "tests/test.sql", "models/my_new_project/single_trailing_newline.sql", "models/my_new_project/multiple_trailing_newline.sql", ], ) def test__templater_dbt_templating_test_lex( project_dir, dbt_templater, fname # noqa: F811 ): """Demonstrate the lexer works on both dbt models and dbt tests. Handle any number of newlines. """ path = Path(project_dir) / fname config = FluffConfig(configs=DBT_FLUFF_CONFIG) source_dbt_sql = path.read_text() # Count the newlines. n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n")) print( f"Loaded {path!r} (n_newlines: {n_trailing_newlines}): " f"{source_dbt_sql!r}", ) templated_file, _ = dbt_templater.process( in_str=source_dbt_sql, fname=str(path), config=config, ) lexer = Lexer(config=config) # Test that it successfully lexes. 
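# (Hedged note, added for clarity: as in the templating-result test above, # Lexer.lex returns a (segments, violations) tuple; both are discarded # here because lexing without raising is the behaviour under test, and # the assertions below only inspect the templated file itself.)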
_, _ = lexer.lex(templated_file) assert ( templated_file.source_str == "select a\nfrom table_a" + "\n" * n_trailing_newlines ) assert ( templated_file.templated_str == "select a\nfrom table_a" + "\n" * n_trailing_newlines ) @pytest.mark.parametrize( "path,reason", [ ( "models/my_new_project/disabled_model.sql", "it is disabled", ), ( "macros/echo.sql", "it is a macro", ), ], ) def test__templater_dbt_skips_file( path, reason, dbt_templater, project_dir # noqa: F811 ): """A disabled dbt model should be skipped.""" with pytest.raises(SQLFluffSkipFile, match=reason): dbt_templater.process( in_str="", fname=os.path.join(project_dir, path), config=FluffConfig(configs=DBT_FLUFF_CONFIG), ) @pytest.mark.parametrize( "fname", [ "use_var.sql", "incremental.sql", "single_trailing_newline.sql", "ST06_test.sql", ], ) def test__dbt_templated_models_do_not_raise_lint_error( project_dir, fname, caplog # noqa: F811 ): """Test that templated dbt models do not raise a linting error.""" linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG)) # Log rules output. with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"): lnt = linter.lint_path( path=os.path.join(project_dir, "models/my_new_project/", fname) ) for linted_file in lnt.files: # Log the rendered file to facilitate better debugging of the files. print(f"## FILE: {linted_file.path}") print("\n\n## RENDERED FILE:\n\n") print(linted_file.templated_file.templated_str) print("\n\n## PARSED TREE:\n\n") print(linted_file.tree.stringify()) print("\n\n## VIOLATIONS:") for idx, v in enumerate(linted_file.violations): print(f" {idx}:{v.get_info_dict()}") violations = lnt.check_tuples() assert len(violations) == 0 def _clean_path(glob_expression): """Clear out files matching the provided glob expression.""" for fsp in glob.glob(glob_expression): os.remove(fsp) @pytest.mark.parametrize( "path", ["models/my_new_project/issue_1608.sql", "snapshots/issue_1771.sql"] ) def test__dbt_templated_models_fix_does_not_corrupt_file( project_dir, path, caplog # noqa: F811 ): """Test issues where previously "sqlfluff fix" corrupted the file.""" test_glob = os.path.join(project_dir, os.path.dirname(path), "*FIXED.sql") _clean_path(test_glob) lntr = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG)) with caplog.at_level(logging.INFO, logger="sqlfluff.linter"): lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True) try: lnt.persist_changes(fixed_file_suffix="FIXED") with open(os.path.join(project_dir, path + ".after")) as f: comp_buff = f.read() with open(os.path.join(project_dir, path.replace(".sql", "FIXED.sql"))) as f: fixed_buff = f.read() assert fixed_buff == comp_buff finally: _clean_path(test_glob) def test__templater_dbt_templating_absolute_path( project_dir, dbt_templater # noqa: F811 ): """Test that absolute path of input path does not cause RuntimeError.""" try: dbt_templater.process( in_str="", fname=os.path.abspath( os.path.join(project_dir, "models/my_new_project/use_var.sql") ), config=FluffConfig(configs=DBT_FLUFF_CONFIG), ) except Exception as e: pytest.fail(f"Unexpected RuntimeError: {e}") @pytest.mark.parametrize( "fname,exception_msg", [ ( "compiler_error.sql", "dbt compilation error on file 'models/my_new_project/compiler_error.sql'" ", Unexpected end of template. Jinja was looking for the following tags:" " 'endfor' or 'else'. 
The innermost block that needs to be closed is " "'for'.\n line 5\n {{ col }}", ), ( "unknown_ref.sql", # https://github.com/sqlfluff/sqlfluff/issues/3849 "Model 'model.my_new_project.unknown_ref' " "(models/my_new_project/unknown_ref.sql) depends on a node named " "'i_do_not_exist' which was not found", ), ( "unknown_macro.sql", # https://github.com/sqlfluff/sqlfluff/issues/3849 "Compilation Error in model unknown_macro " "(models/my_new_project/unknown_macro.sql)\n 'invalid_macro' is " "undefined. This can happen when calling a macro that does not exist.", ), ], ) def test__templater_dbt_handle_exceptions( project_dir, dbt_templater, fname, exception_msg # noqa: F811 ): """Test that exceptions during compilation are returned as violation.""" from dbt.adapters.factory import get_adapter src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/" + fname target_fpath = os.path.abspath( os.path.join(project_dir, "models/my_new_project/", fname) ) # We move the file that throws an error in and out of the project directory # as dbt throws an error if a node fails to parse while computing the DAG os.rename(src_fpath, target_fpath) try: _, violations = dbt_templater.process( in_str="", fname=target_fpath, config=FluffConfig(configs=DBT_FLUFF_CONFIG, overrides={"dialect": "ansi"}), ) finally: os.rename(target_fpath, src_fpath) get_adapter(dbt_templater.dbt_config).connections.release() assert violations # NB: Replace slashes to deal with different platform paths being returned. assert exception_msg in violations[0].desc().replace("\\", "/") @mock.patch("dbt.adapters.postgres.impl.PostgresAdapter.set_relations_cache") def test__templater_dbt_handle_database_connection_failure( set_relations_cache, project_dir, dbt_templater # noqa: F811 ): """Test the result of a failed database connection.""" from dbt.adapters.factory import get_adapter try: from dbt.exceptions import ( FailedToConnectException as DbtFailedToConnectException, ) except ImportError: from dbt.exceptions import ( FailedToConnectError as DbtFailedToConnectException, ) # Clear the adapter cache to force this test to create a new connection. DbtTemplater.adapters.clear() set_relations_cache.side_effect = DbtFailedToConnectException("dummy error") src_fpath = ( "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models" "/exception_connect_database.sql" ) target_fpath = os.path.abspath( os.path.join( project_dir, "models/my_new_project/exception_connect_database.sql" ) ) dbt_fluff_config_fail = deepcopy(DBT_FLUFF_CONFIG) dbt_fluff_config_fail["templater"]["dbt"][ "profiles_dir" ] = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/profiles_yml_fail" # We move the file that throws an error in and out of the project directory # as dbt throws an error if a node fails to parse while computing the DAG os.rename(src_fpath, target_fpath) try: _, violations = dbt_templater.process( in_str="", fname=target_fpath, config=FluffConfig(configs=DBT_FLUFF_CONFIG), ) finally: os.rename(target_fpath, src_fpath) get_adapter(dbt_templater.dbt_config).connections.release() assert violations # NB: Replace slashes to deal with different platform paths being returned. 
assert ( violations[0] .desc() .replace("\\", "/") .startswith("dbt tried to connect to the database") ) def test__project_dir_does_not_exist_error(dbt_templater): # noqa: F811 """Test an error is logged if the given dbt project directory doesn't exist.""" dbt_templater.sqlfluff_config = FluffConfig( configs={ "core": {"dialect": "ansi"}, "templater": {"dbt": {"project_dir": "./non_existing_directory"}}, } ) with fluff_log_catcher(logging.ERROR, "sqlfluff.templater") as caplog: dbt_project_dir = dbt_templater._get_project_dir() assert ( f"dbt_project_dir: {dbt_project_dir} could not be accessed. " "Check it exists." ) in caplog.text @pytest.mark.parametrize( ("model_path", "var_value"), [ ("models/vars_from_cli.sql", "expected_value"), ("models/vars_from_cli.sql", [1]), ("models/vars_from_cli.sql", {"nested": 1}), ], ) def test__context_in_config_is_loaded( project_dir, dbt_templater, model_path, var_value # noqa: F811 ): """Test that variables inside .sqlfluff are passed to dbt.""" context = {"passed_through_cli": var_value} if var_value else {} config_dict = deepcopy(DBT_FLUFF_CONFIG) config_dict["templater"]["dbt"]["context"] = context config = FluffConfig(config_dict) path = Path(project_dir) / model_path processed, violations = dbt_templater.process( in_str=path.read_text(), fname=str(path), config=config ) assert violations == [] assert str(var_value) in processed.templated_str def test__dbt_log_supression(): """Test that when we try and parse in JSON format we get JSON. This actually tests that we can successfully suppress unwanted logging from dbt. """ oldcwd = os.getcwd() try: os.chdir("plugins/sqlfluff-templater-dbt/test/fixtures/dbt") result = invoke_assert_code( ret_code=1, args=[ lint, [ "--disable-progress-bar", "dbt_project/models/my_new_project/operator_errors.sql", "-f", "json", ], ], ) finally: os.chdir(oldcwd) # Check that the full output parses as json parsed = json.loads(result.output) assert isinstance(parsed, list) assert len(parsed) == 1 first_file = parsed[0] assert isinstance(first_file, dict) # NOTE: Path translation for linux/windows. assert ( first_file["filepath"].replace("\\", "/") == "dbt_project/models/my_new_project/operator_errors.sql" ) assert len(first_file["violations"]) == 2 sqlfluff-2.3.5/pytest.ini000066400000000000000000000007031451700765000154010ustar00rootroot00000000000000[pytest] markers = dbt: Marks tests needing the "dbt" plugin (deselect with '-m "not dbt"'). integration: Marks tests outside of the core suite. parse_suite: Marks the suite of parsing tests across a range of dialects (part of integration). fix_suite: Marks the suite of fixing tests across a range of dialects (part of integration). rules_suite: Marks the suite of rules tests. Also known as the yaml tests (part of integration). sqlfluff-2.3.5/requirements.txt000066400000000000000000000023271451700765000166400ustar00rootroot00000000000000# Used for finding os-specific application config dirs appdirs # Cached property for performance gains # (use functools version for python >= 3.8) backports.cached-property; python_version < '3.8' # To get the encoding of files. 
chardet click colorama>=0.3 # Used for diffcover plugin diff-cover>=2.5.0 # importlib_metadata backport for python 3.7 # Require versions with .distributions https://github.com/sqlfluff/sqlfluff/issues/3763 importlib_metadata>=1.0.0; python_version < '3.8' Jinja2 # Used for .sqlfluffignore pathspec # We provide a testing library for plugins in sqlfluff.utils.testing pytest # We require pyyaml >= 5.1 so that we can preserve the ordering of keys. pyyaml>=5.1 # The new regex module to allow for more complex pattern matching, # whilst remaining backwards compatible with existing regex use cases. # e.g. capturing repeated groups in nested tsql block comments. # This was introduced in https://github.com/sqlfluff/sqlfluff/pull/2027 # and further details can be found in that PR. regex # For returning exceptions from multiprocessing.Pool.map() tblib # For parsing pyproject.toml toml; python_version < '3.11' # For handling progress bars tqdm # better type hints for older python versions typing_extensions sqlfluff-2.3.5/requirements_dev.txt000066400000000000000000000010321451700765000174660ustar00rootroot00000000000000# Install with -U to keep all requirements up-to-date # code linting and formatting flake8 flake8-docstrings pydocstyle!=6.2.0, !=6.2.1 # See: https://github.com/PyCQA/pydocstyle/issues/618 black>=22.1.0 flake8-black>=0.2.4 ruff import-linter # documentation checks doc8 Pygments # testing coverage>=6.4 hypothesis pytest pytest-cov pytest-xdist # MyPy mypy types-toml types-pkg_resources types-chardet types-appdirs types-colorama types-pyyaml types-Jinja2 types-regex # Requests is required for the util script requests ghapi yamllint sqlfluff-2.3.5/setup.cfg000066400000000000000000000141311451700765000151710ustar00rootroot00000000000000[metadata] name = sqlfluff version = 2.3.5 description = The SQL Linter for Humans long_description = file: README.md long_description_content_type = text/markdown url = https://github.com/sqlfluff/sqlfluff author = Alan Cruickshank author_email = alan@designingoverload.com license = MIT License license_files = LICENSE.md project_urls = Homepage = https://www.sqlfluff.com Documentation = https://docs.sqlfluff.com Changes = https://github.com/sqlfluff/sqlfluff/blob/main/CHANGELOG.md Source = https://github.com/sqlfluff/sqlfluff Issue Tracker = https://github.com/sqlfluff/sqlfluff/issues Twitter = https://twitter.com/SQLFluff Chat = https://github.com/sqlfluff/sqlfluff#sqlfluff-on-slack classifiers = Development Status :: 5 - Production/Stable Environment :: Console Intended Audience :: Developers License :: OSI Approved :: MIT License Operating System :: Unix Operating System :: POSIX Operating System :: MacOS Operating System :: Microsoft :: Windows Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: Implementation :: CPython Programming Language :: SQL Topic :: Utilities Topic :: Software Development :: Quality Assurance keywords = sqlfluff sql linter formatter athena bigquery clickhouse databricks db2 duckdb exasol greenplum hive materialize mysql postgres redshift snowflake soql sparksql sqlite teradata trino tsql dbt [options] package_dir = =src packages = find: python_requires = >=3.7 install_requires = # Used for finding os-specific application config dirs appdirs # Cached 
property for performance gains # (use functools version for python >= 3.8) backports.cached-property; python_version < '3.8' # To get the encoding of files. chardet click colorama>=0.3 # Used for diffcover plugin diff-cover>=2.5.0 # importlib_metadata backport for python 3.7 # Require versions with .distributions https://github.com/sqlfluff/sqlfluff/issues/3763 importlib_metadata>=1.0.0; python_version < '3.8' importlib_resources; python_version < '3.9' Jinja2 # Used for .sqlfluffignore pathspec # We provide a testing library for plugins in sqlfluff.utils.testing pytest # We require pyyaml >= 5.1 so that we can preserve the ordering of keys. pyyaml>=5.1 # The new regex module to allow for more complex pattern matching, # whilst remaining backwards compatible with existing regex use cases. # e.g. capturing repeated groups in nested tsql block comments. # This was introduced in https://github.com/sqlfluff/sqlfluff/pull/2027 # and further details can be found in that PR. regex # For returning exceptions from multiprocessing.Pool.map() tblib # For parsing pyproject.toml toml; python_version < '3.11' # For handling progress bars tqdm # better type hints for older python versions typing_extensions [options.packages.find] where = src [options.entry_points] console_scripts = sqlfluff = sqlfluff.cli.commands:cli diff_cover = sqlfluff = sqlfluff.diff_quality_plugin sqlfluff = sqlfluff = sqlfluff.core.plugin.lib # NOTE: We namespace the rules plugins with `rules`, because some # of them might later collide with other types of plugins. In particular # `tsql` may eventually refer to a dialect plugin and `jinja` may refer # to a templater plugin. sqlfluff_rules_capitalisation = sqlfluff.rules.capitalisation sqlfluff_rules_aliasing = sqlfluff.rules.aliasing sqlfluff_rules_layout = sqlfluff.rules.layout sqlfluff_rules_references = sqlfluff.rules.references sqlfluff_rules_ambiguous = sqlfluff.rules.ambiguous sqlfluff_rules_structure = sqlfluff.rules.structure sqlfluff_rules_convention = sqlfluff.rules.convention sqlfluff_rules_jinja = sqlfluff.rules.jinja sqlfluff_rules_tsql = sqlfluff.rules.tsql [options.package_data] sqlfluff = config.ini core/default_config.cfg py.typed [sqlfluff_docs] stable_version = 2.3.5 [importlinter] root_package = sqlfluff [importlinter:contract:core-dependencies] name = Forbid dependencies outside core type = forbidden source_modules = sqlfluff.core forbidden_modules = sqlfluff.api sqlfluff.cli sqlfluff.dialects sqlfluff.rules sqlfluff.utils [importlinter:contract:api-dependencies] name = API may not depend on CLI type = forbidden source_modules = sqlfluff.api forbidden_modules = sqlfluff.cli [importlinter:contract:helper-interdependence] name = Helper methods must be internally independent type = independence modules = sqlfluff.core.helpers.string sqlfluff.core.helpers.slice sqlfluff.core.helpers.dict [importlinter:contract:core-layers] name = Dependency layers within core # NOTE: Several modules within core currently have somewhat more # convoluted dependency loops, especially when it comes to type checking. # Those are currently excluded from this work, but might be picked up in # future work to help with better isolation. type = layers layers= # `linter` references many things, including rules. sqlfluff.core.linter # `rules` should be independent from linter, but can reference the others. sqlfluff.core.rules # `parser` should be independent of `rules` and `linter`. sqlfluff.core.parser # `errors` should be a utility library, which can be referenced by the others.
sqlfluff.core.errors # `helpers` should be independent and not reference any of the above. sqlfluff.core.helpers ignore_imports = # `errors` references `rules` and `parser`, but only for type checking. # Splitting it up seems overkill for now, so an allowable exception. sqlfluff.core.errors -> sqlfluff.core.rules sqlfluff.core.errors -> sqlfluff.core.parser sqlfluff-2.3.5/setup.py000066400000000000000000000001461451700765000150630ustar00rootroot00000000000000#!/usr/bin/env python """The script for setting up sqlfluff.""" from setuptools import setup setup() sqlfluff-2.3.5/src/000077500000000000000000000000001451700765000141375ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/000077500000000000000000000000001451700765000157615ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/__init__.py000066400000000000000000000020311451700765000200660ustar00rootroot00000000000000"""Sqlfluff is a SQL linter for humans.""" import sys import pytest # Expose the public API. from sqlfluff.api import fix, lint, list_dialects, list_rules, parse # Import metadata (using importlib_metadata backport for python versions <3.8) if sys.version_info >= (3, 8): from importlib import metadata else: # pragma: no cover import importlib_metadata as metadata __all__ = ( "lint", "fix", "parse", "list_rules", "list_dialects", ) # Get the current version __version__ = metadata.version("sqlfluff") # Check major python version if sys.version_info[0] < 3: raise Exception("Sqlfluff does not support Python 2. Please upgrade to Python 3.") # Check minor python version elif sys.version_info[1] < 7: raise Exception( "Sqlfluff %s only supports Python 3.7 and beyond. " "Use an earlier version of sqlfluff or a later version of Python" % __version__ ) # Register helper functions to support variable introspection on failure. 
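# (Hedged note, added for clarity: pytest's register_assert_rewrite # recompiles the named module so that bare `assert` statements in # sqlfluff.utils.testing report the values of their sub-expressions on # failure, the same introspection pytest applies to test modules.)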
pytest.register_assert_rewrite("sqlfluff.utils.testing") sqlfluff-2.3.5/src/sqlfluff/__main__.py000066400000000000000000000002121451700765000200460ustar00rootroot00000000000000"""Export cli to __main__ for use like python -m sqlfluff.""" from sqlfluff.cli.commands import cli if __name__ == "__main__": cli() sqlfluff-2.3.5/src/sqlfluff/api/000077500000000000000000000000001451700765000165325ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/api/__init__.py000066400000000000000000000005101451700765000206370ustar00rootroot00000000000000"""Elements which wrap the sqlfluff core library for public use.""" # Expose the simple api from sqlfluff.api.info import list_dialects, list_rules from sqlfluff.api.simple import APIParsingError, fix, lint, parse __all__ = ( "lint", "fix", "parse", "APIParsingError", "list_rules", "list_dialects", ) sqlfluff-2.3.5/src/sqlfluff/api/info.py000066400000000000000000000007201451700765000200360ustar00rootroot00000000000000"""Information API.""" from typing import List from sqlfluff.core import Linter, dialect_readout from sqlfluff.core.dialects import DialectTuple from sqlfluff.core.linter import RuleTuple def list_rules() -> List[RuleTuple]: """Return a list of available rule tuples.""" linter = Linter() return linter.rule_tuples() def list_dialects() -> List[DialectTuple]: """Return a list of available dialect info.""" return list(dialect_readout()) sqlfluff-2.3.5/src/sqlfluff/api/simple.py000066400000000000000000000150451451700765000204020ustar00rootroot00000000000000"""The simple public API methods.""" from typing import Any, Dict, List, Optional from sqlfluff.core import ( FluffConfig, Linter, SQLBaseError, SQLFluffUserError, dialect_selector, ) def get_simple_config( dialect: Optional[str] = None, rules: Optional[List[str]] = None, exclude_rules: Optional[List[str]] = None, config_path: Optional[str] = None, ) -> FluffConfig: """Get a config object from simple API arguments.""" # Create overrides for simple API arguments. overrides = {} if dialect is not None: # Check the requested dialect exists and is valid. try: dialect_selector(dialect) except SQLFluffUserError as err: # pragma: no cover raise SQLFluffUserError(f"Error loading dialect '{dialect}': {str(err)}") except KeyError: raise SQLFluffUserError(f"Error: Unknown dialect '{dialect}'") overrides["dialect"] = dialect if rules is not None: overrides["rules"] = ",".join(rules) if exclude_rules is not None: overrides["exclude_rules"] = ",".join(exclude_rules) # Instantiate a config object. try: return FluffConfig.from_root( extra_config_path=config_path, ignore_local_config=True, overrides=overrides, ) except SQLFluffUserError as err: # pragma: no cover raise SQLFluffUserError(f"Error loading config: {str(err)}") class APIParsingError(ValueError): """An exception which holds a set of violations.""" def __init__(self, violations: List[SQLBaseError], *args: Any): self.violations = violations self.msg = f"Found {len(violations)} issues while parsing string." for viol in violations: self.msg += f"\n{viol!s}" super().__init__(self.msg, *args) def lint( sql: str, dialect: str = "ansi", rules: Optional[List[str]] = None, exclude_rules: Optional[List[str]] = None, config: Optional[FluffConfig] = None, config_path: Optional[str] = None, ) -> List[Dict[str, Any]]: """Lint a SQL string. Args: sql (:obj:`str`): The SQL to be linted. dialect (:obj:`str`, optional): A reference to the dialect of the SQL to be linted. Defaults to `ansi`. 
rules (:obj:`Optional[List[str]]`, optional): A list of rule references to lint for. Defaults to None. exclude_rules (:obj:`Optional[List[str]]`, optional): A list of rule references to avoid linting for. Defaults to None. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. Returns: :obj:`List[Dict[str, Any]]` for each violation found. """ cfg = config or get_simple_config( dialect=dialect, rules=rules, exclude_rules=exclude_rules, config_path=config_path, ) linter = Linter(config=cfg) result = linter.lint_string_wrapped(sql) result_records = result.as_records() # Return just the violations for this file return [] if not result_records else result_records[0]["violations"] def fix( sql: str, dialect: str = "ansi", rules: Optional[List[str]] = None, exclude_rules: Optional[List[str]] = None, config: Optional[FluffConfig] = None, config_path: Optional[str] = None, fix_even_unparsable: Optional[bool] = None, ) -> str: """Fix a SQL string. Args: sql (:obj:`str`): The SQL to be fixed. dialect (:obj:`str`, optional): A reference to the dialect of the SQL to be fixed. Defaults to `ansi`. rules (:obj:`Optional[List[str]]`, optional): A subset of rule references to fix for. Defaults to None. exclude_rules (:obj:`Optional[List[str]]`, optional): A subset of rule references to avoid fixing for. Defaults to None. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. fix_even_unparsable (:obj:`bool`, optional): Optional override for the corresponding SQLFluff configuration value. Returns: :obj:`str` for the fixed SQL if possible. """ cfg = config or get_simple_config( dialect=dialect, rules=rules, exclude_rules=exclude_rules, config_path=config_path, ) linter = Linter(config=cfg) result = linter.lint_string_wrapped(sql, fix=True) if fix_even_unparsable is None: fix_even_unparsable = cfg.get("fix_even_unparsable") should_fix = True if not fix_even_unparsable: # If fix_even_unparsable wasn't set, check for templating or parse # errors and suppress fixing if there were any. _, num_filtered_errors = result.count_tmp_prs_errors() if num_filtered_errors > 0: should_fix = False if should_fix: sql = result.paths[0].files[0].fix_string()[0] return sql def parse( sql: str, dialect: str = "ansi", config: Optional[FluffConfig] = None, config_path: Optional[str] = None, ) -> Dict[str, Any]: """Parse a SQL string. Args: sql (:obj:`str`): The SQL to be parsed. dialect (:obj:`str`, optional): A reference to the dialect of the SQL to be parsed. Defaults to `ansi`. config (:obj:`Optional[FluffConfig]`, optional): A configuration object to use for the operation. Defaults to None. config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config, which is only used if a `config` is not already provided. Defaults to None. Returns: :obj:`Dict[str, Any]` JSON containing the parsed structure. """ cfg = config or get_simple_config( dialect=dialect, config_path=config_path, ) linter = Linter(config=cfg) parsed = linter.parse_string(sql) # If we encounter any parsing errors, raise them in a combined issue.
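# (Hedged usage sketch, not from the original source: # from sqlfluff.api import APIParsingError # try: # tree = sqlfluff.parse("select 1 from tbl") # except APIParsingError as err: # print(err.msg) # i.e. every violation collected below surfaces as one combined message.)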
if parsed.violations: raise APIParsingError(parsed.violations) # Return a JSON representation of the parse tree. if parsed.tree is None: # pragma: no cover return {} record = parsed.tree.as_record(show_raw=True) assert record return record sqlfluff-2.3.5/src/sqlfluff/cli/000077500000000000000000000000001451700765000165305ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/cli/__init__.py000066400000000000000000000001071451700765000206370ustar00rootroot00000000000000"""init py for cli.""" EXIT_SUCCESS = 0 EXIT_FAIL = 1 EXIT_ERROR = 2 sqlfluff-2.3.5/src/sqlfluff/cli/autocomplete.py000066400000000000000000000022631451700765000216060ustar00rootroot00000000000000"""autocompletion commands.""" from typing import List from sqlfluff import list_dialects # Older versions of click don't have shell completion # so handle this for now, as version 8 is still fairly recent # See: https://github.com/sqlfluff/sqlfluff/issues/2543 shell_completion_enabled = True try: from click.shell_completion import CompletionItem except ImportError: # pragma: no cover # In older versions don't enable completion. # We don't force newer versions of click however. # See: https://github.com/sqlfluff/sqlfluff/issues/2543 shell_completion_enabled = False # NOTE: Important that we refer to the "CompletionItem" type # as a string rather than a direct reference so that we don't # get import errors when running with older versions of click. def dialect_shell_complete(ctx, param, incomplete) -> List["CompletionItem"]: """Shell completion for possible dialect names. We use this over click.Choice as we want to internally handle error messages and codes for incorrect/outdated dialects. """ dialect_names = [e.label for e in list_dialects()] return [ CompletionItem(name) for name in dialect_names if name.startswith(incomplete) ] sqlfluff-2.3.5/src/sqlfluff/cli/click_deprecated_option.py000066400000000000000000000070501451700765000237410ustar00rootroot00000000000000"""Allows providing deprecated options for click commands.""" from typing import Any, Callable import click from click import Context, OptionParser, echo, style from click.parser import Option, ParsingState class DeprecatedOption(click.Option): """Allows providing deprecated options for click commands. Works with `DeprecatedOptionsCommand` (see below). Expects to be provided into standard `@click.option` with: * two parameter declaration arguments - the old one (deprecated) and the new one (preferred); * `cls` parameter (standard click Option) as `cls=DeprecatedOption`; * `deprecated` parameter - which says which ones are deprecated, like `deprecated=["--disable_progress_bar"]`. This is based on * https://stackoverflow.com/a/50402799/5172513 It's somewhat hackish and may break when click internals are changed; it is even mentioned in SO: > This code reaches into some private structures in the parser, but this is unlikely to be an issue. This parser code was last changed 4 years ago. The parser code is unlikely to undergo significant revisions. Hopefully this can be removed once * https://github.com/pallets/click/issues/2263 is finished. """ def __init__(self, *args, **kwargs) -> None: self.deprecated = kwargs.pop("deprecated", ()) self.preferred = args[0][-1] super().__init__(*args, **kwargs) class DeprecatedOptionsCommand(click.Command): """Allows providing deprecated options for click commands. Works with `DeprecatedOption` (see above).
Expects to be provided into standard `@click.command` as: * `@cli.command(cls=DeprecatedOptionsCommand)` """ def make_parser(self, ctx: Context) -> OptionParser: """Hook 'make_parser' and during processing check the name. Used to invoke the option to see if it is preferred. """ parser: OptionParser = super().make_parser(ctx) # get the parser options options = set(parser._short_opt.values()) options |= set(parser._long_opt.values()) for option in options: if not isinstance(option.obj, DeprecatedOption): continue option.process = self._make_process(option) # type: ignore return parser def _make_process(self, an_option: Option) -> Callable: """Construct a closure to the parser option processor.""" orig_process: Callable = an_option.process deprecated = getattr(an_option.obj, "deprecated", None) preferred = getattr(an_option.obj, "preferred", None) if not deprecated: raise ValueError( f"Expected `deprecated` value for `{an_option.obj.name!r}`" ) def process(value: Any, state: ParsingState) -> None: """Custom process method. The function above us on the stack used 'opt' to pick option from a dict, see if it is deprecated. """ # reach up the stack and get 'opt' import inspect frame = inspect.currentframe() try: opt = frame.f_back.f_locals.get("opt") # type: ignore finally: del frame assert deprecated if opt in deprecated: msg = ( f"DeprecationWarning: The option {opt!r} is deprecated, " f"use {preferred!r}." ) echo(style(msg, fg="red"), err=True) return orig_process(value, state) return process sqlfluff-2.3.5/src/sqlfluff/cli/commands.py000066400000000000000000001310301451700765000207010ustar00rootroot00000000000000"""Contains the CLI.""" import json import logging import os import sys import time from itertools import chain from logging import LogRecord from typing import Callable, Optional, Tuple import click # To enable colour cross platform import colorama import yaml from tqdm import tqdm from sqlfluff.cli import EXIT_ERROR, EXIT_FAIL, EXIT_SUCCESS from sqlfluff.cli.autocomplete import dialect_shell_complete, shell_completion_enabled from sqlfluff.cli.click_deprecated_option import ( DeprecatedOption, DeprecatedOptionsCommand, ) from sqlfluff.cli.formatters import ( OutputStreamFormatter, format_linting_result_header, ) from sqlfluff.cli.helpers import LazySequence, get_package_version from sqlfluff.cli.outputstream import OutputStream, make_output_stream # Import from sqlfluff core. from sqlfluff.core import ( FluffConfig, Linter, SQLFluffUserError, SQLLintError, SQLTemplaterError, dialect_readout, dialect_selector, ) from sqlfluff.core.config import progress_bar_configuration from sqlfluff.core.enums import Color, FormatType from sqlfluff.core.linter import LintingResult from sqlfluff.core.plugin.host import get_plugin_manager class StreamHandlerTqdm(logging.StreamHandler): """Modified StreamHandler which takes care of writing within `tqdm` context. It uses `tqdm` write which takes care of conflicting prints with progressbar. Without it, there were left artifacts in DEBUG mode (not sure about another ones, but probably would happen somewhere). """ def emit(self, record: LogRecord) -> None: """Behaves like original one except uses `tqdm` to write.""" try: msg = self.format(record) tqdm.write(msg, file=self.stream) self.flush() except Exception: # pragma: no cover self.handleError(record) def set_logging_level( verbosity: int, formatter: OutputStreamFormatter, logger: Optional[logging.Logger] = None, stderr_output: bool = False, ) -> None: """Set up logging for the CLI. 
We either set up global logging based on the verbosity or, if `logger` is specified, we only limit to a single sqlfluff logger. Verbosity is applied in the same way. Implementation: If `logger` is not specified, the handler is attached to the `sqlfluff` logger. If it is specified then it attaches to the logger in question. In addition, if `logger` is specified, then that logger will also not propagate. """ fluff_logger = logging.getLogger("sqlfluff") # Don't propagate logging fluff_logger.propagate = False # Enable colorama colorama.init() # Set up the log handler which is able to print messages without overlapping # with progressbars. handler = StreamHandlerTqdm(stream=sys.stderr if stderr_output else sys.stdout) # NB: the unicode character at the beginning is to squash any badly # tamed ANSI colour statements, and return us to normality. handler.setFormatter(logging.Formatter("\u001b[0m%(levelname)-10s %(message)s")) # Set up a handler to colour warnings red. # See: https://docs.python.org/3/library/logging.html#filter-objects def red_log_filter(record: logging.LogRecord) -> bool: if record.levelno >= logging.WARNING: record.msg = f"{formatter.colorize(record.msg, Color.red)} " return True handler.addFilter(red_log_filter) if logger: focus_logger = logging.getLogger(f"sqlfluff.{logger}") focus_logger.addHandler(handler) else: fluff_logger.addHandler(handler) # NB: We treat the parser logger slightly differently because it's noisier. # It's important that we set levels for all of them each time so # that we don't break tests by changing the granularity # between tests. parser_logger = logging.getLogger("sqlfluff.parser") if verbosity < 3: fluff_logger.setLevel(logging.WARNING) parser_logger.setLevel(logging.NOTSET) elif verbosity == 3: fluff_logger.setLevel(logging.INFO) parser_logger.setLevel(logging.WARNING) elif verbosity == 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.INFO) elif verbosity > 4: fluff_logger.setLevel(logging.DEBUG) parser_logger.setLevel(logging.DEBUG) class PathAndUserErrorHandler: """Make an API call but with error handling for the CLI.""" def __init__(self, formatter) -> None: self.formatter = formatter def __enter__(self) -> "PathAndUserErrorHandler": return self def __exit__(self, exc_type, exc_val, exc_tb) -> None: if exc_type is SQLFluffUserError: click.echo( "\nUser Error: " + self.formatter.colorize( str(exc_val), Color.red, ) ) sys.exit(EXIT_ERROR) def common_options(f: Callable) -> Callable: """Add common options to commands via a decorator. These are applied to all of the cli commands. """ f = click.version_option()(f) f = click.option( "-v", "--verbose", count=True, default=None, help=( "Verbosity, how detailed should the output be. This is *stackable*, so " "`-vv` is more verbose than `-v`. For the most verbose option try `-vvvv` " "or `-vvvvv`." ), )(f) f = click.option( "-n", "--nocolor", is_flag=True, default=None, help="No color - output will be without ANSI color codes.", )(f) return f def core_options(f: Callable) -> Callable: """Add core operation options to commands via a decorator. These are applied to the main (but not all) cli commands like `parse`, `lint` and `fix`.
""" # Only enable dialect completion if on version of click # that supports it if shell_completion_enabled: f = click.option( "-d", "--dialect", default=None, help="The dialect of SQL to lint", shell_complete=dialect_shell_complete, )(f) else: # pragma: no cover f = click.option( "-d", "--dialect", default=None, help="The dialect of SQL to lint", )(f) f = click.option( "-t", "--templater", default=None, help="The templater to use (default=jinja)", type=click.Choice( # Use LazySequence so that we don't load templaters until required. LazySequence( lambda: [ templater.name for templater in chain.from_iterable( get_plugin_manager().hook.get_templaters() ) ] ) ), )(f) f = click.option( "-r", "--rules", default=None, help=( "Narrow the search to only specific rules. For example " "specifying `--rules LT01` will only search for rule `LT01` (Unnecessary " "trailing whitespace). Multiple rules can be specified with commas e.g. " "`--rules LT01,LT02` will specify only looking for violations of rule " "`LT01` and rule `LT02`." ), )(f) f = click.option( "-e", "--exclude-rules", default=None, help=( "Exclude specific rules. For example " "specifying `--exclude-rules LT01` will remove rule `LT01` (Unnecessary " "trailing whitespace) from the set of considered rules. This could either " "be the allowlist, or the general set if there is no specific allowlist. " "Multiple rules can be specified with commas e.g. " "`--exclude-rules LT01,LT02` will exclude violations of rule " "`LT01` and rule `LT02`." ), )(f) f = click.option( "--config", "extra_config_path", default=None, help=( "Include additional config file. By default the config is generated " "from the standard configuration files described in the documentation. " "This argument allows you to specify an additional configuration file that " "overrides the standard configuration files. N.B. cfg format is required." ), type=click.Path(), )(f) f = click.option( "--ignore-local-config", is_flag=True, help=( "Ignore config files in default search path locations. " "This option allows the user to lint with the default config " "or can be used in conjunction with --config to only " "reference the custom config file." ), )(f) f = click.option( "--encoding", default=None, help=( "Specify encoding to use when reading and writing files. Defaults to " "autodetect." ), )(f) f = click.option( "-i", "--ignore", default=None, help=( "Ignore particular families of errors so that they don't cause a failed " "run. For example `--ignore parsing` would mean that any parsing errors " "are ignored and don't influence the success or fail of a run. " "`--ignore` behaves somewhat like `noqa` comments, except it " "applies globally. Multiple options are possible if comma separated: " "e.g. `--ignore parsing,templating`." ), )(f) f = click.option( "--bench", is_flag=True, help="Set this flag to engage the benchmarking tool output.", )(f) f = click.option( "--logger", type=click.Choice( ["templater", "lexer", "parser", "linter", "rules", "config"], case_sensitive=False, ), help="Choose to limit the logging to one of the loggers.", )(f) f = click.option( "--disable-noqa", is_flag=True, default=None, help="Set this flag to ignore inline noqa comments.", )(f) f = click.option( "--library-path", default=None, help=( "Override the `library_path` value from the [sqlfluff:templater:jinja]" " configuration value. Set this to 'none' to disable entirely." " This overrides any values set by users in configuration files or" " inline directives." 
), )(f) return f def lint_options(f: Callable) -> Callable: """Add lint operation options to commands via a decorator. These are cli commands that do linting, i.e. `lint` and `fix`. """ f = click.option( "-p", "--processes", type=int, default=None, help=( "The number of parallel processes to run. Positive numbers work as " "expected. Zero and negative numbers will work as number_of_cpus - " "number. e.g -1 means all cpus except one. 0 means all cpus." ), )(f) f = click.option( "--disable_progress_bar", "--disable-progress-bar", is_flag=True, help="Disables progress bars.", cls=DeprecatedOption, deprecated=["--disable_progress_bar"], )(f) f = click.option( "--persist-timing", default=None, help=( "A filename to persist the timing information for a linting run to " "in csv format for external analysis. NOTE: This feature should be " "treated as beta, and the format of the csv file may change in " "future releases without warning." ), )(f) f = click.option( "--warn-unused-ignores", is_flag=True, default=False, help="Warn about unneeded '-- noqa:' comments.", )(f) return f def get_config( extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> FluffConfig: """Get a config object from kwargs.""" plain_output = OutputStreamFormatter.should_produce_plain_output(kwargs["nocolor"]) if kwargs.get("dialect"): try: # We're just making sure it exists at this stage. # It will be fetched properly in the linter. dialect_selector(kwargs["dialect"]) except SQLFluffUserError as err: click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error loading dialect '{kwargs['dialect']}': {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) except KeyError: click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error: Unknown dialect '{kwargs['dialect']}'", color=Color.red, ) ) sys.exit(EXIT_ERROR) from_root_kwargs = {} if "require_dialect" in kwargs: from_root_kwargs["require_dialect"] = kwargs.pop("require_dialect") library_path = kwargs.pop("library_path", None) if not kwargs.get("warn_unused_ignores", True): # If it's present AND True, then keep it, otherwise remove this so # that we default to the root config. del kwargs["warn_unused_ignores"] # Instantiate a config object (filtering out the nulls) overrides = {k: kwargs[k] for k in kwargs if kwargs[k] is not None} if library_path is not None: # Check for a null value if library_path.lower() == "none": library_path = None # Set an explicit None value. # Set the global override overrides["library_path"] = library_path try: return FluffConfig.from_root( extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, **from_root_kwargs, ) except SQLFluffUserError as err: # pragma: no cover click.echo( OutputStreamFormatter.colorize_helper( plain_output, f"Error loading config: {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) def get_linter_and_formatter( cfg: FluffConfig, output_stream: Optional[OutputStream] = None ) -> Tuple[Linter, OutputStreamFormatter]: """Get a linter object given a config.""" try: # We're just making sure it exists at this stage. # It will be fetched properly in the linter. 
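# (Hedged note, added for clarity: dialect_selector raises KeyError for # an unknown dialect name, which the except clause below maps to a clean # CLI error message and an EXIT_ERROR exit code.)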
dialect = cfg.get("dialect") if dialect: dialect_selector(dialect) except KeyError: # pragma: no cover click.echo(f"Error: Unknown dialect '{cfg.get('dialect')}'") sys.exit(EXIT_ERROR) formatter = OutputStreamFormatter( output_stream=output_stream or make_output_stream(cfg), nocolor=cfg.get("nocolor"), verbosity=cfg.get("verbose"), output_line_length=cfg.get("output_line_length"), ) return Linter(config=cfg, formatter=formatter), formatter @click.group( context_settings={"help_option_names": ["-h", "--help"]}, epilog="""\b\bExamples:\n sqlfluff lint --dialect postgres .\n sqlfluff lint --dialect postgres --rules ST05 .\n sqlfluff fix --dialect sqlite --rules LT10,ST05 src/queries\n sqlfluff parse --dialect sqlite --templater jinja src/queries/common.sql """, ) @click.version_option() def cli() -> None: """SQLFluff is a modular SQL linter for humans.""" # noqa D403 @cli.command() @common_options def version(**kwargs) -> None: """Show the version of sqlfluff.""" c = get_config(**kwargs, require_dialect=False) if c.get("verbose") > 0: # Instantiate the linter lnt, formatter = get_linter_and_formatter(c) # Dispatch the detailed config from the linter. formatter.dispatch_config(lnt) else: # Otherwise just output the package version. click.echo(get_package_version(), color=c.get("color")) @cli.command() @common_options def rules(**kwargs) -> None: """Show the current rules in use.""" c = get_config(**kwargs, dialect="ansi") lnt, formatter = get_linter_and_formatter(c) try: click.echo(formatter.format_rules(lnt), color=c.get("color")) # No cover for clause covering poorly formatted rules. # Without creating a poorly formed plugin, these are hard to # test. except (SQLFluffUserError, AssertionError) as err: # pragma: no cover click.echo( OutputStreamFormatter.colorize_helper( c.get("color"), f"Error loading rules: {str(err)}", color=Color.red, ) ) sys.exit(EXIT_ERROR) @cli.command() @common_options def dialects(**kwargs) -> None: """Show the current dialects available.""" c = get_config(**kwargs, require_dialect=False) _, formatter = get_linter_and_formatter(c) click.echo(formatter.format_dialects(dialect_readout), color=c.get("color")) def dump_file_payload(filename: Optional[str], payload: str) -> None: """Write the output file content to stdout or file.""" # If there's a file specified to write to, write to it. if filename: with open(filename, "w") as out_file: out_file.write(payload) # Otherwise write to stdout else: click.echo(payload) @cli.command(cls=DeprecatedOptionsCommand) @common_options @core_options @lint_options @click.option( "-f", "--format", "format", default="human", type=click.Choice([ft.value for ft in FormatType], case_sensitive=False), help="What format to return the lint result in (default=human).", ) @click.option( "--write-output", help=( "Optionally provide a filename to write the results to, mostly used in " "tandem with --format. NB: Setting an output file re-enables normal " "stdout logging." ), ) @click.option( "--annotation-level", default="notice", type=click.Choice(["notice", "warning", "failure", "error"], case_sensitive=False), help=( "When format is set to github-annotation or github-annotation-native, " "default annotation level (default=notice). failure and error are equivalent." ), ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." 
), ) @click.option( "--disregard-sqlfluffignores", is_flag=True, help="Perform the operation regardless of .sqlfluffignore configurations", ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def lint( paths: Tuple[str], format: str, write_output: Optional[str], annotation_level: str, nofail: bool, disregard_sqlfluffignores: bool, logger: Optional[logging.Logger] = None, bench: bool = False, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> None: """Lint SQL files via passing a list of files or using stdin. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. Linting SQL files: sqlfluff lint path/to/file.sql sqlfluff lint directory/of/sql/files Linting a file via stdin (note the lone '-' character): cat path/to/file.sql | sqlfluff lint - echo 'select col from tbl' | sqlfluff lint - """ config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) non_human_output = (format != FormatType.human.value) or (write_output is not None) file_output = None output_stream = make_output_stream(config, format, write_output) lnt, formatter = get_linter_and_formatter(config, output_stream) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=non_human_output, ) # Output the results as we go if verbose >= 1 and not non_human_output: click.echo(format_linting_result_header()) with PathAndUserErrorHandler(formatter): # add stdin if specified via lone '-' if ("-",) == paths: result = lnt.lint_string_wrapped(sys.stdin.read(), fname="stdin") else: result = lnt.lint_paths( paths, ignore_non_existent_files=False, ignore_files=not disregard_sqlfluffignores, processes=processes, ) # Output the final stats if verbose >= 1 and not non_human_output: click.echo(formatter.format_linting_stats(result, verbose=verbose)) if format == FormatType.json.value: file_output = json.dumps(result.as_records()) elif format == FormatType.yaml.value: file_output = yaml.dump(result.as_records(), sort_keys=False) elif format == FormatType.none.value: file_output = "" elif format == FormatType.github_annotation.value: if annotation_level == "error": annotation_level = "failure" github_result = [] for record in result.as_records(): filepath = record["filepath"] for violation in record["violations"]: # NOTE: The output format is designed for this GitHub action: # https://github.com/yuzutech/annotations-action # It is similar, but not identical, to the native GitHub format: # https://docs.github.com/en/rest/reference/checks#annotations-items github_result.append( { "file": filepath, "line": violation["line_no"], "start_column": violation["line_pos"], "end_column": violation["line_pos"], "title": "SQLFluff", "message": f"{violation['code']}: {violation['description']}", "annotation_level": annotation_level, } ) file_output = json.dumps(github_result) elif format == FormatType.github_annotation_native.value: if annotation_level == "failure": annotation_level = "error" github_result_native = [] for 
record in result.as_records(): filepath = record["filepath"] for violation in record["violations"]: # NOTE: The output format is designed for the GitHub Actions workflow command format: # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message line = f"::{annotation_level} " line += "title=SQLFluff," line += f"file={filepath}," line += f"line={violation['line_no']}," line += f"col={violation['line_pos']}" line += "::" line += f"{violation['code']}: {violation['description']}" if violation["name"]: line += f" [{violation['name']}]" github_result_native.append(line) file_output = "\n".join(github_result_native) if file_output: dump_file_payload(write_output, file_output) if persist_timing: result.persist_timing_records(persist_timing) output_stream.close() if bench: click.echo("==== overall timings ====") click.echo(formatter.cli_table([("Clock time", result.total_time)])) timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo( formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20) ) if not nofail: if not non_human_output: formatter.completion_message() sys.exit(result.stats(EXIT_FAIL, EXIT_SUCCESS)["exit code"]) else: sys.exit(EXIT_SUCCESS) def do_fixes( result: LintingResult, formatter: Optional[OutputStreamFormatter] = None, fixed_file_suffix: str = "", ) -> bool: """Actually do the fixes.""" if formatter and formatter.verbosity >= 0: click.echo("Persisting Changes...") res = result.persist_changes( formatter=formatter, fixed_file_suffix=fixed_file_suffix ) if all(res.values()): if formatter and formatter.verbosity >= 0: click.echo("Done. Please check your files to confirm.") return True # If some failed then return false click.echo( "Done. Some operations failed. Please check your files to confirm." ) # pragma: no cover click.echo( "Some errors cannot be fixed or there is another error blocking it." ) # pragma: no cover return False # pragma: no cover def _stdin_fix(linter: Linter, formatter, fix_even_unparsable: bool) -> None: """Handle fixing from stdin.""" exit_code = EXIT_SUCCESS stdin = sys.stdin.read() result = linter.lint_string_wrapped(stdin, fname="stdin", fix=True) templater_error = result.num_violations(types=SQLTemplaterError) > 0 unfixable_error = result.num_violations(types=SQLLintError, fixable=False) > 0 if not fix_even_unparsable: exit_code = formatter.handle_files_with_tmp_or_prs_errors(result) if result.num_violations(types=SQLLintError, fixable=True) > 0: stdout = result.paths[0].files[0].fix_string()[0] else: stdout = stdin if templater_error: click.echo( formatter.colorize( "Fix aborted due to unparsable template variables.", Color.red, ), err=True, ) click.echo( formatter.colorize( "Use '--FIX-EVEN-UNPARSABLE' to attempt to fix the SQL anyway.", Color.red, ), err=True, ) if unfixable_error: click.echo( formatter.colorize("Unfixable violations detected.", Color.red), err=True, ) click.echo(stdout, nl=False) sys.exit(EXIT_FAIL if templater_error or unfixable_error else exit_code) def _paths_fix( linter: Linter, formatter, paths, processes, fix_even_unparsable, force, fixed_suffix, bench, show_lint_violations, warn_force: bool = True, persist_timing: Optional[str] = None, ) -> None: """Handle fixing from paths.""" # Lint the paths (not with the fix argument at this stage), outputting as we go. 
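    # Sketch of the two-phase flow implemented below (names in the sketch
    # are descriptive only): first lint with fix=True so fix candidates are
    # computed (and applied as we go only when --force is set), then prompt
    # before persisting:
    #
    #   result = linter.lint_paths(paths, fix=True, apply_fixes=force, ...)
    #   if force or user_says_yes():  # hypothetical stand-in for the prompt
    #       do_fixes(result, formatter, fixed_file_suffix=fixed_suffix)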
if formatter.verbosity >= 0: click.echo("==== finding fixable violations ====") exit_code = EXIT_SUCCESS if force and warn_force and formatter.verbosity >= 0: click.echo( f"{formatter.colorize('FORCE MODE', Color.red)}: " "Attempting fixes..." ) with PathAndUserErrorHandler(formatter): result: LintingResult = linter.lint_paths( paths, fix=True, ignore_non_existent_files=False, processes=processes, # If --force is set, then apply the changes as we go rather # than waiting until the end. apply_fixes=force, fixed_file_suffix=fixed_suffix, fix_even_unparsable=fix_even_unparsable, ) if not fix_even_unparsable: exit_code = formatter.handle_files_with_tmp_or_prs_errors(result) # NB: We filter to linting violations here, because they're # the only ones which can be potentially fixed. num_fixable = result.num_violations(types=SQLLintError, fixable=True) if num_fixable > 0: if not force and formatter.verbosity >= 0: click.echo("==== fixing violations ====") click.echo(f"{num_fixable} " "fixable linting violations found") if not force: click.echo( "Are you sure you wish to attempt to fix these? [Y/n] ", nl=False ) c = click.getchar().lower() click.echo("...") if c in ("y", "\r", "\n"): if formatter.verbosity >= 0: click.echo("Attempting fixes...") success = do_fixes( result, formatter, fixed_file_suffix=fixed_suffix, ) if not success: sys.exit(EXIT_FAIL) # pragma: no cover else: formatter.completion_message() elif c == "n": click.echo("Aborting...") exit_code = EXIT_FAIL else: # pragma: no cover click.echo("Invalid input, please enter 'Y' or 'N'") click.echo("Aborting...") exit_code = EXIT_FAIL else: if formatter.verbosity >= 0: click.echo("==== no fixable linting violations found ====") formatter.completion_message() error_types = [ ( dict(types=SQLLintError, fixable=False), " [{} unfixable linting violations found]", EXIT_FAIL, ), ] for num_violations_kwargs, message_format, error_level in error_types: num_violations = result.num_violations(**num_violations_kwargs) if num_violations > 0 and formatter.verbosity >= 0: click.echo(message_format.format(num_violations)) exit_code = max(exit_code, error_level) if bench: click.echo("==== overall timings ====") click.echo(formatter.cli_table([("Clock time", result.total_time)])) timing_summary = result.timing_summary() for step in timing_summary: click.echo(f"=== {step} ===") click.echo( formatter.cli_table(timing_summary[step].items(), cols=3, col_width=20) ) if show_lint_violations: click.echo("==== lint for unfixable violations ====") all_results = result.violation_dict(**num_violations_kwargs) sorted_files = sorted(all_results.keys()) for file in sorted_files: violations = all_results.get(file, []) click.echo(formatter.format_filename(file, success=(not violations))) for violation in violations: click.echo(formatter.format_violation(violation)) if persist_timing: result.persist_timing_records(persist_timing) sys.exit(exit_code) @cli.command(cls=DeprecatedOptionsCommand) @common_options @core_options @lint_options @click.option( "-f", "--force", is_flag=True, help=( "Skip the confirmation prompt and go straight to applying " "fixes. Fixes will also be applied file by file, during the " "linting process, rather than waiting until all files are " "linted before fixing. **Use this with caution.**" ), ) @click.option( "-q", "--quiet", is_flag=True, help=( "Reduces the amount of output to stdout to a minimal level. " "This is effectively the opposite of -v. NOTE: It will only " "take effect if -f/--force is also set." 
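        # NOTE: As implemented in the body of `fix` below, -q/--quiet forces
        # kwargs["verbose"] to -1, so any output gated on
        # `formatter.verbosity >= 0` (e.g. the fix progress messages above)
        # is suppressed.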
), ) @click.option( "-x", "--fixed-suffix", default=None, help="An optional suffix to add to fixed files.", ) @click.option( "--FIX-EVEN-UNPARSABLE", is_flag=True, default=None, help=( "Enables fixing of files that have templating or parse errors. " "Note that the similar-sounding '--ignore' or 'noqa' features merely " "prevent errors from being *displayed*. For safety reasons, the 'fix'" "command will not make any fixes in files that have templating or parse " "errors unless '--FIX-EVEN-UNPARSABLE' is enabled on the command line" "or in the .sqlfluff config file." ), ) @click.option( "--show-lint-violations", is_flag=True, help="Show lint violations", ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def fix( force: bool, paths: Tuple[str], bench: bool = False, quiet: bool = False, fixed_suffix: str = "", logger: Optional[logging.Logger] = None, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, show_lint_violations: bool = False, **kwargs, ) -> None: """Fix SQL files. PATH is the path to a sql file or directory to lint. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths if quiet: if kwargs["verbose"]: click.echo( "ERROR: The --quiet flag can only be used if --verbose is not set.", ) sys.exit(EXIT_ERROR) kwargs["verbose"] = -1 config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) fix_even_unparsable = config.get("fix_even_unparsable") output_stream = make_output_stream( config, None, os.devnull if fixing_stdin else None ) lnt, formatter = get_linter_and_formatter(config, output_stream) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=fixing_stdin, ) # handle stdin case. should output formatted sql to stdout and nothing else. if fixing_stdin: _stdin_fix(lnt, formatter, fix_even_unparsable) else: _paths_fix( lnt, formatter, paths, processes, fix_even_unparsable, force, fixed_suffix, bench, show_lint_violations, persist_timing=persist_timing, ) @cli.command(name="format", cls=DeprecatedOptionsCommand) @common_options @core_options @lint_options @click.option( "-x", "--fixed-suffix", default=None, help="An optional suffix to add to fixed files.", ) @click.argument("paths", nargs=-1, type=click.Path(allow_dash=True)) def cli_format( paths: Tuple[str], bench: bool = False, fixed_suffix: str = "", logger: Optional[logging.Logger] = None, processes: Optional[int] = None, disable_progress_bar: Optional[bool] = False, persist_timing: Optional[str] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> None: """Autoformat SQL files. This effectively force applies `sqlfluff fix` with a known subset of fairly stable rules. Enabled rules are ignored, but rule exclusions (via CLI) or config are still respected. PATH is the path to a sql file or directory to lint. 
This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ # some quick checks fixing_stdin = ("-",) == paths if kwargs.get("rules"): click.echo( "Specifying rules is not supported for sqlfluff format.", ) sys.exit(EXIT_ERROR) # Override rules for sqlfluff format kwargs["rules"] = ( # All of the capitalisation rules "capitalisation," # All of the layout rules "layout," # Safe rules from other groups "ambiguous.union," "convention.not_equal," "convention.coalesce," "convention.select_trailing_comma," "convention.is_null," "jinja.padding," "structure.distinct," ) config = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) output_stream = make_output_stream( config, None, os.devnull if fixing_stdin else None ) lnt, formatter = get_linter_and_formatter(config, output_stream) verbose = config.get("verbose") progress_bar_configuration.disable_progress_bar = disable_progress_bar formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=fixing_stdin, ) # handle stdin case. should output formatted sql to stdout and nothing else. if fixing_stdin: _stdin_fix(lnt, formatter, fix_even_unparsable=False) else: _paths_fix( lnt, formatter, paths, processes, fix_even_unparsable=False, force=True, # Always force in format mode. fixed_suffix=fixed_suffix, bench=bench, show_lint_violations=False, warn_force=False, # don't warn about being in force mode. persist_timing=persist_timing, ) def quoted_presenter(dumper, data): """Re-presenter which always double quotes string values needing escapes.""" if "\n" in data or "\t" in data or "'" in data: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"') else: return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="") @cli.command() @common_options @core_options @click.argument("path", nargs=1, type=click.Path(allow_dash=True)) @click.option( "-c", "--code-only", is_flag=True, help="Output only the code elements of the parse tree.", ) @click.option( "-m", "--include-meta", is_flag=True, help=( "Include meta segments (indents, dedents and placeholders) in the output. " "This only applies when outputting json or yaml." ), ) @click.option( "-f", "--format", default=FormatType.human.value, type=click.Choice( [ FormatType.human.value, FormatType.json.value, FormatType.yaml.value, FormatType.none.value, ], case_sensitive=False, ), help="What format to return the parse result in.", ) @click.option( "--write-output", help=( "Optionally provide a filename to write the results to, mostly used in " "tandem with --format. NB: Setting an output file re-enables normal " "stdout logging." ), ) @click.option( "--parse-statistics", is_flag=True, help=( "Set this flag to enabled detailed debugging readout " "on the use of terminators in the parser." ), ) @click.option( "--nofail", is_flag=True, help=( "If set, the exit code will always be zero, regardless of violations " "found. This is potentially useful during rollout." 
), ) def parse( path: str, code_only: bool, include_meta: bool, format: str, write_output: Optional[str], bench: bool, nofail: bool, logger: Optional[logging.Logger] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, parse_statistics: bool = False, **kwargs, ) -> None: """Parse SQL files and just spit out the result. PATH is the path to a sql file or directory to parse. This can be either a file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-') character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will be interpreted like passing the current working directory as a path argument. """ c = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) # We don't want anything else to be logged if we want json or yaml output # unless we're writing to a file. non_human_output = (format != FormatType.human.value) or (write_output is not None) output_stream = make_output_stream(c, format, write_output) lnt, formatter = get_linter_and_formatter(c, output_stream) verbose = c.get("verbose") progress_bar_configuration.disable_progress_bar = True formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=non_human_output, ) t0 = time.monotonic() # handle stdin if specified via lone '-' with PathAndUserErrorHandler(formatter): if "-" == path: parsed_strings = [ lnt.parse_string( sys.stdin.read(), "stdin", config=lnt.config, parse_statistics=parse_statistics, ), ] else: # A single path must be specified for this command parsed_strings = list( lnt.parse_path( path=path, parse_statistics=parse_statistics, ) ) total_time = time.monotonic() - t0 violations_count = 0 # iterative print for human readout if format == FormatType.human.value: violations_count = formatter.print_out_violations_and_timing( output_stream, bench, code_only, total_time, verbose, parsed_strings ) else: parsed_strings_dict = [ dict( filepath=linted_result.fname, segments=linted_result.tree.as_record( code_only=code_only, show_raw=True, include_meta=include_meta ) if linted_result.tree else None, ) for linted_result in parsed_strings ] if format == FormatType.yaml.value: # For yaml dumping always dump double quoted strings if they contain # tabs or newlines. yaml.add_representer(str, quoted_presenter) file_output = yaml.dump(parsed_strings_dict, sort_keys=False) elif format == FormatType.json.value: file_output = json.dumps(parsed_strings_dict) elif format == FormatType.none.value: file_output = "" # Dump the output to stdout or to file as appropriate. dump_file_payload(write_output, file_output) if violations_count > 0 and not nofail: sys.exit(EXIT_FAIL) # pragma: no cover else: sys.exit(EXIT_SUCCESS) @cli.command() @common_options @core_options @click.argument("path", nargs=1, type=click.Path(allow_dash=True)) def render( path: str, bench: bool, logger: Optional[logging.Logger] = None, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, **kwargs, ) -> None: """Render SQL files and just spit out the result. PATH is the path to a sql file. This should be either a single file ('path/to/file.sql') or a single ('-') character to indicate reading from *stdin*. """ c = get_config( extra_config_path, ignore_local_config, require_dialect=False, **kwargs ) # We don't want anything else to be logged while rendering. 
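    # Illustrative usage (example input only): rendering expands templating
    # (jinja by default) and prints the resulting SQL:
    #
    #   $ echo "select {{ 'a' }} from tbl" | sqlfluff render --dialect ansi -
    #   select a from tbl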
output_stream = make_output_stream(c, None, None) lnt, formatter = get_linter_and_formatter(c, output_stream) verbose = c.get("verbose") progress_bar_configuration.disable_progress_bar = True formatter.dispatch_config(lnt) # Set up logging. set_logging_level( verbosity=verbose, formatter=formatter, logger=logger, stderr_output=False, ) # handle stdin if specified via lone '-' with PathAndUserErrorHandler(formatter): if "-" == path: raw_sql = sys.stdin.read() fname = "stdin" file_config = lnt.config else: raw_sql, file_config, _ = lnt.load_raw_file_and_config(path, lnt.config) fname = path # Get file specific config file_config.process_raw_file_for_config(raw_sql, fname) rendered = lnt.render_string(raw_sql, fname, file_config, "utf8") if rendered.templater_violations: for v in rendered.templater_violations: click.echo(formatter.format_violation(v)) sys.exit(EXIT_FAIL) else: click.echo(rendered.templated_file.templated_str) sys.exit(EXIT_SUCCESS) # This "__main__" handler allows invoking SQLFluff using "python -m", which # simplifies the use of cProfile, e.g.: # python -m cProfile -s cumtime -m sqlfluff.cli.commands lint slow_file.sql if __name__ == "__main__": cli.main(sys.argv[1:]) # pragma: no cover sqlfluff-2.3.5/src/sqlfluff/cli/formatters.py000066400000000000000000000601331451700765000212730ustar00rootroot00000000000000"""Defines the formatters for the CLI.""" import sys from io import StringIO from typing import List, Optional, Tuple, Union import click from colorama import Style from sqlfluff.cli import EXIT_FAIL, EXIT_SUCCESS from sqlfluff.cli.helpers import ( get_package_version, get_python_implementation, get_python_version, pad_line, wrap_field, ) from sqlfluff.cli.outputstream import OutputStream from sqlfluff.core import FluffConfig, Linter, SQLBaseError, TimingSummary from sqlfluff.core.enums import Color from sqlfluff.core.linter import LintedFile, LintingResult, ParsedString def split_string_on_spaces(s: str, line_length: int = 100) -> List[str]: """Split a string into lines based on whitespace. For short strings the functionality is trivial. >>> split_string_on_spaces("abc") ['abc'] For longer sections it will split at an appropriate point. >>> split_string_on_spaces("abc def ghi", line_length=7) ['abc def', 'ghi'] After splitting, multi-space sections should be intact. >>> split_string_on_spaces("a ' ' b c d e f", line_length=11) ["a ' ' b c", 'd e f'] """ line_buff = [] str_buff = "" # NOTE: We *specify* the single space split, so that on reconstruction # we can accurately represent multi space strings. for token in s.split(" "): # Can we put this token on this line without going over? if str_buff: if len(str_buff) + len(token) > line_length: line_buff.append(str_buff) str_buff = token else: str_buff += " " + token else: # In the case that the buffer is already empty, add it without checking, # otherwise there might be things that we might never. str_buff = token # If we have left over buff, add it in if str_buff: line_buff.append(str_buff) return line_buff def format_linting_result_header() -> str: """Format the header of a linting result output.""" text_buffer = StringIO() text_buffer.write("==== readout ====\n") return text_buffer.getvalue() class OutputStreamFormatter: """Formatter which writes to an OutputStream. On instantiation, this formatter accepts a function to dispatch messages. Each public method accepts an object or data in a common format, with this class handling the formatting and output. 
This class is designed to be subclassed if we eventually want to provide other methods of surfacing output. Args: output_stream: Output is sent here verbosity: Specifies how verbose output should be filter_empty: If True, empty messages will not be dispatched output_line_length: Maximum line length """ def __init__( self, output_stream: OutputStream, nocolor: bool, verbosity: int = 0, filter_empty: bool = True, output_line_length: int = 80, ): self._output_stream = output_stream self.plain_output = self.should_produce_plain_output(nocolor) self.verbosity = verbosity self._filter_empty = filter_empty self.output_line_length = output_line_length @staticmethod def should_produce_plain_output(nocolor: bool) -> bool: """Returns True if text output should be plain (not colored).""" return nocolor or not sys.stdout.isatty() def _dispatch(self, s: str) -> None: """Dispatch a string to the callback. This method is designed as a point for subclassing. """ # The strip here is to filter out any empty messages if (not self._filter_empty) or s.strip(" \n\t"): self._output_stream.write(s) def _format_config(self, linter: Linter) -> str: """Format the config of a `Linter`.""" text_buffer = StringIO() # Only show version information if verbosity is high enough if self.verbosity > 0: text_buffer.write("==== sqlfluff ====\n") config_content = [ ("sqlfluff", get_package_version()), ("python", get_python_version()), ("implementation", get_python_implementation()), ("verbosity", self.verbosity), ] if linter.dialect: config_content.append(("dialect", linter.dialect.name)) config_content += linter.templater.config_pairs() text_buffer.write( self.cli_table(config_content, col_width=30, max_label_width=15) ) text_buffer.write("\n") if linter.config.get("rule_allowlist"): text_buffer.write( self.cli_table( [("rules", ", ".join(linter.config.get("rule_allowlist")))], col_width=41, ) ) if self.verbosity > 1: text_buffer.write("\n== Raw Config:\n") text_buffer.write(self.format_config_vals(linter.config.iter_vals())) return text_buffer.getvalue() def dispatch_config(self, linter: Linter) -> None: """Dispatch configuration output appropriately.""" self._dispatch(self._format_config(linter)) def dispatch_persist_filename(self, filename: str, result: str) -> None: """Dispatch filenames during a persist operation.""" # Only show the skip records at higher levels of verbosity if self.verbosity >= 2 or result != "SKIP": self._dispatch(self.format_filename(filename=filename, success=result)) def _format_path(self, path: str) -> str: """Format paths.""" return f"=== [ path: {self.colorize(path, Color.lightgrey)} ] ===\n" def dispatch_path(self, path: str) -> None: """Dispatch paths for display.""" if self.verbosity > 0: self._dispatch(self._format_path(path)) def dispatch_template_header( self, fname: str, linter_config: FluffConfig, file_config: FluffConfig ) -> None: """Dispatch the header displayed before templating.""" if self.verbosity > 1: self._dispatch(self.format_filename(filename=fname, success="TEMPLATING")) # This is where we output config diffs if they exist. if file_config: # Only output config diffs if there is a config to diff to. 
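                # Illustrative diff semantics (example values): if the
                # file-level config overrides dialect=bigquery while the root
                # config has dialect=ansi, diff_to() should return only the
                # overridden keys, which are then rendered by
                # format_config_vals() below.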
config_diff = file_config.diff_to(linter_config) if config_diff: # pragma: no cover self._dispatch(" Config Diff:") self._dispatch( self.format_config_vals( linter_config.iter_vals(cfg=config_diff) ) ) def dispatch_parse_header(self, fname: str) -> None: """Dispatch the header displayed before parsing.""" if self.verbosity > 1: self._dispatch(self.format_filename(filename=fname, success="PARSING")) def dispatch_lint_header(self, fname: str, rules: List[str]) -> None: """Dispatch the header displayed before linting.""" if self.verbosity > 1: self._dispatch( self.format_filename( filename=fname, success=f"LINTING ({', '.join(rules)})" ) ) def dispatch_compilation_header(self, templater: str, message: str) -> None: """Dispatch the header displayed before linting.""" self._dispatch( f"=== [{self.colorize(templater, Color.lightgrey)}] {message}" ) # pragma: no cover def dispatch_processing_header(self, processes: int) -> None: """Dispatch the header displayed before linting.""" if self.verbosity > 0: self._dispatch( # pragma: no cover f"{self.colorize('effective configured processes: ', Color.lightgrey)} " f"{processes}" ) def dispatch_dialect_warning(self, dialect) -> None: """Dispatch a warning for dialects.""" self._dispatch(self.format_dialect_warning(dialect)) # pragma: no cover def _format_file_violations( self, fname: str, violations: List[SQLBaseError] ) -> str: """Format a set of violations in a `LintingResult`.""" text_buffer = StringIO() # Success is based on there being no fails, but we still # want to show the results if there are warnings (even # if no fails). fails = sum( int(not violation.ignore and not violation.warning) for violation in violations ) warns = sum(int(violation.warning) for violation in violations) show = fails + warns > 0 # Only print the filename if it's either a failure or verbosity > 1 if self.verbosity > 0 or show: text_buffer.write(self.format_filename(fname, success=fails == 0)) text_buffer.write("\n") # If we have violations, print them if show: # sort by position in file (using line number and position) s = sorted(violations, key=lambda v: (v.line_no, v.line_pos)) for violation in s: text_buffer.write( self.format_violation( violation, max_line_length=self.output_line_length ) ) text_buffer.write("\n") str_buffer = text_buffer.getvalue() # Remove the trailing newline if there is one if len(str_buffer) > 0 and str_buffer[-1] == "\n": str_buffer = str_buffer[:-1] return str_buffer def dispatch_file_violations( self, fname: str, linted_file: LintedFile, only_fixable: bool, warn_unused_ignores: bool, ) -> None: """Dispatch any violations found in a file.""" if self.verbosity < 0: return s = self._format_file_violations( fname, linted_file.get_violations( fixable=True if only_fixable else None, filter_warning=False, warn_unused_ignores=warn_unused_ignores, ), ) self._dispatch(s) def colorize(self, s: str, color: Optional[Color] = None) -> str: """Optionally use ANSI colour codes to colour a string.""" return self.colorize_helper(self.plain_output, s, color) @staticmethod def colorize_helper( plain_output: bool, s: str, color: Optional[Color] = None ) -> str: """Static version of colorize() method.""" if not color or plain_output: return s else: return f"{color.value}{s}{Style.RESET_ALL}" def cli_table_row( self, fields: List[Tuple[str, str]], col_width, max_label_width=10, sep_char=": ", divider_char=" ", label_color=Color.lightgrey, val_align="right", ) -> str: """Make a row of a CLI table, using wrapped values.""" # Do some intel first cols = len(fields) 
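        # Illustrative layout (example values only): two (label, value)
        # fields at col_width=18 render roughly as:
        #
        #   label_a:      val_a label_b:      val_b
        #          (cont)              (cont)
        #
        # Labels pad left and values pad right; continuation lines blank out
        # the separator (see the `" " * len(sep_char)` branch below).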
last_col_idx = cols - 1 wrapped_fields = [ wrap_field( field[0], field[1], width=col_width, max_label_width=max_label_width, sep_char=sep_char, ) for field in fields ] max_lines = max(fld["lines"] for fld in wrapped_fields) last_line_idx = max_lines - 1 # Make some text buff = StringIO() for line_idx in range(max_lines): for col_idx in range(cols): # Assume we pad labels left and values right fld = wrapped_fields[col_idx] ll = fld["label_list"] vl = fld["val_list"] buff.write( self.colorize( pad_line( ll[line_idx] if line_idx < len(ll) else "", width=fld["label_width"], ), color=label_color, ) ) if line_idx == 0: buff.write(sep_char) else: buff.write(" " * len(sep_char)) buff.write( pad_line( vl[line_idx] if line_idx < len(vl) else "", width=fld["val_width"], align=val_align, ) ) if col_idx != last_col_idx: buff.write(divider_char) elif line_idx != last_line_idx: buff.write("\n") return buff.getvalue() def cli_table( self, fields, col_width=20, cols=2, divider_char=" ", sep_char=": ", label_color=Color.lightgrey, float_format="{0:.2f}", max_label_width=10, val_align="right", ) -> str: """Make a crude ascii table. Assume that `fields` is an iterable of (label, value) pairs. """ # First format all the values into strings formatted_fields = [] for label, value in fields: label = str(label) if isinstance(value, float): value = float_format.format(value) else: value = str(value) formatted_fields.append((label, value)) # Set up a buffer to hold the whole table buff = StringIO() while len(formatted_fields) > 0: row_buff: List[Tuple[str, str]] = [] while len(row_buff) < cols and len(formatted_fields) > 0: row_buff.append(formatted_fields.pop(0)) buff.write( self.cli_table_row( row_buff, col_width=col_width, max_label_width=max_label_width, sep_char=sep_char, divider_char=divider_char, label_color=label_color, val_align=val_align, ) ) if len(formatted_fields) > 0: buff.write("\n") return buff.getvalue() def format_filename( self, filename: str, success: Union[str, bool] = False, success_text: str = "PASS", ) -> str: """Format filenames.""" if isinstance(success, str): status_string = success else: status_string = success_text if success else "FAIL" if status_string in ("PASS", "FIXED", success_text): status_string = self.colorize(status_string, Color.green) elif status_string in ("FAIL", "ERROR"): status_string = self.colorize(status_string, Color.red) return f"== [{self.colorize(filename, Color.lightgrey)}] {status_string}" def format_violation( self, violation: SQLBaseError, max_line_length: int = 90 ) -> str: """Format a violation.""" if not isinstance(violation, SQLBaseError): # pragma: no cover raise ValueError(f"Unexpected violation format: {violation}") desc: str = violation.desc() line_elem = " -" if violation.line_no is None else f"{violation.line_no:4d}" pos_elem = " -" if violation.line_pos is None else f"{violation.line_pos:4d}" if violation.ignore: desc = "IGNORE: " + desc # pragma: no cover elif violation.warning: desc = "WARNING: " + desc # pragma: no cover # If the rule has a name, add that the description. if hasattr(violation, "rule"): rule = getattr(violation, "rule", None) if rule and rule.name: desc += f" [{self.colorize(rule.name, Color.lightgrey)}]" split_desc = split_string_on_spaces(desc, line_length=max_line_length - 25) out_buff = "" # Grey out the violation if we're ignoring or warning it. section_color: Color if violation.ignore or violation.warning: # For now keep warnings and ignores the same colour. The additional # text in the description allows distinction. 
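        # Illustrative single-violation output (example values only):
        #
        #   L:   2 | P:   5 | LT01 | Expected only single space. [layout.spacing]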
section_color = Color.lightgrey else: section_color = Color.blue for idx, line in enumerate(split_desc): if idx == 0: rule_code = violation.rule_code().rjust(4) if "PRS" in rule_code: section_color = Color.red out_buff += self.colorize( f"L:{line_elem} | P:{pos_elem} | {rule_code} | ", section_color, ) else: out_buff += ( "\n" + (" " * 23) + self.colorize( "| ", section_color, ) ) out_buff += line return out_buff def format_linting_stats(self, result, verbose=0) -> str: """Format a set of stats given a `LintingResult`.""" text_buffer = StringIO() all_stats = result.stats(EXIT_FAIL, EXIT_SUCCESS) text_buffer.write("==== summary ====\n") if verbose >= 2: output_fields = [ "files", "violations", "clean files", "unclean files", "avg per file", "unclean rate", "status", ] special_formats = {"unclean rate": "{0:.0%}"} else: output_fields = ["violations", "status"] special_formats = {} # Generate content tuples, applying special formats for some fields summary_content = [ ( key, special_formats[key].format(all_stats[key]) if key in special_formats else all_stats[key], ) for key in output_fields ] # Render it all as a table text_buffer.write(self.cli_table(summary_content, max_label_width=14)) return text_buffer.getvalue() def format_config_vals(self, config_vals) -> str: """Format an iterable of config values from a config object.""" text_buffer = StringIO() for i, k, v in config_vals: val = "" if v is None else str(v) text_buffer.write( (" " * i) + self.colorize( pad_line(str(k) + ":", 20, "left"), color=Color.lightgrey ) + pad_line(val, 20, "left") + "\n" ) return text_buffer.getvalue() def _format_rule_description(self, rule) -> str: """Format individual rule. This is a helper function in .format_rules(). """ if rule.name: name = self.colorize(rule.name, Color.blue) description = f"[{name}] {rule.description}" else: description = rule.description if rule.groups: groups = self.colorize(", ".join(rule.groups), Color.lightgrey) description += f"\ngroups: {groups}" if rule.aliases: aliases = self.colorize(", ".join(rule.aliases), Color.lightgrey) description += f" aliases: {aliases}" return description def format_rules(self, linter: Linter, verbose: int = 0) -> str: """Format the a set of rules given a `Linter`.""" text_buffer = StringIO() text_buffer.write("==== sqlfluff - rules ====\n") text_buffer.write( self.cli_table( [ ( t.code, self._format_rule_description(t), ) for t in linter.rule_tuples() ], col_width=80, cols=1, label_color=Color.blue, val_align="left", ) ) return text_buffer.getvalue() def format_dialects(self, dialect_readout, verbose=0) -> str: """Format the dialects yielded by `dialect_readout`.""" text_buffer = StringIO() text_buffer.write("==== sqlfluff - dialects ====\n") readouts = [ ( dialect.label, f"{dialect.name} dialect [inherits from '{dialect.inherits_from}']", ) for dialect in dialect_readout() ] text_buffer.write( self.cli_table( readouts, col_width=60, cols=1, label_color=Color.blue, val_align="right", ) ) return text_buffer.getvalue() def format_dialect_warning(self, dialect) -> str: """Output a warning for parsing errors.""" return self.colorize( ( "WARNING: Parsing errors found and dialect is set to " f"'{dialect}'. Have you configured your dialect correctly?" ), Color.lightgrey, ) def handle_files_with_tmp_or_prs_errors(self, lint_result: LintingResult) -> int: """Discard lint fixes for files with templating or parse errors. Returns 1 if there are any files with templating or parse errors after filtering, else 0. (Intended as a process exit code.) 
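        Fixes for the affected files are discarded rather than applied, on
        the basis that fixing SQL which did not template or parse cleanly
        is unsafe.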
""" total_errors, num_filtered_errors = lint_result.count_tmp_prs_errors() lint_result.discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors() if total_errors: click.echo( message=self.colorize( f" [{total_errors} templating/parsing errors found]", Color.red ), color=self.plain_output, err=True, ) if num_filtered_errors < total_errors: color = Color.red if num_filtered_errors else Color.green click.echo( message=self.colorize( f" [{num_filtered_errors} templating/parsing errors " f'remaining after "ignore" & "warning"]', color=color, ), color=not self.plain_output, err=num_filtered_errors > 0, ) return EXIT_FAIL if num_filtered_errors else EXIT_SUCCESS def print_out_violations_and_timing( self, output_stream: OutputStream, bench: bool, code_only: bool, total_time: float, verbose: int, parsed_strings: List[ParsedString], ) -> int: """Used by human formatting during the parse.""" violations_count = 0 timing = TimingSummary() for parsed_string in parsed_strings: timing.add(parsed_string.time_dict) if parsed_string.tree: output_stream.write(parsed_string.tree.stringify(code_only=code_only)) else: # TODO: Make this prettier output_stream.write("...Failed to Parse...") # pragma: no cover violations_count += len(parsed_string.violations) if parsed_string.violations: output_stream.write("==== parsing violations ====") # pragma: no cover for v in parsed_string.violations: output_stream.write(self.format_violation(v)) # pragma: no cover if parsed_string.violations: output_stream.write( self.format_dialect_warning(parsed_string.config.get("dialect")) ) if verbose >= 2: output_stream.write("==== timings ====") output_stream.write(self.cli_table(parsed_string.time_dict.items())) if verbose >= 2 or bench: output_stream.write("==== overall timings ====") output_stream.write(self.cli_table([("Clock time", total_time)])) timing_summary = timing.summary() for step in timing_summary: output_stream.write(f"=== {step} ===") output_stream.write(self.cli_table(timing_summary[step].items())) return violations_count def completion_message(self) -> None: """Prints message when SQLFluff is finished.""" click.echo("All Finished" f"{'' if self.plain_output else ' 📜 🎉'}!") sqlfluff-2.3.5/src/sqlfluff/cli/helpers.py000066400000000000000000000052001451700765000205410ustar00rootroot00000000000000"""CLI helper utilities.""" import sys import textwrap from collections import abc from typing import Any, Callable, Dict, List from sqlfluff import __version__ as pkg_version from sqlfluff.core.cached_property import cached_property def get_python_version() -> str: """Get the current python version as a string.""" return "{0[0]}.{0[1]}.{0[2]}".format(sys.version_info) def get_python_implementation() -> str: """Get the current python implementation as a string. This is useful if testing in pypy or similar. """ return sys.implementation.name def get_package_version() -> str: """Get the current version of the sqlfluff package.""" return pkg_version def wrap_elem(s: str, width: int) -> List[str]: """Wrap a string into a list of strings all less than .""" return textwrap.wrap(s, width=width) def wrap_field( label: str, val: str, width: int, max_label_width: int = 10, sep_char: str = ": " ) -> Dict[str, Any]: """Wrap a field (label, val). 
Returns: A dict of {label_list, val_list, sep_char, lines} """ if len(label) > max_label_width: label_list = wrap_elem(label, width=max_label_width) label_width = max(len(line) for line in label_list) else: label_width = len(label) label_list = [label] max_val_width = width - len(sep_char) - label_width val_list = [] for v in val.split("\n"): val_list.extend(wrap_elem(v, width=max_val_width)) return dict( label_list=label_list, val_list=val_list, sep_char=sep_char, lines=max(len(label_list), len(val_list)), label_width=label_width, val_width=max_val_width, ) def pad_line(s: str, width: int, align: str = "left") -> str: """Pad a string with a given alignment to a specific width with spaces.""" gap = width - len(s) if gap <= 0: return s elif align == "left": return s + (" " * gap) elif align == "right": return (" " * gap) + s else: raise ValueError(f"Unknown alignment: {align}") # pragma: no cover class LazySequence(abc.Sequence): """A Sequence which only populates on the first access. This is useful for being able to define sequences within the click cli decorators, which then don't trigger their contents until first accessed. """ def __init__(self, getter: Callable[[], abc.Sequence]): self._getter = getter @cached_property def _sequence(self) -> abc.Sequence: return self._getter() def __getitem__(self, key): return self._sequence[key] def __len__(self): return len(self._sequence) sqlfluff-2.3.5/src/sqlfluff/cli/outputstream.py000066400000000000000000000043741451700765000216640ustar00rootroot00000000000000"""Classes for managing linter output, used with OutputStreamFormatter.""" import abc import os from typing import Any, Optional import click from tqdm import tqdm from sqlfluff.core import FluffConfig from sqlfluff.core.enums import FormatType class OutputStream(abc.ABC): """Base class for linter output stream.""" def __init__(self, config: FluffConfig, context: Any = None) -> None: self.config = config def write(self, message: str) -> None: """Write message to output.""" raise NotImplementedError # pragma: no cover def close(self) -> None: """Close output stream.""" pass class TqdmOutput(OutputStream): """Outputs to stdout, coordinates to avoid conflict with tqdm. It may happen that the progress bar conflicts with extra printing. Nothing very serious happens then, except that a progress bar line may be printed and not removed. The `external_write_mode` context manager pauses tqdm output while the message is written. """ def __init__(self, config: FluffConfig) -> None: super().__init__(config) def write(self, message: str) -> None: """Write message to stdout.""" with tqdm.external_write_mode(): click.echo(message=message, color=self.config.get("color")) class FileOutput(OutputStream): """Outputs to a specified file.""" def __init__(self, config: FluffConfig, output_path: str) -> None: super().__init__(config) self.file = open(output_path, "w") def write(self, message: str) -> None: """Write message to output_path.""" print(message, file=self.file) def close(self) -> None: """Close output file.""" self.file.close() def make_output_stream( config: FluffConfig, format: Optional[str] = None, output_path: Optional[str] = None, ) -> OutputStream: """Create and return appropriate OutputStream instance.""" if format is None or format == FormatType.human.value: if not output_path: # Human-format output to stdout. return TqdmOutput(config) else: # Human-format output to a file. 
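            # Dispatch summary (sketch of the branches in this function):
            # human format with no output path -> TqdmOutput (stdout);
            # human format with a path -> FileOutput(output_path); any
            # non-human format -> FileOutput(os.devnull), since
            # machine-readable payloads are written separately by the
            # caller (e.g. dump_file_payload in commands.py).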
return FileOutput(config, output_path) else: # Discard human output as not required return FileOutput(config, os.devnull) sqlfluff-2.3.5/src/sqlfluff/core/000077500000000000000000000000001451700765000167115ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/__init__.py000066400000000000000000000030651451700765000210260ustar00rootroot00000000000000"""The core elements of sqlfluff.""" import tblib.pickling_support # Config objects from sqlfluff.core.config import FluffConfig # Dialect introspection from sqlfluff.core.dialects import dialect_readout, dialect_selector # All of the errors. from sqlfluff.core.errors import ( SQLBaseError, SQLFluffUserError, SQLLexError, SQLLintError, SQLParseError, SQLTemplaterError, ) # Public classes from sqlfluff.core.linter import Linter from sqlfluff.core.parser import Lexer, Parser # Timing objects from sqlfluff.core.timing import TimingSummary __all__ = ( "FluffConfig", "Linter", "Lexer", "Parser", "dialect_selector", "dialect_readout", "SQLBaseError", "SQLTemplaterError", "SQLLexError", "SQLParseError", "SQLLintError", "SQLFluffUserError", "TimingSummary", ) # This is for "sqlfluff lint" and "sqlfluff fix" multiprocessing (--processes) # support. If an exception (i.e. runtime error) occurs in a worker process, we # want to return the tracebook to the main process and report it there, as part # of the normal output. However, anything returned from a multiprocessing.Pool # worker must be serializable using "pickle". By default, Python traceback # objects cannot be pickled. The tblib package addresses this limitation; we # simply need to install it before creating the worker pool. See these links for # additional context: # https://pypi.org/project/tblib/ # https://stackoverflow.com/questions/6126007/python-getting-a-traceback-from-a-multiprocessing-process tblib.pickling_support.install() sqlfluff-2.3.5/src/sqlfluff/core/cached_property.py000066400000000000000000000004131451700765000224340ustar00rootroot00000000000000"""Module to handle cached_property version dependent imports.""" import sys if sys.version_info >= (3, 8): from functools import cached_property else: # pragma: no cover from backports.cached_property import cached_property __all__ = ("cached_property",) sqlfluff-2.3.5/src/sqlfluff/core/config.py000066400000000000000000001322511451700765000205340ustar00rootroot00000000000000"""Module for loading config.""" from __future__ import annotations try: from importlib.resources import files except ImportError: # pragma: no cover # fallback for python <=3.8 from importlib_resources import files # type: ignore import configparser import logging import os import os.path import sys from dataclasses import dataclass from itertools import chain from pathlib import Path from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Type, Union, ) import appdirs import pluggy from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.helpers.dict import dict_diff, nested_combine from sqlfluff.core.helpers.string import ( split_colon_separated_string, split_comma_separated_string, ) from sqlfluff.core.plugin.host import get_plugin_manager if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.templaters.base import RawTemplater if sys.version_info >= (3, 11): import tomllib else: # pragma: no cover import toml as tomllib # Instantiate the config logger config_logger = logging.getLogger("sqlfluff.config") global_loader = None """:obj:`ConfigLoader`: A variable to hold the single module loader when 
loaded. We define a global loader, so that between calls to load config, we can still cache appropriately """ ConfigElemType = Tuple[Tuple[str, ...], Any] ALLOWABLE_LAYOUT_CONFIG_KEYS = ( "spacing_before", "spacing_after", "spacing_within", "line_position", "align_within", "align_scope", ) @dataclass class _RemovedConfig: old_path: Tuple[str, ...] warning: str new_path: Optional[Tuple[str, ...]] = None translation_func: Optional[Callable[[str], str]] = None REMOVED_CONFIGS = [ _RemovedConfig( ("rules", "L003", "hanging_indents"), ( "Hanging indents are no longer supported in SQLFluff " "from version 2.0.0 onwards. See " "https://docs.sqlfluff.com/en/stable/layout.html#hanging-indents" ), ), _RemovedConfig( ("rules", "max_line_length"), ( "The max_line_length config has moved " "from sqlfluff:rules to the root sqlfluff level." ), ("max_line_length",), (lambda x: x), ), _RemovedConfig( ("rules", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L002", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L003", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L004", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "L016", "tab_space_size"), ( "The tab_space_size config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "tab_space_size"), (lambda x: x), ), _RemovedConfig( ("rules", "indent_unit"), ( "The indent_unit config has moved " "from sqlfluff:rules to sqlfluff:indentation." ), ("indentation", "indent_unit"), (lambda x: x), ), _RemovedConfig( ("rules", "LT03", "operator_new_lines"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":binary_operator)." ), ("layout", "type", "binary_operator", "line_position"), (lambda x: "trailing" if x == "before" else "leading"), ), _RemovedConfig( ("rules", "comma_style"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":comma)." ), ("layout", "type", "comma", "line_position"), (lambda x: x), ), # LT04 used to have a more specific version of the same /config itself. _RemovedConfig( ("rules", "LT04", "comma_style"), ( "Use the line_position config in the appropriate " "sqlfluff:layout section (e.g. sqlfluff:layout:type" ":comma)." ), ("layout", "type", "comma", "line_position"), (lambda x: x), ), _RemovedConfig( ("rules", "L003", "lint_templated_tokens"), "No longer used.", ), _RemovedConfig( ("core", "recurse"), "Removed as unused in production and unnecessary for debugging.", ), ] def coerce_value(val: str) -> Any: """Try to coerce to a more specific type.""" # Try to coerce it to a more specific type, # otherwise just make it a string. try: v: Any = int(val) except ValueError: try: v = float(val) except ValueError: cleaned_val = val.strip().lower() if cleaned_val in ["true"]: v = True elif cleaned_val in ["false"]: v = False elif cleaned_val in ["none"]: v = None else: v = val return v class ConfigLoader: """The class for loading config files. 
Note: Unlike most cfg file readers, sqlfluff is case-sensitive in how it reads config files. This is to ensure we support the case sensitivity of jinja. """ def __init__(self) -> None: # TODO: check that this cache implementation is actually useful self._config_cache: Dict[str, Dict[str, Any]] = {} @classmethod def get_global(cls) -> ConfigLoader: """Get the singleton loader.""" global global_loader if not global_loader: global_loader = cls() return global_loader @classmethod def _walk_toml( cls, config: Dict[str, Any], base_key: Tuple[str, ...] = () ) -> List[Tuple[Tuple[str, ...], Any]]: """Recursively walk the nested config inside a TOML file. For standard usage it mimics the standard loader. >>> ConfigLoader._walk_toml({"foo": "bar"}) [(('foo',), 'bar')] >>> ConfigLoader._walk_toml({"foo": {"bar": "baz"}}) [(('foo', 'bar'), 'baz')] For the "rules" section, there's a special handling to condense nested sections from the toml for rules which contain a dot (or more) (".") in their name. >>> ConfigLoader._walk_toml({"rules": {"a": {"b": {"c": "d"}}}}) [(('rules', 'a.b', 'c'), 'd')] >>> ConfigLoader._walk_toml({"rules": ... {"capitalisation": {"keywords": ... {"capitalisation_policy": "upper"} ... }} ... }) [(('rules', 'capitalisation.keywords', 'capitalisation_policy'), 'upper')] NOTE: Some rules make have more than one dot in their name. >>> ConfigLoader._walk_toml({"rules": ... {"a": {"b": {"c": {"d": {"e": "f"}}}}} ... }) [(('rules', 'a.b.c.d', 'e'), 'f')] """ buff: List[Tuple[Tuple[str, ...], Any]] = [] # NOTE: For the "rules" section of the sqlfluff config, # rule names are often qualified with a dot ".". In the # toml scenario this can get interpreted as a nested # section, and we resolve that edge case here. if len(base_key) == 3 and base_key[0] == "rules": base_key = ("rules", ".".join(base_key[1:])) for k, v in config.items(): key = base_key + (k,) if isinstance(v, dict): buff.extend(cls._walk_toml(v, key)) else: buff.append((key, v)) return buff @classmethod def _iter_config_elems_from_dict( cls, configs: Dict[str, Any] ) -> Iterator[ConfigElemType]: """Walk a config dict and get config elements. >>> list( ... ConfigLoader._iter_config_elems_from_dict( ... {"foo":{"bar":{"baz": "a", "biz": "b"}}} ... ) ... ) [(('foo', 'bar', 'baz'), 'a'), (('foo', 'bar', 'biz'), 'b')] """ for key, val in configs.items(): if isinstance(val, dict): for partial_key, sub_val in cls._iter_config_elems_from_dict(val): yield (key,) + partial_key, sub_val else: yield (key,), val @classmethod def _config_elems_to_dict(cls, configs: Iterable[ConfigElemType]) -> Dict[str, Any]: """Reconstruct config elements into a dict. >>> ConfigLoader._config_elems_to_dict( ... [(("foo", "bar", "baz"), "a"), (("foo", "bar", "biz"), "b")] ... ) {'foo': {'bar': {'baz': 'a', 'biz': 'b'}}} """ result: Dict[str, Any] = {} for key, val in configs: ref = result for step in key[:-1]: if step not in ref: ref[step] = {} ref = ref[step] ref[key[-1]] = val return result @classmethod def _get_config_elems_from_toml(cls, fpath: str) -> List[ConfigElemType]: """Load a config from a TOML file and return a list of tuples. The return value is a list of tuples, were each tuple has two elements, the first is a tuple of paths, the second is the value at that path. 
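        For example (illustrative input), a pyproject.toml containing:

            [tool.sqlfluff.core]
            dialect = "ansi"

        would yield [(("core", "dialect"), "ansi")].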
""" with open(fpath, mode="r") as file: config = tomllib.loads(file.read()) tool = config.get("tool", {}).get("sqlfluff", {}) return cls._walk_toml(tool) @classmethod def _get_config_elems_from_file( cls, fpath: Optional[str] = None, config_string: Optional[str] = None ) -> List[ConfigElemType]: """Load a config from a file and return a list of tuples. The return value is a list of tuples, were each tuple has two elements, the first is a tuple of paths, the second is the value at that path. Note: Unlike most cfg file readers, sqlfluff is case-sensitive in how it reads config files. Note: Any variable names ending with `_path` or `_dir`, will be attempted to be resolved as relative paths to this config file. If that fails the string value will remain. """ assert fpath or config_string, "One of fpath or config_string is required." buff: List[ConfigElemType] = [] # Disable interpolation so we can load macros kw: Dict[str, Any] = {} kw["interpolation"] = None config = configparser.ConfigParser(delimiters="=", **kw) # NB: We want to be case sensitive in how we read from files, # because jinja is also case sensitive. To do this we override # the optionxform attribute. config.optionxform = lambda option: option # type: ignore if fpath: config.read(fpath) else: assert config_string config.read_string(config_string) # Set the fpath to the current working directory fpath = os.getcwd() for k in config.sections(): if k == "sqlfluff": key: Tuple[str, ...] = ("core",) elif k.startswith("sqlfluff:"): # Return a tuple of nested values key = tuple(k[len("sqlfluff:") :].split(":")) else: # pragma: no cover # if it doesn't start with sqlfluff, then don't go # further on this iteration continue for name, val in config.items(section=k): # Try to coerce it to a more specific type, # otherwise just make it a string. v = coerce_value(val) # Attempt to resolve paths if name.lower() == "load_macros_from_path": # Comma-separated list of paths. paths = split_comma_separated_string(val) v_temp = [] for path in paths: v_temp.append(cls._resolve_path(fpath, path)) v = ",".join(v_temp) elif name.lower().endswith(("_path", "_dir")): # One path v = cls._resolve_path(fpath, val) # Add the name to the end of the key buff.append((key + (name,), v)) return buff @classmethod def _resolve_path(cls, fpath: str, val: str) -> str: """Try to resolve a path.""" # Make the referenced path. ref_path = os.path.join(os.path.dirname(fpath), val) # Check if it exists, and if it does, replace the value with the path. return ref_path if os.path.exists(ref_path) else val @staticmethod def _incorporate_vals( ctx: Dict[str, Any], vals: List[ConfigElemType] ) -> Dict[str, Any]: """Take a list of tuples and incorporate it into a dictionary. >>> ConfigLoader._incorporate_vals({}, [(("a", "b"), "c")]) {'a': {'b': 'c'}} >>> ConfigLoader._incorporate_vals({"a": {"b": "c"}}, [(("a", "d"), "e")]) {'a': {'b': 'c', 'd': 'e'}} """ for k, v in vals: # Keep a ref we can use for recursion r = ctx # Get the name of the variable n = k[-1] # Get the path pth = k[:-1] for dp in pth: # Does this path exist? if dp in r: if isinstance(r[dp], dict): r = r[dp] else: # pragma: no cover raise ValueError(f"Overriding config value with section! [{k}]") else: r[dp] = {} r = r[dp] # Deal with the value itself r[n] = v return ctx @staticmethod def _validate_configs( configs: Iterable[ConfigElemType], file_path: str ) -> List[ConfigElemType]: """Validate config elements. We validate in two ways: 1. Are these config settings removed or deprecated. 2. 
Are these config elements in the layout section _valid_. """ config_map = {cfg.old_path: cfg for cfg in REMOVED_CONFIGS} # Materialise the configs into a list so we can iterate twice. new_configs = list(configs) defined_keys = {k for k, _ in new_configs} validated_configs = [] for k, v in new_configs: # First validate against the removed option list. if k in config_map.keys(): formatted_key = ":".join(k) removed_option = config_map[k] # Is there a mapping option? if removed_option.translation_func and removed_option.new_path: formatted_new_key = ":".join(removed_option.new_path) # Before mutating, check we haven't _also_ set the new value. if removed_option.new_path in defined_keys: # Raise a warning. config_logger.warning( f"\nWARNING: Config file {file_path} set a deprecated " f"config value `{formatted_key}` (which can be migrated) " f"but ALSO set the value it would be migrated to. The new " f"value (`{removed_option.new_path}`) takes precedence. " "Please update your configuration to remove this warning. " f"\n\n{removed_option.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/configuration.html" " for more details.\n" ) # continue to NOT add this value in the set continue # Mutate and warn. v = removed_option.translation_func(v) k = removed_option.new_path # NOTE: At the stage of emitting this warning, we may not yet # have set up red logging because we haven't yet loaded the config # file. For that reason, this error message has a bit more padding. config_logger.warning( f"\nWARNING: Config file {file_path} set a deprecated config " f"value `{formatted_key}`. This will be removed in a later " "release. This has been mapped to " f"`{formatted_new_key}` set to a value of `{v}` for this run. " "Please update your configuration to remove this warning. " f"\n\n{removed_option.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/configuration.html" " for more details.\n" ) else: # Raise an error. raise SQLFluffUserError( f"Config file {file_path!r} set an outdated config " f"value {formatted_key}.\n\n{removed_option.warning}\n\n" "See https://docs.sqlfluff.com/en/stable/configuration.html" " for more details." ) # Second, validate any layout configs for validity. # NOTE: For now we don't check that the "type" is a valid one # to reference, or that the values are valid. For the values, # these are likely to be rejected by the layout routines at # runtime. The last risk area is validating that the type is # a valid one. if k and k[0] == "layout": # Check for: # - Key length # - Key values if ( # Key length must be 4 (len(k) != 4) # Second value must (currently) be "type" or (k[1] != "type") # Last key value must be one of the allowable options. or (k[3] not in ALLOWABLE_LAYOUT_CONFIG_KEYS) ): raise SQLFluffUserError( f"Config file {file_path!r} set an invalid `layout` option " f"value {':'.join(k)}.\n" "See https://docs.sqlfluff.com/en/stable/layout.html" "#configuring-layout for more details." ) validated_configs.append((k, v)) return validated_configs def load_config_resource( self, package: str, file_name: str, configs: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: """Load a config resource. This is, however, more compatible with mypyc because it avoids the use of the __file__ object to find the default config. This is only tested extensively with the default config. NOTE: This requires that the config file is built into a package but should be more performant because it leverages importlib. 
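        Illustrative call (the package and file name here are assumptions
        for the example): loader.load_config_resource(
        package="sqlfluff.core", file_name="default_config.cfg") would
        load a config bundled inside that package.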
        https://docs.python.org/3/library/importlib.resources.html
        """
        config_string = files(package).joinpath(file_name).read_text()
        elems = self._get_config_elems_from_file(config_string=config_string)
        elems = self._validate_configs(elems, package + "." + file_name)
        return self._incorporate_vals(configs or {}, elems)

    def load_config_file(
        self, file_dir: str, file_name: str, configs: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Load a config file."""
        file_path = os.path.join(file_dir, file_name)
        if file_name == "pyproject.toml":
            elems = self._get_config_elems_from_toml(file_path)
        else:
            elems = self._get_config_elems_from_file(file_path)
        elems = self._validate_configs(elems, file_path)
        return self._incorporate_vals(configs or {}, elems)

    def load_config_string(
        self, config_string: str, configs: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Load a config from the string in cfg format."""
        elems = self._get_config_elems_from_file(config_string=config_string)
        elems = self._validate_configs(elems, "<config string>")
        return self._incorporate_vals(configs or {}, elems)

    def load_config_at_path(self, path: str) -> Dict[str, Any]:
        """Load config from a given path."""
        # First check the cache
        if str(path) in self._config_cache:
            return self._config_cache[str(path)]

        # The potential filenames we would look for at this path.
        # NB: later in this list overwrites earlier
        filename_options = [
            "setup.cfg",
            "tox.ini",
            "pep8.ini",
            ".sqlfluff",
            "pyproject.toml",
        ]

        configs: Dict[str, Any] = {}

        if os.path.isdir(path):
            p = path
        else:
            p = os.path.dirname(path)

        d = os.listdir(os.path.expanduser(p))
        # iterate this way round to make sure things overwrite in the right direction
        for fname in filename_options:
            if fname in d:
                configs = self.load_config_file(p, fname, configs=configs)

        # Store in the cache
        self._config_cache[str(path)] = configs
        return configs

    def load_extra_config(self, extra_config_path: str) -> Dict[str, Any]:
        """Load specified extra config."""
        if not os.path.exists(extra_config_path):
            raise SQLFluffUserError(
                f"Extra config '{extra_config_path}' does not exist."
) # First check the cache if str(extra_config_path) in self._config_cache: return self._config_cache[str(extra_config_path)] configs: Dict[str, Any] = {} if extra_config_path.endswith("pyproject.toml"): elems = self._get_config_elems_from_toml(extra_config_path) else: elems = self._get_config_elems_from_file(extra_config_path) configs = self._incorporate_vals(configs, elems) # Store in the cache self._config_cache[str(extra_config_path)] = configs return configs @staticmethod def _get_user_config_dir_path() -> str: appname = "sqlfluff" appauthor = "sqlfluff" # On Mac OSX follow Linux XDG base dirs # https://github.com/sqlfluff/sqlfluff/issues/889 user_config_dir_path = os.path.expanduser("~/.config/sqlfluff") if appdirs.system == "darwin": appdirs.system = "linux2" user_config_dir_path = appdirs.user_config_dir(appname, appauthor) appdirs.system = "darwin" if not os.path.exists(user_config_dir_path): user_config_dir_path = appdirs.user_config_dir(appname, appauthor) return user_config_dir_path def load_user_appdir_config(self) -> Dict[str, Any]: """Load the config from the user's OS specific appdir config directory.""" user_config_dir_path = self._get_user_config_dir_path() if os.path.exists(user_config_dir_path): return self.load_config_at_path(user_config_dir_path) else: return {} def load_user_config(self) -> Dict[str, Any]: """Load the config from the user's home directory.""" user_home_path = os.path.expanduser("~") return self.load_config_at_path(user_home_path) def load_config_up_to_path( self, path: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, ) -> Dict[str, Any]: """Loads a selection of config files from both the path and its parent paths.""" user_appdir_config = ( self.load_user_appdir_config() if not ignore_local_config else {} ) user_config = self.load_user_config() if not ignore_local_config else {} config_paths = ( self.iter_config_locations_up_to_path(path) if not ignore_local_config else {} ) config_stack = ( [self.load_config_at_path(p) for p in config_paths] if not ignore_local_config else [] ) extra_config = ( self.load_extra_config(extra_config_path) if extra_config_path else {} ) return nested_combine( user_appdir_config, user_config, *config_stack, extra_config ) @classmethod def find_ignore_config_files( cls, path: str, working_path: Union[str, Path] = Path.cwd(), ignore_file_name: str = ".sqlfluffignore", ) -> Set[str]: """Finds sqlfluff ignore files from both the path and its parent paths.""" return set( filter( os.path.isfile, map( lambda x: os.path.join(x, ignore_file_name), cls.iter_config_locations_up_to_path( path=path, working_path=working_path ), ), ) ) @staticmethod def iter_config_locations_up_to_path( path: str, working_path: Union[str, Path] = Path.cwd() ) -> Iterator[str]: """Finds config locations from both the path and its parent paths. The lowest priority is the user appdir, then home dir, then increasingly the configs closest to the file being directly linted. """ given_path = Path(path).absolute() working_path = Path(working_path).absolute() # If we've been passed a file and not a directory, # then go straight to the directory. if not given_path.is_dir(): given_path = given_path.parent common_path = Path(os.path.commonpath([working_path, given_path])) # we have a sub path! 
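        # Illustrative example (hypothetical paths): with a working_path of
        # "/home/user/project" and a path of
        # "/home/user/project/models/staging/orders.sql", the common path is
        # "/home/user/project", and this generator would yield:
        #   /home/user/project
        #   /home/user/project/models
        #   /home/user/project/models/staging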
        # We can load nested paths.
        path_to_visit = common_path
        while path_to_visit != given_path:
            yield str(path_to_visit.resolve())
            next_path_to_visit = (
                path_to_visit / given_path.relative_to(path_to_visit).parts[0]
            )
            if next_path_to_visit == path_to_visit:  # pragma: no cover
                # we're not making progress...
                # [prevent infinite loop]
                break
            path_to_visit = next_path_to_visit

        yield str(given_path.resolve())


class FluffConfig:
    """The class that actually gets passed around as a config object."""

    private_vals = "rule_denylist", "rule_allowlist", "dialect_obj", "templater_obj"

    def __init__(
        self,
        configs: Optional[Dict[str, Any]] = None,
        extra_config_path: Optional[str] = None,
        ignore_local_config: bool = False,
        overrides: Optional[Dict[str, Any]] = None,
        plugin_manager: Optional[pluggy.PluginManager] = None,
        # Ideally a dialect should be set when config is read but sometimes
        # it might only be set in nested .sqlfluff config files, so allow it
        # to be not required.
        require_dialect: bool = True,
    ):
        self._extra_config_path = (
            extra_config_path  # We only store this for child configs
        )
        self._ignore_local_config = (
            ignore_local_config  # We only store this for child configs
        )
        # If overrides are provided, validate them early.
        if overrides:
            overrides = ConfigLoader._config_elems_to_dict(
                ConfigLoader._validate_configs(
                    [
                        (("core",) + k, v)
                        for k, v in ConfigLoader._iter_config_elems_from_dict(overrides)
                    ],
                    "<override>",
                )
            )["core"]
        self._overrides = overrides  # We only store this for child configs

        # Fetch a fresh plugin manager if we weren't provided with one
        self._plugin_manager = plugin_manager or get_plugin_manager()

        defaults = nested_combine(*self._plugin_manager.hook.load_default_config())
        # If any existing configs are provided, validate them:
        if configs:
            configs = ConfigLoader._config_elems_to_dict(
                ConfigLoader._validate_configs(
                    ConfigLoader._iter_config_elems_from_dict(configs),
                    "<dict>",
                )
            )
        self._configs = nested_combine(
            defaults, configs or {"core": {}}, {"core": overrides or {}}
        )
        # Some configs require special treatment
        self._configs["core"]["color"] = (
            False if self._configs["core"].get("nocolor", False) else None
        )
        # Handle inputs which are potentially comma separated strings
        for in_key, out_key in [  # Deal with potential ignore & warning parameters
            ("ignore", "ignore"),
            ("warnings", "warnings"),
            ("rules", "rule_allowlist"),
            # Allowlists and denylists
            ("exclude_rules", "rule_denylist"),
        ]:
            if self._configs["core"].get(in_key, None):
                # Check whether the value is a string, as it can
                # potentially be a list too.
                self._configs["core"][out_key] = split_comma_separated_string(
                    self._configs["core"][in_key]
                )
            else:
                self._configs["core"][out_key] = []
        # Dialect and Template selection.
        dialect: Optional[str] = self._configs["core"]["dialect"]
        self._initialise_dialect(dialect, require_dialect)

        self._configs["core"]["templater_obj"] = self.get_templater(
            self._configs["core"]["templater"]
        )

    def _initialise_dialect(
        self, dialect: Optional[str], require_dialect: bool = True
    ) -> None:
        # NB: We import here to avoid circular references.
        from sqlfluff.core.dialects import dialect_selector

        if dialect is not None:
            self._configs["core"]["dialect_obj"] = dialect_selector(
                self._configs["core"]["dialect"]
            )
        elif require_dialect:
            self.verify_dialect_specified()

    def verify_dialect_specified(self) -> None:
        """Check if the config specifies a dialect, raising an error if not."""
        dialect: Optional[str] = self._configs["core"]["dialect"]
        if dialect is None:
            # Get list of available dialects for the error message.
We must # import here rather than at file scope in order to avoid a circular # import. from sqlfluff.core.dialects import dialect_readout raise SQLFluffUserError( "No dialect was specified. You must configure a dialect or " "specify one on the command line using --dialect after the " "command. Available dialects:\n" f"{', '.join([d.label for d in dialect_readout()])}" ) def __getstate__(self) -> Dict[str, Any]: # Copy the object's state from self.__dict__ which contains # all our instance attributes. Always use the dict.copy() # method to avoid modifying the original state. state = self.__dict__.copy() # Remove the unpicklable entries. del state["_plugin_manager"] # The dbt templater doesn't pickle well, but isn't required # within threaded operations. If it was, it could easily be # rehydrated within the thread. state["_configs"]["core"].pop("templater_obj", None) return state def __setstate__(self, state: Dict[str, Any]) -> None: # pragma: no cover # Restore instance attributes self.__dict__.update(state) # NB: We don't reinstate the plugin manager, but this should only # be happening between processes where the plugin manager should # probably be fresh in any case. # NOTE: This means that registering user plugins directly will only # work if those plugins are used in the main process (i.e. templaters). # User registered linting rules either must be "installed" and therefore # available to all processes - or their use is limited to only single # process invocations of sqlfluff. In the event that user registered # rules are used in a multi-process invocation, they will not be applied # in the child processes. # NOTE: Likewise we don't reinstate the "templater_obj" config value # which should also only be used in the main thread rather than child # processes. @classmethod def from_root( cls, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[Dict[str, Any]] = None, **kw: Any, ) -> FluffConfig: """Loads a config object just based on the root directory.""" loader = ConfigLoader.get_global() c = loader.load_config_up_to_path( path=".", extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, ) return cls( configs=c, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, **kw, ) @classmethod def from_string( cls, config_string: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[Dict[str, Any]] = None, plugin_manager: Optional[pluggy.PluginManager] = None, ) -> FluffConfig: """Loads a config object from a single config string.""" loader = ConfigLoader.get_global() c = loader.load_config_string(config_string) return cls( configs=c, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, plugin_manager=plugin_manager, ) @classmethod def from_strings( cls, *config_strings: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[Dict[str, Any]] = None, plugin_manager: Optional[pluggy.PluginManager] = None, ) -> FluffConfig: """Loads a config object given a series of nested config strings. Config strings are incorporated from first to last, treating the first element as the "root" config, and then later config strings will take precedence over any earlier values. 
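
        For example (an illustrative sketch, using settings which exist in
        the default config):

            config = FluffConfig.from_strings(
                "[sqlfluff]\ndialect = ansi",
                "[sqlfluff]\nmax_line_length = 100",
            )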
""" loader = ConfigLoader.get_global() config_state: Dict[str, Any] = {} for config_string in config_strings: config_state = loader.load_config_string( config_string, configs=config_state ) return cls( configs=config_state, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, plugin_manager=plugin_manager, ) @classmethod def from_path( cls, path: str, extra_config_path: Optional[str] = None, ignore_local_config: bool = False, overrides: Optional[Dict[str, Any]] = None, plugin_manager: Optional[pluggy.PluginManager] = None, ) -> FluffConfig: """Loads a config object given a particular path.""" loader = ConfigLoader.get_global() c = loader.load_config_up_to_path( path=path, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, ) return cls( configs=c, extra_config_path=extra_config_path, ignore_local_config=ignore_local_config, overrides=overrides, plugin_manager=plugin_manager, ) @classmethod def from_kwargs( cls, config: Optional[FluffConfig] = None, dialect: Optional[str] = None, rules: Optional[List[str]] = None, exclude_rules: Optional[List[str]] = None, require_dialect: bool = True, ) -> FluffConfig: """Instantiate a config from either an existing config or kwargs. This is a convenience method for the ways that the public classes like Linter(), Parser() and Lexer() can be instantiated with a FluffConfig or with the convenience kwargs: dialect & rules. """ if (dialect or rules) and config: # pragma: no cover raise ValueError( "Cannot specify `config` with `dialect` or `rules`. Any config object " "specifies its own dialect and rules." ) elif config: return config overrides = {} if dialect: overrides["dialect"] = dialect if rules: # Make a comma separated string to pass in as override overrides["rules"] = ",".join(rules) if exclude_rules: # Make a comma separated string to pass in as override overrides["exclude_rules"] = ",".join(exclude_rules) return cls(overrides=overrides, require_dialect=require_dialect) def get_templater( self, templater_name: str = "jinja", **kwargs: Any ) -> "RawTemplater": """Fetch a templater by name.""" templater_lookup: Dict[str, Type["RawTemplater"]] = { templater.name: templater for templater in chain.from_iterable( self._plugin_manager.hook.get_templaters() ) } try: cls = templater_lookup[templater_name] # Instantiate here, optionally with kwargs return cls(**kwargs) except KeyError: if templater_name == "dbt": # pragma: no cover config_logger.warning( "Starting in sqlfluff version 0.7.0 the dbt templater is " "distributed as a separate python package. Please pip install " "sqlfluff-templater-dbt to use it." ) raise SQLFluffUserError( "Requested templater {!r} which is not currently available. Try one of " "{}".format(templater_name, ", ".join(templater_lookup.keys())) ) def make_child_from_path(self, path: str) -> FluffConfig: """Make a child config at a path but pass on overrides and extra_config_path.""" return self.from_path( path, extra_config_path=self._extra_config_path, ignore_local_config=self._ignore_local_config, overrides=self._overrides, plugin_manager=self._plugin_manager, ) def diff_to(self, other: FluffConfig) -> Dict[str, Any]: """Compare this config to another. Args: other (:obj:`FluffConfig`): Another config object to compare against. We will return keys from *this* object that are not in `other` or are different to those in `other`. Returns: A filtered dict of items in this config that are not in the other or are different to the other. 
""" # We ignore some objects which are not meaningful in the comparison # e.g. dialect_obj, which is generated on the fly. return dict_diff(self._configs, other._configs, ignore=["dialect_obj"]) def get( self, val: str, section: Union[str, Iterable[str]] = "core", default: Any = None ) -> Any: """Get a particular value from the config.""" section_dict = self.get_section(section) if section_dict is None: return default return section_dict.get(val, default) def get_section(self, section: Union[str, Iterable[str]]) -> Any: """Return a whole section of config as a dict. If the element found at the address is a value and not a section, it is still returned and so this can be used as a more advanced from of the basic `get` method. Args: section: An iterable or string. If it's a string we load that root section. If it's an iterable of strings, then we treat it as a path within the dictionary structure. """ if isinstance(section, str): return self._configs.get(section, None) else: # Try iterating buff = self._configs for sec in section: buff = buff.get(sec, None) if buff is None: return None return buff def set_value(self, config_path: Iterable[str], val: Any) -> None: """Set a value at a given path.""" # Make the path a list so we can index on it config_path = list(config_path) # Coerce the value into something more useful. config_val = coerce_value(val) # Sort out core if not there if len(config_path) == 1: # pragma: no cover TODO? config_path = ["core"] + config_path # Current section: dict_buff = [self._configs] for elem in config_path[:-1]: dict_buff.append(dict_buff[-1].get(elem, {})) # Set the value dict_buff[-1][config_path[-1]] = config_val # Rebuild the config for elem in reversed(config_path[:-1]): dict_elem = dict_buff.pop() dict_buff[-1][elem] = dict_elem self._configs = dict_buff[0] def iter_vals( self, cfg: Optional[Dict[str, Any]] = None ) -> Iterable[Tuple[Any, ...]]: """Return an iterable of tuples representing keys. We show values before dicts, the tuple contains an indent value to know what level of the dict we're in. Dict labels will be returned as a blank value before their content. """ cfg = cfg or self._configs # Get keys and sort keys = sorted(cfg.keys()) # First iterate values (alphabetically): for k in keys: if ( not isinstance(cfg[k], dict) and cfg[k] is not None and k not in self.private_vals ): yield (0, k, cfg[k]) # Then iterate dicts (alphabetically (but `core` comes first if it exists)) for k in keys: if isinstance(cfg[k], dict): # First yield the dict label yield (0, k, "") # Then yield its content for idnt, key, val in self.iter_vals(cfg=cfg[k]): yield (idnt + 1, key, val) def process_inline_config(self, config_line: str, fname: str) -> None: """Process an inline config command and update self.""" # Strip preceding comment marks if config_line.startswith("--"): config_line = config_line[2:].strip() # Strip preceding sqlfluff line. if not config_line.startswith("sqlfluff:"): # pragma: no cover config_logger.warning( "Unable to process inline config statement: %r", config_line ) return config_line = config_line[9:].strip() config_val = split_colon_separated_string(config_line) # Validate the value ConfigLoader._validate_configs([config_val], fname) # Set the value self.set_value(*config_val) # If the config is for dialect, initialise the dialect. # NOTE: Comparison with a 1-tuple is intentional here as # the first element of config_val is a tuple. 
if config_val[0] == ("dialect",): self._initialise_dialect(config_val[1]) def process_raw_file_for_config(self, raw_str: str, fname: str) -> None: """Process a full raw file for inline config and update self.""" # Scan the raw file for config commands. for raw_line in raw_str.splitlines(): # With or without a space. if raw_line.startswith(("-- sqlfluff", "--sqlfluff")): # Found a in-file config command self.process_inline_config(raw_line, fname) class ProgressBarConfiguration: """Singleton-esque progress bar configuration. It's expected to be set during starting with parameters coming from commands parameters, then to be just utilized as just ``` from sqlfluff.core.config import progress_bar_configuration is_progressbar_disabled = progress_bar_configuration.disable_progress_bar ``` """ _disable_progress_bar: Optional[bool] = True @property def disable_progress_bar(self) -> Optional[bool]: # noqa: D102 return self._disable_progress_bar @disable_progress_bar.setter def disable_progress_bar(self, value: Optional[bool]) -> None: """`disable_progress_bar` setter. `True` means that progress bar should be always hidden, `False` fallbacks into `None` which is an automatic mode. From tqdm documentation: 'If set to None, disable on non-TTY.' """ self._disable_progress_bar = value or None progress_bar_configuration = ProgressBarConfiguration() sqlfluff-2.3.5/src/sqlfluff/core/default_config.cfg000066400000000000000000000276401451700765000223540ustar00rootroot00000000000000[sqlfluff] # verbose is an integer (0-2) indicating the level of log output verbose = 0 # Turn off color formatting of output nocolor = False # Supported dialects https://docs.sqlfluff.com/en/stable/dialects.html # Or run 'sqlfluff dialects' dialect = None # One of [raw|jinja|python|placeholder] templater = jinja # Comma separated list of rules to check, default to all rules = all # Comma separated list of rules to exclude, or None exclude_rules = None # Below controls SQLFluff output, see max_line_length for SQL output output_line_length = 80 # Number of passes to run before admitting defeat runaway_limit = 10 # Ignore errors by category (one or more of the following, separated by commas: lexing,linting,parsing,templating) ignore = None # Warn only for rule codes (one of more rule codes, seperated by commas: e.g. LT01,LT02) # Also works for templating and parsing errors by using TMP or PRS warnings = None # Whether to warn about unneeded '-- noqa:' comments. warn_unused_ignores = False # Ignore linting errors found within sections of code coming directly from # templated code (e.g. from within Jinja curly braces. Note that it does not # ignore errors from literal code found within template loops. ignore_templated_areas = True # can either be autodetect or a valid encoding e.g. utf-8, utf-8-sig encoding = autodetect # Ignore inline overrides (e.g. to test if still required) disable_noqa = False # Comma separated list of file extensions to lint # NB: This config will only apply in the root folder sql_file_exts = .sql,.sql.j2,.dml,.ddl # Allow fix to run on files, even if they contain parsing errors # Note altering this is NOT RECOMMENDED as can corrupt SQL fix_even_unparsable = False # Very large files can make the parser effectively hang. # The more efficient check is the _byte_ limit check which # is enabled by default. The previous _character_ limit check # is still present for backward compatibility. This will be # removed in a future version. # Set either to 0 to disable. 
large_file_skip_char_limit = 0
large_file_skip_byte_limit = 20000
# CPU processes to use while linting.
# If positive, just implies number of processes.
# If negative or zero, implies number_of_cpus - specified_number.
# e.g. -1 means use all processors but one. 0 means all cpus.
processes = 1
# Max line length is set by default to be in line with the dbt style guide.
# https://github.com/dbt-labs/corp/blob/main/dbt_style_guide.md
# Set to zero or negative to disable checks.
max_line_length = 80

[sqlfluff:indentation]
# See https://docs.sqlfluff.com/en/stable/layout.html#configuring-indent-locations
indent_unit = space
tab_space_size = 4
indented_joins = False
indented_ctes = False
indented_using_on = True
indented_on_contents = True
indented_then = True
indented_then_contents = True
allow_implicit_indents = False
template_blocks_indent = True
# This is a comma separated list of elements to skip
# indentation edits to.
skip_indentation_in = script_content
# If comments are found at the end of long lines, we default to moving
# them to the line _before_ their current location as the convention is
# that a comment precedes the line it describes. However if you prefer
# comments moved _after_, this configuration setting can be set to "after".
trailing_comments = before

# Layout configuration
# See https://docs.sqlfluff.com/en/stable/layout.html#configuring-layout-and-spacing
[sqlfluff:layout:type:comma]
spacing_before = touch
line_position = trailing

[sqlfluff:layout:type:binary_operator]
spacing_within = touch
line_position = leading

[sqlfluff:layout:type:statement_terminator]
spacing_before = touch
line_position = trailing

[sqlfluff:layout:type:end_of_file]
spacing_before = touch

[sqlfluff:layout:type:set_operator]
line_position = alone:strict

[sqlfluff:layout:type:start_bracket]
spacing_after = touch

[sqlfluff:layout:type:end_bracket]
spacing_before = touch

[sqlfluff:layout:type:start_square_bracket]
spacing_after = touch

[sqlfluff:layout:type:end_square_bracket]
spacing_before = touch

[sqlfluff:layout:type:start_angle_bracket]
spacing_after = touch

[sqlfluff:layout:type:end_angle_bracket]
spacing_before = touch

[sqlfluff:layout:type:casting_operator]
spacing_before = touch
spacing_after = touch:inline

[sqlfluff:layout:type:slice]
spacing_before = touch
spacing_after = touch

[sqlfluff:layout:type:dot]
spacing_before = touch
spacing_after = touch

[sqlfluff:layout:type:comparison_operator]
spacing_within = touch
line_position = leading

[sqlfluff:layout:type:assignment_operator]
spacing_within = touch
line_position = leading

[sqlfluff:layout:type:object_reference]
spacing_within = touch:inline

[sqlfluff:layout:type:numeric_literal]
spacing_within = touch:inline

[sqlfluff:layout:type:sign_indicator]
spacing_after = touch:inline

[sqlfluff:layout:type:tilde]
spacing_after = touch:inline

[sqlfluff:layout:type:function_name]
spacing_within = touch:inline
spacing_after = touch:inline

[sqlfluff:layout:type:array_type]
spacing_within = touch:inline

[sqlfluff:layout:type:typed_array_literal]
spacing_within = touch

[sqlfluff:layout:type:sized_array_type]
spacing_within = touch

[sqlfluff:layout:type:struct_type]
spacing_within = touch:inline

[sqlfluff:layout:type:bracketed_arguments]
spacing_before = touch:inline

[sqlfluff:layout:type:typed_struct_literal]
spacing_within = touch

[sqlfluff:layout:type:semi_structured_expression]
spacing_within = touch:inline
spacing_before = touch:inline

[sqlfluff:layout:type:array_accessor]
spacing_before = touch:inline

[sqlfluff:layout:type:colon]
spacing_before = touch

[sqlfluff:layout:type:colon_delimiter]
spacing_before = touch
spacing_after = touch

[sqlfluff:layout:type:path_segment]
spacing_within = touch

[sqlfluff:layout:type:sql_conf_option]
spacing_within = touch

[sqlfluff:layout:type:sqlcmd_operator]
# NOTE: This is the spacing between the operator and the colon
spacing_before = touch

[sqlfluff:layout:type:comment]
spacing_before = any
spacing_after = any

[sqlfluff:layout:type:pattern_expression]
# Snowflake pattern expressions shouldn't have their spacing changed.
spacing_within = any

[sqlfluff:layout:type:placeholder]
# Placeholders exist "outside" the rendered SQL syntax
# so we shouldn't enforce any particular spacing around
# them.
spacing_before = any
spacing_after = any

[sqlfluff:layout:type:common_table_expression]
# The definition part of a CTE should fit on one line where possible.
# Users who regularly define column names in their CTEs may wish to
# relax this config to just `single`.
spacing_within = single:inline

# By setting a selection of clauses to "alone", we hint to the reflow
# algorithm that in the case of a long single line statement, the
# first place to add newlines would be around these clauses.
# Setting this to "alone:strict" would always _force_ line breaks
# around them even if the line isn't too long.
[sqlfluff:layout:type:select_clause]
line_position = alone

[sqlfluff:layout:type:where_clause]
line_position = alone

[sqlfluff:layout:type:from_clause]
line_position = alone

[sqlfluff:layout:type:join_clause]
line_position = alone

[sqlfluff:layout:type:groupby_clause]
line_position = alone

[sqlfluff:layout:type:orderby_clause]
# NOTE: Order by clauses appear in many places other than in a select
# clause. To avoid unexpected behaviour we use `leading` in this
# case rather than `alone`.
line_position = leading

[sqlfluff:layout:type:having_clause]
line_position = alone

[sqlfluff:layout:type:limit_clause]
line_position = alone

# Template loop tokens shouldn't dictate spacing around them.
[sqlfluff:layout:type:template_loop] spacing_before = any spacing_after = any [sqlfluff:templater] unwrap_wrapped_queries = True [sqlfluff:templater:jinja] apply_dbt_builtins = True # Some rules can be configured directly from the config common to other rules [sqlfluff:rules] allow_scalar = True single_table_references = consistent unquoted_identifiers_policy = all [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.identifiers] # Unquoted identifiers extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.functions] # Function names extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.literals] # Null & Boolean Literals capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:capitalisation.types] # Data Types extended_capitalisation_policy = consistent # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:ambiguous.join] # Fully qualify JOIN clause fully_qualify_join_types = inner [sqlfluff:rules:ambiguous.column_references] # GROUP BY/ORDER BY column references group_by_and_order_by_style = consistent [sqlfluff:rules:aliasing.table] # Aliasing preference for tables aliasing = explicit [sqlfluff:rules:aliasing.column] # Aliasing preference for columns aliasing = explicit [sqlfluff:rules:aliasing.length] min_alias_length = None max_alias_length = None [sqlfluff:rules:aliasing.forbid] # Avoid table aliases in from clauses and join conditions. # Disabled by default for all dialects unless explicitly enabled. # We suggest instead using aliasing.length (AL06) in most cases. force_enable = False [sqlfluff:rules:convention.select_trailing_comma] # Trailing commas select_clause_trailing_comma = forbid [sqlfluff:rules:convention.count_rows] # Consistent syntax to count all rows prefer_count_1 = False prefer_count_0 = False [sqlfluff:rules:convention.terminator] # Semi-colon formatting approach multiline_newline = False require_final_semicolon = False [sqlfluff:rules:convention.blocked_words] # Comma separated list of blocked words that should not be used blocked_words = None blocked_regex = None match_source = False [sqlfluff:rules:convention.quoted_literals] # Consistent usage of preferred quotes for quoted literals preferred_quoted_literal_style = consistent # Disabled for dialects that do not support single and double quotes for quoted literals (e.g. Postgres) force_enable = False [sqlfluff:rules:convention.casting_style] # SQL type casting preferred_type_casting_style = consistent [sqlfluff:rules:references.from] # References must be in FROM clause # Disabled for some dialects (e.g. bigquery) force_enable = False [sqlfluff:rules:references.qualification] # Comma separated list of words to ignore for this rule ignore_words = None ignore_words_regex = None [sqlfluff:rules:references.consistent] # References must be consistently used # Disabled for some dialects (e.g. bigquery) force_enable = False [sqlfluff:rules:references.keywords] # Keywords should not be used as identifiers. 
unquoted_identifiers_policy = aliases
quoted_identifiers_policy = none
# Comma separated list of words to ignore for this rule
ignore_words = None
ignore_words_regex = None

[sqlfluff:rules:references.special_chars]
# Special characters in identifiers
unquoted_identifiers_policy = all
quoted_identifiers_policy = all
allow_space_in_identifier = False
additional_allowed_characters = None
ignore_words = None
ignore_words_regex = None

[sqlfluff:rules:references.quoting]
# Policy on quoted and unquoted identifiers
prefer_quoted_identifiers = False
prefer_quoted_keywords = False
ignore_words = None
ignore_words_regex = None
force_enable = False

[sqlfluff:rules:layout.long_lines]
# Line length
ignore_comment_lines = False
ignore_comment_clauses = False

[sqlfluff:rules:layout.select_targets]
wildcard_policy = single

[sqlfluff:rules:structure.subquery]
# By default, allow subqueries in from clauses, but not join clauses
forbid_subquery_in = join

[sqlfluff:rules:structure.join_condition_order]
preferred_first_table_in_join_clause = earlier
sqlfluff-2.3.5/src/sqlfluff/core/dialects/000077500000000000000000000000001451700765000205015ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/dialects/__init__.py000066400000000000000000000070471451700765000226200ustar00rootroot00000000000000"""Contains SQL Dialects.

Note that individual dialects are only imported as needed at runtime.
This avoids circular references.

To enable this, any modules outside of .dialects cannot import dialects
directly. They should import `dialect_selector` and use that to fetch
dialects.

Within .dialects, each dialect is free to depend on other dialects as
required. Any dependent dialects will be loaded as needed.
"""

from importlib import import_module
from typing import Iterator, NamedTuple

# Eventually it would be good to dynamically discover dialects
# from any module beginning with "dialect_" within this folder.
from sqlfluff.core.dialects.base import Dialect
from sqlfluff.core.errors import SQLFluffUserError

_dialect_lookup = {
    "ansi": ("dialect_ansi", "ansi_dialect"),
    "athena": ("dialect_athena", "athena_dialect"),
    "bigquery": ("dialect_bigquery", "bigquery_dialect"),
    "clickhouse": ("dialect_clickhouse", "clickhouse_dialect"),
    "databricks": ("dialect_databricks", "databricks_dialect"),
    "db2": ("dialect_db2", "db2_dialect"),
    "duckdb": ("dialect_duckdb", "duckdb_dialect"),
    "exasol": ("dialect_exasol", "exasol_dialect"),
    "greenplum": ("dialect_greenplum", "greenplum_dialect"),
    "hive": ("dialect_hive", "hive_dialect"),
    "materialize": ("dialect_materialize", "materialize_dialect"),
    "mysql": ("dialect_mysql", "mysql_dialect"),
    "oracle": ("dialect_oracle", "oracle_dialect"),
    "postgres": ("dialect_postgres", "postgres_dialect"),
    "redshift": ("dialect_redshift", "redshift_dialect"),
    "snowflake": ("dialect_snowflake", "snowflake_dialect"),
    "soql": ("dialect_soql", "soql_dialect"),
    "sparksql": ("dialect_sparksql", "sparksql_dialect"),
    "sqlite": ("dialect_sqlite", "sqlite_dialect"),
    "teradata": ("dialect_teradata", "teradata_dialect"),
    "trino": ("dialect_trino", "trino_dialect"),
    "tsql": ("dialect_tsql", "tsql_dialect"),
}

_legacy_dialects = {
    "exasol_fs": (
        "As of 0.7.0 the 'exasol_fs' dialect has been combined with "
        "the 'exasol' dialect, and is no longer a standalone dialect. "
        "Please use the 'exasol' dialect instead."
    ),
    "spark3": (
        "The 'spark3' dialect has been renamed to sparksql. "
        "Please use the 'sparksql' dialect instead."
    ),
}


def load_raw_dialect(label: str, base_module: str = "sqlfluff.dialects") -> Dialect:
    """Dynamically load a dialect."""
    if label in _legacy_dialects:
        raise SQLFluffUserError(_legacy_dialects[label])
    elif label not in _dialect_lookup:
        raise KeyError("Unknown dialect")
    module_name, name = _dialect_lookup[label]
    module = import_module(f"{base_module}.{module_name}")
    result: Dialect = getattr(module, name)
    result.add_update_segments({k: getattr(module, k) for k in dir(module)})
    return result


class DialectTuple(NamedTuple):
    """Dialect Tuple object for describing dialects."""

    label: str
    name: str
    inherits_from: str


def dialect_readout() -> Iterator[DialectTuple]:
    """Generate a readout of available dialects."""
    for dialect_label in sorted(_dialect_lookup):
        dialect = load_raw_dialect(dialect_label)
        yield DialectTuple(
            label=dialect_label,
            name=dialect.name,
            inherits_from=dialect.inherits_from or "nothing",
        )


def dialect_selector(s: str) -> Dialect:
    """Return a dialect given its name."""
    dialect = load_raw_dialect(s)
    # Expand any callable references at this point.
    # NOTE: The result of .expand() is a new class.
    return dialect.expand()
sqlfluff-2.3.5/src/sqlfluff/core/dialects/base.py000066400000000000000000000366451451700765000220000ustar00rootroot00000000000000"""Defines the base dialect class."""

import sys
from typing import Any, Dict, List, Optional, Set, Type, Union, cast

from sqlfluff.core.parser import (
    BaseSegment,
    KeywordSegment,
    SegmentGenerator,
    StringParser,
)
from sqlfluff.core.parser.grammar.base import BaseGrammar, Nothing
from sqlfluff.core.parser.lexer import LexerType
from sqlfluff.core.parser.matchable import Matchable
from sqlfluff.core.parser.types import BracketPairTuple, DialectElementType


class Dialect:
    """Serves as the basis for runtime resolution of Grammar.

    Args:
        name (:obj:`str`): The name of the dialect, used for lookup.
        lexer_matchers (iterable of :obj:`StringLexer`): A structure defining
            the lexing config for this dialect.

    """

    def __init__(
        self,
        name: str,
        root_segment_name: str,
        lexer_matchers: Optional[List[LexerType]] = None,
        library: Optional[Dict[str, DialectElementType]] = None,
        sets: Optional[Dict[str, Set[Union[str, BracketPairTuple]]]] = None,
        inherits_from: Optional[str] = None,
    ) -> None:
        self._library = library or {}
        self.name = name
        self.lexer_matchers = lexer_matchers
        self.expanded = False
        self._sets = sets or {}
        self.inherits_from = inherits_from
        self.root_segment_name = root_segment_name

    def __repr__(self) -> str:  # pragma: no cover
        return f"<Dialect: {self.name}>"

    def expand(self) -> "Dialect":
        """Expand any callable references to concrete ones.

        This must be called before using the dialect. But
        allows more flexible definitions to happen at runtime.

        NOTE: This method returns a copy of the current dialect
        so that we don't pollute the original dialect and get
        dependency issues.

        Returns:
            :obj:`Dialect`: a copy of the given dialect but
                with expanded references.
        """
        # Are we already expanded?
        if self.expanded:  # pragma: no cover
            raise ValueError("Attempted to re-expand an already expanded dialect.")

        expanded_copy = self.copy_as(name=self.name)
        # Expand any callable elements of the dialect.
        for key in expanded_copy._library:
            seg_gen = expanded_copy._library[key]
            if isinstance(seg_gen, SegmentGenerator):
                # If the element is callable, call it passing the current
                # dialect and store the result in its place.
                # Use the .replace() method for its error handling.
                expanded_copy.replace(**{key: seg_gen.expand(expanded_copy)})
        # Expand any keyword sets.
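        # For example (illustrative): a keyword "SELECT" in one of these sets
        # results in a "SelectKeywordSegment" entry in the library, defined
        # as StringParser("select", KeywordSegment).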
for keyword_set in [ "unreserved_keywords", "reserved_keywords", ]: # e.g. reserved_keywords, (JOIN, ...) # Make sure the values are available as KeywordSegments keyword_sets = expanded_copy.sets(keyword_set) for kw in keyword_sets: n = kw.capitalize() + "KeywordSegment" if n not in expanded_copy._library: expanded_copy._library[n] = StringParser(kw.lower(), KeywordSegment) expanded_copy.expanded = True return expanded_copy def sets(self, label: str) -> Set[str]: """Allows access to sets belonging to this dialect. These sets belong to the dialect and are copied for sub dialects. These are used in combination with late-bound dialect objects to create some of the bulk-produced rules. """ assert label not in ( "bracket_pairs", "angle_bracket_pairs", ), f"Use `bracket_sets` to retrieve {label} set." if label not in self._sets: self._sets[label] = set() return cast(Set[str], self._sets[label]) def bracket_sets(self, label: str) -> Set[BracketPairTuple]: """Allows access to bracket sets belonging to this dialect.""" assert label in ( "bracket_pairs", "angle_bracket_pairs", ), "Invalid bracket set. Consider using `sets` instead." if label not in self._sets: self._sets[label] = set() return cast(Set[BracketPairTuple], self._sets[label]) def update_keywords_set_from_multiline_string( self, set_label: str, values: str ) -> None: """Special function to update a keywords set from a multi-line string.""" self.sets(set_label).update( [n.strip().upper() for n in values.strip().split("\n")] ) def copy_as(self, name: str) -> "Dialect": """Copy this dialect and create a new one with a different name. This is the primary method for inheritance, after which, the `replace` method can be used to override particular rules. """ # Are we already expanded? if self.expanded: # pragma: no cover # If we copy an already expanded dialect then any SegmentGenerators # won't respond. This is most likely a mistake. raise ValueError("Attempted to copy an already expanded dialect.") # Copy sets if they are passed, so they can be mutated independently new_sets = {} for label in self._sets: new_sets[label] = self._sets[label].copy() assert self.lexer_matchers return self.__class__( name=name, library=self._library.copy(), lexer_matchers=self.lexer_matchers.copy(), sets=new_sets, inherits_from=self.name, root_segment_name=self.root_segment_name, ) def add(self, **kwargs: DialectElementType) -> None: """Add a segment to the dialect directly. This is the alternative to the decorator route, most useful for segments defined using `make`. Segments are passed in as kwargs. e.g. dialect.add(SomeSegment=StringParser("blah", KeywordSegment)) Note that multiple segments can be added in the same call as this method will iterate through the kwargs """ for n in kwargs: if n in self._library: # pragma: no cover raise ValueError(f"{n!r} is already registered in {self!r}") self._library[n] = kwargs[n] def replace(self, **kwargs: DialectElementType) -> None: """Override a segment on the dialect directly. Usage is very similar to add, but elements specified must already exist. """ for n in kwargs: if n not in self._library: # pragma: no cover raise ValueError(f"{n!r} is not already registered in {self!r}") replacement = kwargs[n] # If trying to replace with same, just skip. if self._library[n] is replacement: continue # Check for replacement with a new but identical class. # This would be a sign of redundant definitions in the dialect. 
elif self._library[n] == replacement: raise ValueError( f"Attempted unnecessary identical redefinition of {n!r} in {self!r}" ) # pragma: no cover # To replace a segment, the replacement must either be a # subclass of the original, *or* it must have the same # public methods and/or fields as it. # NOTE: Other replacements aren't validated. subclass = False if isinstance(self._library[n], type) and not isinstance( # NOTE: The exception here is we _are_ allowed to replace a # segment with a `Nothing()` grammar, which shows that a segment # has been disabled. replacement, Nothing, ): assert isinstance( replacement, type ), f"Cannot replace {n!r} with {replacement}" old_seg = cast(Type["BaseSegment"], self._library[n]) new_seg = cast(Type["BaseSegment"], replacement) assert issubclass(old_seg, BaseSegment) assert issubclass(new_seg, BaseSegment) subclass = issubclass(new_seg, old_seg) if not subclass: if old_seg.type != new_seg.type: raise ValueError( # pragma: no cover f"Cannot replace {n!r} because 'type' property does not " f"match: {new_seg.type} != {old_seg.type}" ) base_dir = set(dir(self._library[n])) cls_dir = set(dir(new_seg)) missing = set( n for n in base_dir.difference(cls_dir) if not n.startswith("_") ) if missing: raise ValueError( # pragma: no cover f"Cannot replace {n!r} because it's not a subclass and " f"is missing these from base: {', '.join(missing)}" ) self._library[n] = replacement def add_update_segments(self, module_dct: Dict[str, Any]) -> None: """Scans module dictionary, adding or replacing segment definitions.""" for k, v in module_dct.items(): if isinstance(v, type) and issubclass(v, BaseSegment): if k not in self._library: self.add(**{k: v}) else: non_seg_v = cast(Union[Matchable, SegmentGenerator], v) self.replace(**{k: non_seg_v}) def get_grammar(self, name: str) -> BaseGrammar: """Allow access to grammars pre-expansion. This is typically for dialect inheritance. This method also validates that the result is a grammar. """ if name not in self._library: # pragma: no cover raise ValueError(f"Element {name} not found in dialect.") grammar = self._library[name] if not isinstance(grammar, BaseGrammar): # pragma: no cover raise TypeError( f"Attempted to fetch non grammar [{name}] with get_grammar." ) return grammar def get_segment(self, name: str) -> Type["BaseSegment"]: """Allow access to segments pre-expansion. This is typically for dialect inheritance. This method also validates that the result is a segment. """ if name not in self._library: # pragma: no cover raise ValueError(f"Element {name} not found in dialect.") segment = cast(Type["BaseSegment"], self._library[name]) if issubclass(segment, BaseSegment): return segment else: # pragma: no cover raise TypeError( f"Attempted to fetch non segment [{name}] " f"with get_segment - type{type(segment)}" ) def ref(self, name: str) -> Matchable: """Return an object which acts as a late binding reference to the element named. NB: This requires the dialect to be expanded, and only returns Matchables as a result. """ if not self.expanded: # pragma: no cover raise RuntimeError("Dialect must be expanded before use.") if name in self._library: res = self._library[name] if res: assert not isinstance(res, SegmentGenerator) return res else: # pragma: no cover raise ValueError( "Unexpected Null response while fetching {!r} from {}".format( name, self.name ) ) elif name.endswith("KeywordSegment"): # pragma: no cover keyword = name[0:-14] keyword_tip = ( "\n\nThe syntax in the query is not (yet?) supported. 
Try to" " narrow down your query to a minimal, reproducible case and" " raise an issue on GitHub.\n\n" "Or, even better, see this guide on how to help contribute" " keyword and/or dialect updates:\n" "https://github.com/sqlfluff/sqlfluff/wiki/Contributing-Dialect-Changes#keywords" # noqa E501 ) # Keyword errors are common so avoid printing the whole, scary, # traceback as not that useful and confusing to people. sys.tracebacklimit = 0 raise RuntimeError( ( "Grammar refers to the " "{!r} keyword which was not found in the {} dialect.{}".format( keyword, self.name, keyword_tip ) ) ) else: # pragma: no cover raise RuntimeError( ( "Grammar refers to " "{!r} which was not found in the {} dialect.".format( name, self.name ) ) ) def set_lexer_matchers(self, lexer_matchers: List[LexerType]) -> None: """Set the lexer struct for the dialect. This is what is used for base dialects. For derived dialects (which don't exist yet) the assumption is that we'll introduce some kind of *patch* function which could be used to mutate an existing `lexer_matchers`. """ self.lexer_matchers = lexer_matchers def get_lexer_matchers(self) -> List[LexerType]: """Fetch the lexer struct for this dialect.""" if self.lexer_matchers: return self.lexer_matchers else: # pragma: no cover raise ValueError(f"Lexing struct has not been set for dialect {self}") def patch_lexer_matchers(self, lexer_patch: List[LexerType]) -> None: """Patch an existing lexer struct. Used to edit the lexer of a sub-dialect. """ buff = [] if not self.lexer_matchers: # pragma: no cover raise ValueError("Lexer struct must be defined before it can be patched!") # Make a new data struct for lookups patch_dict = {elem.name: elem for elem in lexer_patch} for elem in self.lexer_matchers: if elem.name in patch_dict: buff.append(patch_dict[elem.name]) else: buff.append(elem) # Overwrite with the buffer once we're done self.lexer_matchers = buff def insert_lexer_matchers(self, lexer_patch: List[LexerType], before: str) -> None: """Insert new records into an existing lexer struct. Used to edit the lexer of a sub-dialect. The patch is inserted *before* whichever element is named in `before`. """ buff = [] found = False if not self.lexer_matchers: # pragma: no cover raise ValueError("Lexer struct must be defined before it can be patched!") for elem in self.lexer_matchers: if elem.name == before: found = True for patch in lexer_patch: buff.append(patch) buff.append(elem) else: buff.append(elem) if not found: # pragma: no cover raise ValueError( f"Lexer struct insert before '{before}' failed because tag never found." 
            )
        # Overwrite with the buffer once we're done
        self.lexer_matchers = buff

    def get_root_segment(self) -> Union[Type[BaseSegment], Matchable]:
        """Get the root segment of the dialect."""
        return self.ref(self.root_segment_name)
sqlfluff-2.3.5/src/sqlfluff/core/dialects/common.py000066400000000000000000000012311451700765000223400ustar00rootroot00000000000000"""Common classes for dialects to use."""

from typing import List, NamedTuple, Optional

from sqlfluff.core.parser import BaseSegment


class AliasInfo(NamedTuple):
    """Details about a table alias."""

    ref_str: str  # Name given to the alias
    segment: Optional[BaseSegment]  # Identifier segment containing the name
    aliased: bool
    from_expression_element: BaseSegment
    alias_expression: Optional[BaseSegment]
    object_reference: Optional[BaseSegment]


class ColumnAliasInfo(NamedTuple):
    """Details about a column alias."""

    alias_identifier_name: str
    aliased_segment: BaseSegment
    column_reference_segments: List[BaseSegment]
sqlfluff-2.3.5/src/sqlfluff/core/enums.py000066400000000000000000000010301451700765000204060ustar00rootroot00000000000000"""Enums used by sqlfluff."""

from enum import Enum

from colorama import Fore, Style


class FormatType(Enum):
    """Enums for formatting types."""

    human = "human"
    json = "json"
    yaml = "yaml"
    github_annotation = "github-annotation"
    github_annotation_native = "github-annotation-native"
    none = "none"  # An option to return _no output_.


class Color(Enum):
    """Colors used by `colorize` function."""

    red = Fore.RED
    green = Fore.GREEN
    blue = Fore.BLUE
    lightgrey = Fore.BLACK + Style.BRIGHT
sqlfluff-2.3.5/src/sqlfluff/core/errors.py000066400000000000000000000233761451700765000206050ustar00rootroot00000000000000"""Errors - these are closely linked to what used to be called violations.

NOTE: The BaseException class, which ValueError inherits from, defines
a custom __reduce__() method for pickling and unpickling exceptions.
For the SQLBaseError, and its dependent classes, we define properties
of these exceptions which don't work well with that method, which is
why we redefine __reduce__() on each of these classes. Given the
circumstances in which they are called, they don't show up on coverage
tracking.
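
For example (an illustrative sketch), the redefined __reduce__() is what
makes round-trips like this work, even across processes:

    import pickle

    err = SQLTemplaterError("could not render", line_no=10, line_pos=5)
    assert pickle.loads(pickle.dumps(err)) == err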
https://stackoverflow.com/questions/49715881/how-to-pickle-inherited-exceptions
"""

from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, Union

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.parser import BaseSegment, PositionMarker
    from sqlfluff.core.rules import BaseRule, LintFix

CheckTuple = Tuple[str, int, int]


class SQLBaseError(ValueError):
    """Base Error Class for all violations."""

    _code: Optional[str] = None
    _identifier = "base"
    _warning = False  # The default value for `warning`

    def __init__(
        self,
        description: Optional[str] = None,
        pos: Optional["PositionMarker"] = None,
        line_no: int = 0,
        line_pos: int = 0,
        ignore: bool = False,
        fatal: bool = False,
        warning: Optional[bool] = None,
    ) -> None:
        self.fatal = fatal
        self.ignore = ignore
        self.warning: bool = warning if warning is not None else self._warning
        self.description = description
        if pos:
            self.line_no, self.line_pos = pos.source_position()
        else:
            self.line_no = line_no
            self.line_pos = line_pos
        super().__init__(self.desc())

    def __eq__(self, other) -> bool:
        """Errors compare equal if they are the same type and same content."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __reduce__(
        self,
    ) -> Tuple[Type["SQLBaseError"], Tuple[Any, ...]]:
        """Prepare the SQLBaseError for pickling."""
        return type(self), (
            self.description,
            None,
            self.line_no,
            self.line_pos,
            self.ignore,
            self.fatal,
            self.warning,
        )

    @property
    def fixable(self) -> bool:
        """Should this error be considered fixable?"""
        return False

    def rule_code(self) -> str:
        """Fetch the code of the rule which caused this error."""
        return self._code or "????"

    def desc(self) -> str:
        """Fetch a description of this violation."""
        if self.description:
            return self.description

        return self.__class__.__name__  # pragma: no cover

    def get_info_dict(self) -> Dict[str, Union[str, int]]:
        """Return a dict of properties.

        This is useful in the API for outputting violations.
        """
        return {
            "line_no": self.line_no,
            "line_pos": self.line_pos,
            "code": self.rule_code(),
            "description": self.desc(),
            "name": getattr(self, "rule").name if hasattr(self, "rule") else "",
        }

    def check_tuple(self) -> CheckTuple:
        """Get a tuple representing this error. Mostly for testing."""
        return (
            self.rule_code(),
            self.line_no,
            self.line_pos,
        )

    def source_signature(self) -> Tuple[Any, ...]:
        """Return hashable source signature for deduplication."""
        return (self.check_tuple(), self.desc())

    def ignore_if_in(self, ignore_iterable: List[str]) -> None:
        """Ignore this violation if it matches the iterable."""
        if self._identifier in ignore_iterable:
            self.ignore = True

    def warning_if_in(self, warning_iterable: List[str]) -> None:
        """Warning only for this violation if it matches the iterable.

        Designed for rule codes so works with L001, LL0X but also TMP
        or PRS for templating and parsing errors.

        Args:
            warning_iterable (List[str]): A list of strings representing the
                warning codes to check.

        Returns:
            None
        """
        if self.rule_code() in warning_iterable:
            self.warning = True


class SQLTemplaterError(SQLBaseError):
    """An error which occurred during templating.

    Args:
        pos (:obj:`PosMarker`, optional): The position which the error
            occurred at.

    """

    _code = "TMP"
    _identifier = "templating"


class SQLFluffSkipFile(RuntimeError):
    """An error returned from a templater to skip a file."""

    pass


class SQLLexError(SQLBaseError):
    """An error which occurred during lexing.

    Args:
        pos (:obj:`PosMarker`, optional): The position which the error
            occurred at.
""" _code = "LXR" _identifier = "lexing" class SQLParseError(SQLBaseError): """An error which occurred during parsing. Args: segment (:obj:`BaseSegment`, optional): The segment which is relevant for the failure in parsing. This is likely to be a subclass of `BaseSegment` rather than the parent class itself. This is mostly used for logging and for referencing position. """ _code = "PRS" _identifier = "parsing" def __init__( self, description: Optional[str] = None, segment: Optional["BaseSegment"] = None, line_no: int = 0, line_pos: int = 0, ignore: bool = False, fatal: bool = False, warning: Optional[bool] = None, ) -> None: # Store the segment on creation - we might need it later self.segment = segment super().__init__( description=description, pos=segment.pos_marker if segment else None, line_no=line_no, line_pos=line_pos, ignore=ignore, fatal=fatal, warning=warning, ) def __reduce__( self, ) -> Tuple[Type["SQLParseError"], Tuple[Any, ...]]: """Prepare the SQLParseError for pickling.""" return type(self), ( self.description, self.segment, self.line_no, self.line_pos, self.ignore, self.fatal, self.warning, ) class SQLLintError(SQLBaseError): """An error which occurred during linting. In particular we reference the rule here to do extended logging based on the rule in question which caused the fail. Args: segment (:obj:`BaseSegment`, optional): The segment which is relevant for the failure in parsing. This is likely to be a subclass of `BaseSegment` rather than the parent class itself. This is mostly used for logging and for referencing position. """ _identifier = "linting" def __init__( self, description: str, segment: "BaseSegment", rule: "BaseRule", fixes: Optional[List["LintFix"]] = None, ignore: bool = False, fatal: bool = False, warning: Optional[bool] = None, ) -> None: self.segment = segment self.rule = rule self.fixes = fixes or [] super().__init__( description=description, pos=segment.pos_marker if segment else None, ignore=ignore, fatal=fatal, warning=warning, ) def __reduce__( self, ) -> Tuple[Type["SQLLintError"], Tuple[Any, ...]]: """Prepare the SQLLintError for pickling.""" return type(self), ( self.description, self.segment, self.rule, self.fixes, self.ignore, self.fatal, self.warning, ) @property def fixable(self) -> bool: """Should this error be considered fixable?""" if self.fixes: return True return False def rule_code(self) -> str: """Fetch the code of the rule which cause this error.""" return self.rule.code def source_signature(self) -> Tuple[Any, ...]: """Return hashable source signature for deduplication. For linting errors we need to dedupe on more than just location and description, we also need to check the edits potentially made, both in the templated file but also in the source. """ fix_raws = tuple( tuple(e.raw for e in f.edit) if f.edit else None for f in self.fixes ) _source_fixes: List[Tuple[str, int, int]] = [] for fix in self.fixes: if not fix.edit: continue for edit in fix.edit: for source_edit in edit.source_fixes: # NOTE: It's important that we don't dedupe on the # templated slice for the source fix, because that will # be different for different locations in any loop. 
                    _source_fixes.append(
                        (
                            source_edit.edit,
                            source_edit.source_slice.start,
                            source_edit.source_slice.stop,
                        )
                    )
        return (self.check_tuple(), self.description, fix_raws, tuple(_source_fixes))

    def __repr__(self) -> str:
        return "<SQLLintError: rule {} pos:{!r}, #fixes: {}, description: {}>".format(
            self.rule_code(),
            (self.line_no, self.line_pos),
            len(self.fixes),
            self.description,
        )


class SQLUnusedNoQaWarning(SQLBaseError):
    """A warning about an unused noqa directive."""

    _code = "NOQA"
    _identifier = "noqa"
    _warning = True


class SQLFluffUserError(ValueError):
    """An error which should be fed back to the user."""
sqlfluff-2.3.5/src/sqlfluff/core/file_helpers.py000066400000000000000000000007271451700765000217310ustar00rootroot00000000000000"""File Helpers for the parser module."""
import chardet

from sqlfluff.core.config import FluffConfig


def get_encoding(fname: str, config: FluffConfig) -> str:
    """Get the encoding of the file (autodetect)."""
    encoding_config: str = config.get("encoding", default="autodetect")

    if encoding_config == "autodetect":
        with open(fname, "rb") as f:
            data = f.read()
        return chardet.detect(data)["encoding"]

    return encoding_config
sqlfluff-2.3.5/src/sqlfluff/core/helpers/000077500000000000000000000000001451700765000203535ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/helpers/__init__.py000066400000000000000000000005571451700765000224720ustar00rootroot00000000000000"""Helper methods for other classes.

This module should have no dependencies on other parts of the SQLFluff
package and each should also be free of dependencies on each other. This
is to ensure each of these methods can be used anywhere within the project
without fear of dependency issues.

Methods are organised by the datatype they are designed to help with.
"""
sqlfluff-2.3.5/src/sqlfluff/core/helpers/dict.py000066400000000000000000000065031451700765000216540ustar00rootroot00000000000000"""Dict helpers, mostly used in config routines."""

from typing import Any, Dict, List, Optional


def nested_combine(*dicts: Dict[str, Any]) -> Dict[str, Any]:
    """Combine an iterable of dictionaries.

    Each dictionary is combined into a result dictionary. For
    each key in the first dictionary, it will be overwritten
    by any same-named key in any later dictionaries in the
    iterable. If the element at that key is a dictionary, rather
    than just overwriting we use the same function to combine
    those dictionaries.

    Args:
        *dicts: An iterable of dictionaries to be combined.

    Returns:
        `dict`: A combined dictionary from the input dictionaries.

    A simple example:
    >>> nested_combine({"a": {"b": "c"}}, {"a": {"d": "e"}})
    {'a': {'b': 'c', 'd': 'e'}}

    Keys overwrite left to right:
    >>> nested_combine({"a": {"b": "c"}}, {"a": {"b": "e"}})
    {'a': {'b': 'e'}}
    """
    r: Dict[str, Any] = {}
    for d in dicts:
        for k in d:
            if k in r and isinstance(r[k], dict):
                if isinstance(d[k], dict):
                    r[k] = nested_combine(r[k], d[k])
                else:  # pragma: no cover
                    raise ValueError(
                        "Key {!r} is a dict in one config but not another! PANIC: "
                        "{!r}".format(k, d[k])
                    )
            else:
                r[k] = d[k]
    return r


def dict_diff(
    left: Dict[str, Any], right: Dict[str, Any], ignore: Optional[List[str]] = None
) -> Dict[str, Any]:
    """Work out the difference between two dictionaries.

    Returns a dictionary which represents elements in the `left`
    dictionary which aren't in the `right` or are different to
    those in the `right`. If the element is a dictionary, we
    recursively look for differences in those dictionaries,
    likewise only returning the differing elements.

    NOTE: If an element is in the `right` but not in the `left`
    at all (i.e.
    an element has been *removed*) then it will not show up in the comparison.

    Args:
        left (:obj:`dict`): The object containing the *new* elements which
            will be compared against the other.
        right (:obj:`dict`): The object to compare against.
        ignore (:obj:`list` of `str`, optional): Keys to ignore.

    Returns:
        `dict`: A dictionary representing the difference.

    Basic functionality shown, especially returning values from the left:
    >>> dict_diff({"a": "b", "c": "d"}, {"a": "b", "c": "e"})
    {'c': 'd'}

    Ignoring works on a key basis:
    >>> dict_diff({"a": "b"}, {"a": "c"})
    {'a': 'b'}
    >>> dict_diff({"a": "b"}, {"a": "c"}, ["a"])
    {}
    """
    buff: Dict[str, Any] = {}
    for k in left:
        if ignore and k in ignore:
            continue
        # Is the key there at all?
        if k not in right:
            buff[k] = left[k]
        # Is the content the same?
        elif left[k] == right[k]:
            continue
        # If it's not the same but both are dicts, then compare
        elif isinstance(left[k], dict) and isinstance(right[k], dict):
            diff = dict_diff(left[k], right[k], ignore=ignore)
            # Only include the difference if non-null.
            if diff:
                buff[k] = diff
        # It's just different
        else:
            buff[k] = left[k]
    return buff
sqlfluff-2.3.5/src/sqlfluff/core/helpers/slice.py000066400000000000000000000025771451700765000220330ustar00rootroot00000000000000"""Helpers for handling slices."""

from typing import Tuple


def to_tuple(s: slice) -> Tuple[int, int]:
    """Convert a slice into a tuple of (start, stop)."""
    assert s.start is not None and s.stop is not None
    return (s.start, s.stop)


def slice_length(s: slice) -> int:
    """Get the length of a slice."""
    length: int = s.stop - s.start
    return length


def is_zero_slice(s: slice) -> bool:
    """Return true if this is a zero slice."""
    is_zero: bool = s.stop == s.start
    return is_zero


def zero_slice(i: int) -> slice:
    """Construct a zero slice from a single integer."""
    return slice(i, i)


def offset_slice(start: int, offset: int) -> slice:
    """Construct a slice from a start and offset."""
    return slice(start, start + offset)


def slice_overlaps(s1: slice, s2: slice) -> bool:
    """Check whether two slices overlap.

    NOTE: This is designed only for use with *closed* and *positive* slices.
    """
    assert s1.start is not None, f"{s1} is not closed"
    assert s1.stop is not None, f"{s1} is not closed"
    assert s2.start is not None, f"{s2} is not closed"
    assert s2.stop is not None, f"{s2} is not closed"
    assert s1.start <= s1.stop, f"{s1} is not positive"
    assert s2.start <= s2.stop, f"{s2} is not positive"

    if s2.start >= s1.stop:
        return False

    if s1.start >= s2.stop:
        return False

    return True
sqlfluff-2.3.5/src/sqlfluff/core/helpers/string.py000066400000000000000000000035731451700765000222430ustar00rootroot00000000000000"""String Helpers for the parser module."""

from typing import Iterator, List, Tuple, Union


def curtail_string(s: str, length: int = 20) -> str:
    """Trim a string nicely to length."""
    if len(s) > length:
        return s[:length] + "..."
    else:
        return s


def findall(substr: str, in_str: str) -> Iterator[int]:
    """Yields all the positions of substr within in_str.

    https://stackoverflow.com/questions/4664850/how-to-find-all-occurrences-of-a-substring
    """
    # Return nothing if one of the inputs is trivial
    if not substr or not in_str:
        return
    idx = in_str.find(substr)
    while idx != -1:
        yield idx
        idx = in_str.find(substr, idx + 1)


def split_colon_separated_string(in_str: str) -> Tuple[Tuple[str, ...], str]:
    """Converts a colon separated string.

    NOTE: This also includes some provisions for values which may be
    Windows paths containing colons and NOT stripping those.
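
    For example (an illustrative value, following the behaviour defined below):

    >>> split_colon_separated_string("layout:type:comma")
    (('layout', 'type'), 'comma')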
""" config_path: List[str] = [] for element in in_str.split(":"): # If the next element begins with a backslash, and the previous # one had length == 1, then this is probably a windows path. # In which case, rejoin them together. element = element.strip() if ( element and element[0] == "\\" and config_path[-1] and len(config_path[-1]) == 1 ): config_path[-1] = config_path[-1] + ":" + element else: # Otherwise just add it to the path. config_path.append(element) return tuple(config_path[:-1]), config_path[-1] def split_comma_separated_string(raw: Union[str, List[str]]) -> List[str]: """Converts comma separated string to List, stripping whitespace.""" if isinstance(raw, str): return [s.strip() for s in raw.split(",") if s.strip()] assert isinstance(raw, list) return raw sqlfluff-2.3.5/src/sqlfluff/core/linter/000077500000000000000000000000001451700765000202065ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/linter/__init__.py000066400000000000000000000006301451700765000223160ustar00rootroot00000000000000"""Linter class and helper classes.""" from sqlfluff.core.linter.common import ParsedString, RenderedFile, RuleTuple from sqlfluff.core.linter.linted_file import LintedFile from sqlfluff.core.linter.linter import Linter from sqlfluff.core.linter.linting_result import LintingResult __all__ = ( "RuleTuple", "ParsedString", "LintedFile", "LintingResult", "Linter", "RenderedFile", ) sqlfluff-2.3.5/src/sqlfluff/core/linter/common.py000066400000000000000000000034751451700765000220610ustar00rootroot00000000000000"""Defines small container classes to hold intermediate results during linting.""" from typing import Any, Dict, List, NamedTuple, Optional, Tuple from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLBaseError, SQLTemplaterError from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.templaters import TemplatedFile class RuleTuple(NamedTuple): """Rule Tuple object for describing rules.""" code: str name: str description: str groups: Tuple[str, ...] aliases: Tuple[str, ...] class RenderedFile(NamedTuple): """An object to store the result of a templated file/string. This is notable as it's the intermediate state between what happens in the main process and the child processes when running in parallel mode. """ templated_file: TemplatedFile templater_violations: List[SQLTemplaterError] config: FluffConfig time_dict: Dict[str, float] fname: str encoding: str source_str: str class ParsedString(NamedTuple): """An object to store the result of parsing a string. Args: `parsed` is a segment structure representing the parsed file. If parsing fails due to an unrecoverable violation then we will return None. `violations` is a :obj:`list` of violations so far, which will either be templating, lexing or parsing violations at this stage. `time_dict` is a :obj:`dict` containing timings for how long each step took in the process. `templated_file` is a :obj:`TemplatedFile` containing the details of the templated file. """ tree: Optional[BaseSegment] violations: List[SQLBaseError] time_dict: Dict[str, Any] templated_file: TemplatedFile config: FluffConfig fname: str source_str: str sqlfluff-2.3.5/src/sqlfluff/core/linter/linted_dir.py000066400000000000000000000102601451700765000226740ustar00rootroot00000000000000"""Defines the LintedDir class. 
This stores the idea of a collection of linted files at a single start path """ from typing import Any, Dict, List, Optional, Union, overload from typing_extensions import Literal from sqlfluff.core.errors import CheckTuple from sqlfluff.core.linter.linted_file import LintedFile from sqlfluff.core.parser.segments.base import BaseSegment class LintedDir: """A class to store the idea of a collection of linted files at a single start path. A LintedDir may contain files in subdirectories, but they all share a common root. """ def __init__(self, path: str) -> None: self.files: List[LintedFile] = [] self.path: str = path def add(self, file: LintedFile) -> None: """Add a file to this path.""" self.files.append(file) @overload def check_tuples(self, by_path: Literal[False]) -> List[CheckTuple]: """Return a List of CheckTuples when by_path is False.""" @overload def check_tuples(self, by_path: Literal[True]) -> Dict[str, List[CheckTuple]]: """Return a Dict of paths and CheckTuples when by_path is True.""" @overload def check_tuples(self, by_path: bool = False): """Default overload method.""" def check_tuples( self, by_path=False, raise_on_non_linting_violations=True ) -> Union[List[CheckTuple], Dict[str, List[CheckTuple]]]: """Compress all the tuples into one list. NB: This is a little crude, as you can't tell which file the violations are from. Good for testing though. For more control set the `by_path` argument to true. """ if by_path: return { file.path: file.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) for file in self.files } else: tuple_buffer: List[CheckTuple] = [] for file in self.files: tuple_buffer += file.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) return tuple_buffer def num_violations(self, **kwargs) -> int: """Count the number of violations in the path.""" return sum(file.num_violations(**kwargs) for file in self.files) def get_violations(self, **kwargs) -> list: """Return a list of violations in the path.""" buff: list = [] for file in self.files: buff += file.get_violations(**kwargs) return buff def violation_dict(self, **kwargs) -> Dict[str, list]: """Return a dict of violations by file path.""" return {file.path: file.get_violations(**kwargs) for file in self.files} def stats(self) -> Dict[str, int]: """Return a dict containing linting stats about this path.""" return dict( files=len(self.files), clean=sum(file.is_clean() for file in self.files), unclean=sum(not file.is_clean() for file in self.files), violations=sum(file.num_violations() for file in self.files), ) def persist_changes( self, formatter: Any = None, fixed_file_suffix: str = "" ) -> Dict[str, Union[bool, str]]: """Persist changes to files in the given path. This also logs the output as we go using the formatter if present. """ # Run all the fixes for all the files and return a dict buffer: Dict[str, Union[bool, str]] = {} for file in self.files: buffer[file.path] = file.persist_tree( suffix=fixed_file_suffix, formatter=formatter ) return buffer @property def tree(self) -> Optional[BaseSegment]: """A convenience method for when there is only one file and we want the tree.""" if len(self.files) > 1: # pragma: no cover raise ValueError( ".tree() cannot be called when a LintedDir contains more than one file." ) elif not self.files: # pragma: no cover raise ValueError( "LintedDir has no parsed files. There is probably a parsing error." 
            )
        return self.files[0].tree
sqlfluff-2.3.5/src/sqlfluff/core/linter/linted_file.py000066400000000000000000000540671451700765000230500ustar00rootroot00000000000000"""Defines the LintedFile class.

This holds linting results for a single file, and also contains all of the
routines to apply fixes to that file post linting.
"""

import logging
import os
import shutil
import stat
import tempfile
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple, Type, Union

from sqlfluff.core.errors import (
    CheckTuple,
    SQLBaseError,
    SQLLintError,
    SQLParseError,
    SQLTemplaterError,
)

# Classes needed only for type checking
from sqlfluff.core.parser.segments import BaseSegment
from sqlfluff.core.rules.fix import FixPatch, iter_patches
from sqlfluff.core.rules.noqa import IgnoreMask
from sqlfluff.core.templaters import RawFileSlice, TemplatedFile

# Instantiate the linter logger
linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter")

TMP_PRS_ERROR_TYPES = (SQLTemplaterError, SQLParseError)


@dataclass
class FileTimings:
    """A dataclass for holding the timings information for a file."""

    step_timings: Dict[str, float]
    # NOTE: Because rules may run more than once for any
    # given file we record each run and then we can post
    # process this as we wish later.
    rule_timings: List[Tuple[str, str, float]]

    def __repr__(self):  # pragma: no cover
        return "<FileTimings>"

    def get_rule_timing_dict(self) -> Dict[str, float]:
        """Generate a summary to total time in each rule.

        This is primarily for csv export.
        """
        total_times: Dict[str, float] = defaultdict(float)

        for code, _, time in self.rule_timings:
            total_times[code] += time

        # Return as plain dict
        return dict(total_times.items())


class LintedFile(NamedTuple):
    """A class to store the idea of a linted file."""

    path: str
    violations: List[SQLBaseError]
    timings: Optional[FileTimings]
    tree: Optional[BaseSegment]
    ignore_mask: Optional[IgnoreMask]
    templated_file: TemplatedFile
    encoding: str

    def check_tuples(
        self, raise_on_non_linting_violations: bool = True
    ) -> List[CheckTuple]:
        """Make a list of check_tuples.

        This assumes that all the violations found are linting violations.
        If any are not, then this function raises that violation (unless
        `raise_on_non_linting_violations` is disabled).
        """
        vs: List[CheckTuple] = []
        v: SQLLintError
        for v in self.get_violations():
            if isinstance(v, SQLLintError):
                vs.append(v.check_tuple())
            elif raise_on_non_linting_violations:
                raise v
        return vs

    @staticmethod
    def deduplicate_in_source_space(
        violations: List[SQLBaseError],
    ) -> List[SQLBaseError]:
        """Removes duplicates in the source space.

        This is useful for templated files with loops, where we'll get a
        violation for each pass around the loop, but the user only cares
        about it once and we're only going to fix it once.

        By filtering them early we get a more helpful CLI output *and* a
        more efficient fixing routine (by handling fewer fixes).
        """
        new_violations = []
        dedupe_buffer = set()
        for v in violations:
            signature = v.source_signature()
            if signature not in dedupe_buffer:
                new_violations.append(v)
                dedupe_buffer.add(signature)
            else:
                linter_logger.debug("Removing duplicate source violation: %r", v)
        return new_violations

    def get_violations(
        self,
        rules: Optional[Union[str, Tuple[str, ...]]] = None,
        types: Optional[Union[Type[SQLBaseError], Iterable[Type[SQLBaseError]]]] = None,
        filter_ignore: bool = True,
        filter_warning: bool = True,
        warn_unused_ignores: bool = False,
        fixable: Optional[bool] = None,
    ) -> list:
        """Get a list of violations, respecting filters and ignore options.
Optionally now with filters. """ violations = self.violations # Filter types if types: # If it's a singular type, make it a single item in a tuple # otherwise coerce to tuple normally so that we can use it with # isinstance. if isinstance(types, type) and issubclass(types, SQLBaseError): types = (types,) else: types = tuple(types) # pragma: no cover TODO? violations = [v for v in violations if isinstance(v, types)] # Filter rules if rules: if isinstance(rules, str): rules = (rules,) else: rules = tuple(rules) violations = [v for v in violations if v.rule_code() in rules] # Filter fixable if fixable is not None: # Assume that fixable is true or false if not None. # Fatal errors should always come through, regardless. violations = [v for v in violations if v.fixable is fixable or v.fatal] # Filter ignorable violations if filter_ignore: violations = [v for v in violations if not v.ignore] # Ignore any rules in the ignore mask if self.ignore_mask: violations = self.ignore_mask.ignore_masked_violations(violations) # Filter warning violations if filter_warning: violations = [v for v in violations if not v.warning] # Add warnings for unneeded noqa if applicable if warn_unused_ignores and not filter_warning and self.ignore_mask: violations += self.ignore_mask.generate_warnings_for_unused() return violations def num_violations(self, **kwargs) -> int: """Count the number of violations. Optionally now with filters. """ violations = self.get_violations(**kwargs) return len(violations) def is_clean(self) -> bool: """Return True if there are no ignorable violations.""" return not any(self.get_violations(filter_ignore=True)) @staticmethod def _log_hints(patch: FixPatch, templated_file: TemplatedFile): """Log hints for debugging during patch generation.""" # This next bit is ALL FOR LOGGING AND DEBUGGING max_log_length = 10 if patch.templated_slice.start >= max_log_length: pre_hint = templated_file.templated_str[ patch.templated_slice.start - max_log_length : patch.templated_slice.start ] else: pre_hint = templated_file.templated_str[: patch.templated_slice.start] if patch.templated_slice.stop + max_log_length < len( templated_file.templated_str ): post_hint = templated_file.templated_str[ patch.templated_slice.stop : patch.templated_slice.stop + max_log_length ] else: post_hint = templated_file.templated_str[patch.templated_slice.stop :] linter_logger.debug( " Templated Hint: ...%r <> %r...", pre_hint, post_hint ) def fix_string(self) -> Tuple[str, bool]: """Obtain the changes to a path as a string. We use the source mapping features of TemplatedFile to generate a list of "patches" which cover the non templated parts of the file and refer back to the locations in the original file. NB: This is MUCH FASTER than the original approach using difflib in pre 0.4.0. There is an important distinction here between Slices and Segments. A Slice is a portion of a file which is determined by the templater based on which portions of the source file are templated or not, and therefore before Lexing and so is completely dialect agnostic. A Segment is determined by the Lexer from portions of strings after templating. """ linter_logger.debug("Original Tree: %r", self.templated_file.templated_str) assert self.tree linter_logger.debug("Fixed Tree: %r", self.tree.raw) # The sliced file is contiguous in the TEMPLATED space. # NB: It has gaps and repeats in the source space. # It's also not the FIXED file either. 
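        # A minimal sketch (assuming a hypothetical input, not from this
        # file): for a source "SELECT {{ col }} FROM t" rendered to
        # "SELECT a FROM t", the templated slice 7:8 ("a") maps back to the
        # source slice 7:16 ("{{ col }}"), while the literal slices either
        # side are invariant between the two spaces.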
linter_logger.debug("### Templated File.") for idx, file_slice in enumerate(self.templated_file.sliced_file): t_str = self.templated_file.templated_str[file_slice.templated_slice] s_str = self.templated_file.source_str[file_slice.source_slice] if t_str == s_str: linter_logger.debug( " File slice: %s %r [invariant]", idx, file_slice ) else: linter_logger.debug(" File slice: %s %r", idx, file_slice) linter_logger.debug(" \t\t\ttemplated: %r\tsource: %r", t_str, s_str) original_source = self.templated_file.source_str # Generate patches from the fixed tree. In the process we sort # and deduplicate them so that the resultant list is in the # the right order for the source file without any duplicates. filtered_source_patches = self._generate_source_patches( self.tree, self.templated_file ) linter_logger.debug("Filtered source patches:") for idx, patch in enumerate(filtered_source_patches): linter_logger.debug(" %s: %s", idx, patch) # Any Template tags in the source file are off limits, unless # we're explicitly fixing the source file. source_only_slices = self.templated_file.source_only_slices() linter_logger.debug("Source-only slices: %s", source_only_slices) # We now slice up the file using the patches and any source only slices. # This gives us regions to apply changes to. slice_buff = self._slice_source_file_using_patches( filtered_source_patches, source_only_slices, self.templated_file.source_str ) linter_logger.debug("Final slice buffer: %s", slice_buff) # Iterate through the patches, building up the new string. fixed_source_string = self._build_up_fixed_source_string( slice_buff, filtered_source_patches, self.templated_file.source_str ) # The success metric here is whether anything ACTUALLY changed. return fixed_source_string, fixed_source_string != original_source @classmethod def _generate_source_patches( cls, tree: BaseSegment, templated_file: TemplatedFile ) -> List[FixPatch]: """Use the fixed tree to generate source patches. Importantly here we deduplicate and sort the patches from their position in the templated file into their intended order in the source file. """ # Iterate patches, filtering and translating as we go: linter_logger.debug("### Beginning Patch Iteration.") filtered_source_patches = [] dedupe_buffer = [] # We use enumerate so that we get an index for each patch. This is entirely # so when debugging logs we can find a given patch again! for idx, patch in enumerate(iter_patches(tree, templated_file=templated_file)): linter_logger.debug(" %s Yielded patch: %s", idx, patch) cls._log_hints(patch, templated_file) # Check for duplicates if patch.dedupe_tuple() in dedupe_buffer: linter_logger.info( " - Skipping. Source space Duplicate: %s", patch.dedupe_tuple(), ) continue # We now evaluate patches in the source-space for whether they overlap # or disrupt any templated sections. # The intent here is that unless explicitly stated, a fix should never # disrupt a templated section. # NOTE: We rely here on the patches being generated in order. # TODO: Implement a mechanism for doing templated section fixes. Given # these patches are currently generated from fixed segments, there will # likely need to be an entirely different mechanism # Get the affected raw slices. 
local_raw_slices = templated_file.raw_slices_spanning_source_slice( patch.source_slice ) local_type_list = [slc.slice_type for slc in local_raw_slices] # Deal with the easy cases of 1) New code at end 2) only literals if not local_type_list or set(local_type_list) == {"literal"}: linter_logger.info( " * Keeping patch on new or literal-only section.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) # Handle the easy case of an explicit source fix elif patch.patch_category == "source": linter_logger.info( " * Keeping explicit source fix patch.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) # Is it a zero length patch. elif ( patch.source_slice.start == patch.source_slice.stop and patch.source_slice.start == local_raw_slices[0].source_idx ): linter_logger.info( " * Keeping insertion patch on slice boundary.", ) filtered_source_patches.append(patch) dedupe_buffer.append(patch.dedupe_tuple()) else: # pragma: no cover # We've got a situation where the ends of our patch need to be # more carefully mapped. This used to happen with greedy template # element matching, but should now never happen. In the event that # it does, we'll warn but carry on. linter_logger.warning( "Skipping edit patch on uncertain templated section [%s], " "Please report this warning on GitHub along with the query " "that produced it.", (patch.patch_category, patch.source_slice), ) continue # Sort the patches before building up the file. return sorted(filtered_source_patches, key=lambda x: x.source_slice.start) @staticmethod def _slice_source_file_using_patches( source_patches: List[FixPatch], source_only_slices: List[RawFileSlice], raw_source_string: str, ) -> List[slice]: """Use patches to safely slice up the file before fixing. This uses source only slices to avoid overwriting sections of templated code in the source file (when we don't want to). We assume that the source patches have already been sorted and deduplicated. Sorting is important. If the slices aren't sorted then this function will miss chunks. If there are overlaps or duplicates then this function may produce strange results. """ # We now slice up the file using the patches and any source only slices. # This gives us regions to apply changes to. slice_buff = [] source_idx = 0 for patch in source_patches: # Are there templated slices at or before the start of this patch? # TODO: We'll need to explicit handling for template fixes here, because # they ARE source only slices. If we can get handling to work properly # here then this is the last hurdle and it will flow through # smoothly from here. while ( source_only_slices and source_only_slices[0].source_idx < patch.source_slice.start ): next_so_slice = source_only_slices.pop(0).source_slice() # Add a pre-slice before the next templated slices if needed. if next_so_slice.start > source_idx: slice_buff.append(slice(source_idx, next_so_slice.start)) # Add the templated slice. slice_buff.append(next_so_slice) source_idx = next_so_slice.stop # Does this patch cover the next source-only slice directly? if ( source_only_slices and patch.source_slice == source_only_slices[0].source_slice() ): linter_logger.info( "Removing next source only slice from the stack because it " "covers the same area of source file as the current patch: %s %s", source_only_slices[0], patch, ) # If it does, remove it so that we don't duplicate it. source_only_slices.pop(0) # Is there a gap between current position and this patch? 
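            # A worked sketch with hypothetical values: for a source of
            # length 40, one protected template slice at 10:15 and one patch
            # at 20:25, the buffer built here ends up as
            # [0:10, 10:15, 15:20, 20:25, 25:40], i.e. a literal gap, the
            # template slice, another literal gap, the patch, then the tail.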
            if patch.source_slice.start > source_idx:
                # Add a slice up to this patch.
                slice_buff.append(slice(source_idx, patch.source_slice.start))

            # Is this patch covering an area we've already covered?
            if patch.source_slice.start < source_idx:  # pragma: no cover
                # NOTE: This shouldn't happen. With more detailed templating
                # this shouldn't happen - but in the off-chance that this does
                # happen - then this code path remains.
                linter_logger.info(
                    "Skipping overlapping patch at Index %s, Patch: %s",
                    source_idx,
                    patch,
                )
                # Ignore the patch for now...
                continue

            # Add this patch.
            slice_buff.append(patch.source_slice)
            source_idx = patch.source_slice.stop
        # Add a tail slice.
        if source_idx < len(raw_source_string):
            slice_buff.append(slice(source_idx, len(raw_source_string)))

        return slice_buff

    @staticmethod
    def _build_up_fixed_source_string(
        source_file_slices: List[slice],
        source_patches: List[FixPatch],
        raw_source_string: str,
    ) -> str:
        """Use patches and raw file to fix the source file.

        This assumes that patches and slices have already been coordinated.
        If they haven't then this will fail because we rely on patches having
        a corresponding slice of exactly the right file in the list of file
        slices.
        """
        # Iterate through the patches, building up the new string.
        str_buff = ""
        for source_slice in source_file_slices:
            # Is it one in the patch buffer:
            for patch in source_patches:
                if patch.source_slice == source_slice:
                    # Use the patched version
                    linter_logger.debug(
                        "%-30s    %s %r > %r",
                        f"Appending {patch.patch_category} Patch:",
                        patch.source_slice,
                        patch.source_str,
                        patch.fixed_raw,
                    )
                    str_buff += patch.fixed_raw
                    break
            else:
                # Use the raw string
                linter_logger.debug(
                    "Appending Raw:                    %s %r",
                    source_slice,
                    raw_source_string[source_slice],
                )
                str_buff += raw_source_string[source_slice]
        return str_buff

    def persist_tree(self, suffix: str = "", formatter: Any = None) -> bool:
        """Persist changes to the given path."""
        if self.num_violations(fixable=True) > 0:
            write_buff, success = self.fix_string()

            if success:
                fname = self.path
                # If there is a suffix specified, then use it.
                if suffix:
                    root, ext = os.path.splitext(fname)
                    fname = root + suffix + ext
                self._safe_create_replace_file(
                    self.path, fname, write_buff, self.encoding
                )
                result_label = "FIXED"
            else:  # pragma: no cover
                result_label = "FAIL"
        else:
            result_label = "SKIP"
            success = True

        if formatter:
            formatter.dispatch_persist_filename(filename=self.path, result=result_label)

        return success

    def discard_fixes_if_tmp_or_prs_errors(self) -> None:
        """Discard lint fixes for files with templating or parse errors."""
        num_errors = self.num_violations(
            types=TMP_PRS_ERROR_TYPES,
            filter_ignore=False,
            filter_warning=False,
        )
        if num_errors:
            # File has errors. Discard all the SQLLintError fixes:
            # they are potentially unsafe.
            for violation in self.violations:
                if isinstance(violation, SQLLintError):
                    violation.fixes = []

    @staticmethod
    def _safe_create_replace_file(
        input_path: str, output_path: str, write_buff: str, encoding: str
    ):
        # Write to a temporary file first, so in case of encoding or other
        # issues, we don't delete or corrupt the user's existing file.

        # Get file mode (i.e. permissions) on existing file. We'll preserve the
        # same permissions on the output file.
        mode = None
        try:
            status = os.stat(input_path)
        except FileNotFoundError:
            pass
        else:
            if stat.S_ISREG(status.st_mode):
                mode = stat.S_IMODE(status.st_mode)
        dirname, basename = os.path.split(output_path)
        with tempfile.NamedTemporaryFile(
            mode="w",
            encoding=encoding,
            newline="",  # NOTE: No newline conversion. Write as read.
prefix=basename, dir=dirname, suffix=os.path.splitext(output_path)[1], delete=False, ) as tmp: tmp.file.write(write_buff) tmp.flush() os.fsync(tmp.fileno()) # Once the temp file is safely written, replace the existing file. if mode is not None: os.chmod(tmp.name, mode) shutil.move(tmp.name, output_path) sqlfluff-2.3.5/src/sqlfluff/core/linter/linter.py000066400000000000000000001313361451700765000220640ustar00rootroot00000000000000"""Defines the linter class.""" import logging import os import time from typing import ( TYPE_CHECKING, Any, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Type, cast, ) import pathspec import regex from tqdm import tqdm from sqlfluff.core.config import ConfigLoader, FluffConfig, progress_bar_configuration from sqlfluff.core.errors import ( SQLBaseError, SQLFluffSkipFile, SQLFluffUserError, SQLLexError, SQLLintError, SQLParseError, ) from sqlfluff.core.file_helpers import get_encoding from sqlfluff.core.linter.common import ParsedString, RenderedFile, RuleTuple from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linted_file import ( TMP_PRS_ERROR_TYPES, FileTimings, LintedFile, ) from sqlfluff.core.linter.linting_result import LintingResult from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix from sqlfluff.core.rules import BaseRule, RulePack, get_ruleset from sqlfluff.core.rules.fix import apply_fixes, compute_anchor_edit_info from sqlfluff.core.rules.noqa import IgnoreMask if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments.meta import MetaSegment from sqlfluff.core.templaters import TemplatedFile WalkableType = Iterable[Tuple[str, Optional[List[str]], List[str]]] RuleTimingsType = List[Tuple[str, str, float]] # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class Linter: """The interface class to interact with the linter.""" # Default to allowing process parallelism allow_process_parallelism = True def __init__( self, config: Optional[FluffConfig] = None, formatter: Any = None, dialect: Optional[str] = None, rules: Optional[List[str]] = None, user_rules: Optional[List[Type[BaseRule]]] = None, exclude_rules: Optional[List[str]] = None, ) -> None: # Store the config object self.config = FluffConfig.from_kwargs( config=config, dialect=dialect, rules=rules, exclude_rules=exclude_rules, # Don't require a dialect to be provided yet. Defer this until we # are actually linting something, since the directory we are linting # from may provide additional configuration, including a dialect. require_dialect=False, ) # Get the dialect and templater self.dialect = self.config.get("dialect_obj") self.templater = self.config.get("templater_obj") # Store the formatter for output self.formatter = formatter # Store references to user rule classes self.user_rules = user_rules or [] def get_rulepack(self, config: Optional[FluffConfig] = None) -> RulePack: """Get hold of a set of rules.""" rs = get_ruleset() # Register any user rules for rule in self.user_rules: rs.register(rule) cfg = config or self.config return rs.get_rulepack(config=cfg) def rule_tuples(self) -> List[RuleTuple]: """A simple pass through to access the rule tuples of the rule set.""" rs = self.get_rulepack() return [ RuleTuple(rule.code, rule.name, rule.description, rule.groups, rule.aliases) for rule in rs.rules ] # #### Static methods # These are the building blocks of the linting process. 
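    # A minimal usage sketch (not from the original source; "SELECT 1\n" is
    # an assumed input and the exact violations depend on configuration):
    # >>> from sqlfluff.core import Linter
    # >>> lnt = Linter(dialect="ansi")
    # >>> linted = lnt.lint_string("SELECT 1\n")
    # >>> [v.rule_code() for v in linted.get_violations()]
    # []  (clean under default config, assuming no local .sqlfluff overrides)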
@staticmethod def load_raw_file_and_config( fname: str, root_config: FluffConfig ) -> Tuple[str, FluffConfig, str]: """Load a raw file and the associated config.""" file_config = root_config.make_child_from_path(fname) encoding = get_encoding(fname=fname, config=file_config) # Check file size before loading. limit = file_config.get("large_file_skip_byte_limit") if limit: # Get the file size file_size = os.path.getsize(fname) if file_size > limit: raise SQLFluffSkipFile( f"Length of file {fname!r} is {file_size} bytes which is over " f"the limit of {limit} bytes. Skipping to avoid parser lock. " "Users can increase this limit in their config by setting the " "'large_file_skip_byte_limit' value, or disable by setting it " "to zero." ) with open(fname, encoding=encoding, errors="backslashreplace") as target_file: raw_file = target_file.read() # Scan the raw file for config commands. file_config.process_raw_file_for_config(raw_file, fname) # Return the raw file and config return raw_file, file_config, encoding @staticmethod def _normalise_newlines(string: str) -> str: """Normalise newlines to unix-style line endings.""" return regex.sub(r"\r\n|\r", "\n", string) @staticmethod def _lex_templated_file( templated_file: "TemplatedFile", config: FluffConfig ) -> Tuple[Optional[Sequence[BaseSegment]], List[SQLLexError], FluffConfig]: """Lex a templated file. NOTE: This potentially mutates the config, so make sure to use the returned one. """ violations = [] linter_logger.info("LEXING RAW (%s)", templated_file.fname) # Get the lexer lexer = Lexer(config=config) # Lex the file and log any problems try: tokens, lex_vs = lexer.lex(templated_file) # We might just get the violations as a list violations += lex_vs linter_logger.info( "Lexed tokens: %s", [seg.raw for seg in tokens] if tokens else None ) except SQLLexError as err: # pragma: no cover linter_logger.info("LEXING FAILED! (%s): %s", templated_file.fname, err) violations.append(err) return None, violations, config if not tokens: # pragma: no cover TODO? return None, violations, config # Check that we've got sensible indentation from the lexer. # We might need to suppress if it's a complicated file. templating_blocks_indent = config.get("template_blocks_indent", "indentation") if isinstance(templating_blocks_indent, str): force_block_indent = templating_blocks_indent.lower().strip() == "force" else: force_block_indent = False templating_blocks_indent = bool(templating_blocks_indent) # If we're forcing it through we don't check. if templating_blocks_indent and not force_block_indent: indent_balance = sum( getattr(elem, "indent_val", 0) for elem in cast(Tuple[BaseSegment, ...], tokens) ) if indent_balance != 0: # pragma: no cover linter_logger.debug( "Indent balance test failed for %r. Template indents will not be " "linted for this file.", templated_file.fname, ) # Don't enable the templating blocks. templating_blocks_indent = False # The file will have been lexed without config, so check all indents # are enabled. new_tokens = [] for token in cast(Tuple[BaseSegment, ...], tokens): if token.is_meta: token = cast("MetaSegment", token) if token.indent_val != 0: # Don't allow it if we're not linting templating block indents. 
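                    # Note: indent_val is +1 for an Indent token and -1 for a
                    # Dedent, so the indent_balance sum above is zero exactly
                    # when a template's block indents are paired.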
if not templating_blocks_indent: continue # pragma: no cover new_tokens.append(token) # Return new buffer return new_tokens, violations, config @staticmethod def _parse_tokens( tokens: Sequence[BaseSegment], config: FluffConfig, fname: Optional[str] = None, parse_statistics: bool = False, ) -> Tuple[Optional[BaseSegment], List[SQLParseError]]: parser = Parser(config=config) violations = [] # Parse the file and log any problems try: parsed: Optional[BaseSegment] = parser.parse( # Regardless of how the sequence was passed in, we should # coerce it to a tuple here, before we head deeper into # the parsing process. tuple(tokens), fname=fname, parse_statistics=parse_statistics, ) except SQLParseError as err: linter_logger.info("PARSING FAILED! : %s", err) violations.append(err) return None, violations if parsed is None: # pragma: no cover return None, violations linter_logger.info("\n###\n#\n# {}\n#\n###".format("Parsed Tree:")) linter_logger.info("\n" + parsed.stringify()) # We may succeed parsing, but still have unparsable segments. Extract them # here. for unparsable in parsed.iter_unparsables(): # No exception has been raised explicitly, but we still create one here # so that we can use the common interface assert unparsable.pos_marker violations.append( SQLParseError( "Line {0[0]}, Position {0[1]}: Found unparsable section: " "{1!r}".format( unparsable.pos_marker.working_loc, unparsable.raw if len(unparsable.raw) < 40 else unparsable.raw[:40] + "...", ), segment=unparsable, ) ) linter_logger.info("Found unparsable segment...") linter_logger.info(unparsable.stringify()) return parsed, violations @staticmethod def remove_templated_errors( linting_errors: List[SQLBaseError], ) -> List[SQLBaseError]: """Filter a list of lint errors, removing those from the templated slices.""" # Filter out any linting errors in templated sections if relevant. result: List[SQLBaseError] = [] for e in linting_errors: if isinstance(e, SQLLintError): assert e.segment.pos_marker if ( # Is it in a literal section? e.segment.pos_marker.is_literal() # Is it a rule that is designed to work on templated sections? or e.rule.targets_templated ): result.append(e) else: # If it's another type, just keep it. (E.g. SQLParseError from # malformed "noqa" comment). result.append(e) return result @staticmethod def _report_conflicting_fixes_same_anchor(message: str) -> None: # pragma: no cover # This function exists primarily in order to let us monkeypatch it at # runtime (replacing it with a function that raises an exception). linter_logger.critical(message) @staticmethod def _warn_unfixable(code: str) -> None: linter_logger.warning( f"One fix for {code} not applied, it would re-cause the same error." ) # ### Class Methods # These compose the base static methods into useful recipes. 
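    # Roughly, the flow composed below is:
    #   render_string()  -> RenderedFile
    #   parse_rendered() -> ParsedString
    #   lint_parsed()    -> LintedFile
    # Instance methods such as lint_string() simply chain these recipes.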
@classmethod def parse_rendered( cls, rendered: RenderedFile, parse_statistics: bool = False, ) -> ParsedString: """Parse a rendered file.""" t0 = time.monotonic() violations = cast(List[SQLBaseError], rendered.templater_violations) tokens: Optional[Sequence[BaseSegment]] if rendered.templated_file is not None: tokens, lvs, config = cls._lex_templated_file( rendered.templated_file, rendered.config ) violations += lvs else: tokens = None t1 = time.monotonic() linter_logger.info("PARSING (%s)", rendered.fname) if tokens: parsed, pvs = cls._parse_tokens( tokens, rendered.config, fname=rendered.fname, parse_statistics=parse_statistics, ) violations += pvs else: parsed = None time_dict = { **rendered.time_dict, "lexing": t1 - t0, "parsing": time.monotonic() - t1, } return ParsedString( parsed, violations, time_dict, rendered.templated_file, rendered.config, rendered.fname, rendered.source_str, ) @classmethod def lint_fix_parsed( cls, tree: BaseSegment, config: FluffConfig, rule_pack: RulePack, fix: bool = False, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, formatter: Any = None, ) -> Tuple[BaseSegment, List[SQLBaseError], Optional[IgnoreMask], RuleTimingsType]: """Lint and optionally fix a tree object.""" # Keep track of the linting errors on the very first linter pass. The # list of issues output by "lint" and "fix" only includes issues present # in the initial SQL code, EXCLUDING any issues that may be created by # the fixes themselves. initial_linting_errors = [] # A placeholder for the fixes we had on the previous loop last_fixes = None # Keep a set of previous versions to catch infinite loops. previous_versions: Set[Tuple[str, Tuple["SourceFix", ...]]] = {(tree.raw, ())} # Keep a buffer for recording rule timings. rule_timings: RuleTimingsType = [] # If we are fixing then we want to loop up to the runaway_limit, otherwise just # once for linting. loop_limit = config.get("runaway_limit") if fix else 1 # Dispatch the output for the lint header if formatter: formatter.dispatch_lint_header(fname, sorted(rule_pack.codes())) # Look for comment segments which might indicate lines to ignore. if not config.get("disable_noqa"): ignore_mask, ivs = IgnoreMask.from_tree(tree, rule_pack.reference_map) initial_linting_errors += ivs else: ignore_mask = None save_tree = tree # There are two phases of rule running. # 1. The main loop is for most rules. These rules are assumed to # interact and cause a cascade of fixes requiring multiple passes. # These are run the `runaway_limit` number of times (default 10). # 2. The post loop is for post-processing rules, not expected to trigger # any downstream rules, e.g. capitalization fixes. They are run on the # first loop and then twice at the end (once to fix, and once again to # check result of fixes), but not in the intervening loops. phases = ["main"] if fix: phases.append("post") for phase in phases: if len(phases) > 1: rules_this_phase = [ rule for rule in rule_pack.rules if rule.lint_phase == phase ] else: rules_this_phase = rule_pack.rules for loop in range(loop_limit if phase == "main" else 2): def is_first_linter_pass() -> bool: return phase == phases[0] and loop == 0 # Additional newlines are to assist in scanning linting loops # during debugging. linter_logger.info( f"\n\nEntering linter phase {phase}, " f"loop {loop + 1}/{loop_limit}\n" ) changed = False if is_first_linter_pass(): # In order to compute initial_linting_errors correctly, need # to run all rules on the first loop of the main phase. 
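                    # Concretely: with the default runaway_limit of 10, a fix
                    # run makes up to ten passes in the main phase plus two in
                    # the post phase, while a plain lint run makes exactly one
                    # (loop_limit is 1 when fix is False).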
rules_this_phase = rule_pack.rules progress_bar_crawler = tqdm( rules_this_phase, desc="lint by rules", leave=False, disable=progress_bar_configuration.disable_progress_bar, ) for crawler in progress_bar_crawler: # Performance: After first loop pass, skip rules that don't # do fixes. Any results returned won't be seen by the user # anyway (linting errors ADDED by rules changing SQL, are # not reported back to the user - only initial linting errors), # so there's absolutely no reason to run them. if ( fix and not is_first_linter_pass() and not crawler.is_fix_compatible ): continue progress_bar_crawler.set_description(f"rule {crawler.code}") t0 = time.monotonic() # fixes should be a dict {} with keys edit, delete, create # delete is just a list of segments to delete # edit and create are list of tuples. The first element is # the "anchor", the segment to look for either to edit or to # insert BEFORE. The second is the element to insert or create. linting_errors, _, fixes, _ = crawler.crawl( tree, dialect=config.get("dialect_obj"), fix=fix, templated_file=templated_file, ignore_mask=ignore_mask, fname=fname, config=config, ) if is_first_linter_pass(): initial_linting_errors += linting_errors if fix and fixes: linter_logger.info(f"Applying Fixes [{crawler.code}]: {fixes}") # Do some sanity checks on the fixes before applying. anchor_info = compute_anchor_edit_info(fixes) if any( not info.is_valid for info in anchor_info.values() ): # pragma: no cover message = ( f"Rule {crawler.code} returned conflicting " "fixes with the same anchor. This is only " "supported for create_before+create_after, so " "the fixes will not be applied. " ) for uuid, info in anchor_info.items(): if not info.is_valid: message += f"\n{uuid}:" for _fix in info.fixes: message += f"\n {_fix}" cls._report_conflicting_fixes_same_anchor(message) for lint_result in linting_errors: lint_result.fixes = [] elif fixes == last_fixes: # pragma: no cover # If we generate the same fixes two times in a row, # that means we're in a loop, and we want to stop. # (Fixes should address issues, hence different # and/or fewer fixes next time.) cls._warn_unfixable(crawler.code) else: # This is the happy path. We have fixes, now we want to # apply them. last_fixes = fixes new_tree, _, _, _valid = apply_fixes( tree, config.get("dialect_obj"), crawler.code, anchor_info, ) # Check for infinite loops. We use a combination of the # fixed templated file and the list of source fixes to # apply. loop_check_tuple = ( new_tree.raw, tuple(new_tree.source_fixes), ) if not _valid: # The fixes result in an invalid file. Don't apply # the fix and skip onward. Show a warning. linter_logger.warning( f"Fixes for {crawler.code} not applied, as it " "would result in an unparsable file. Please " "report this as a bug with a minimal query " "which demonstrates this warning." ) elif loop_check_tuple not in previous_versions: # We've not seen this version of the file so # far. Continue. tree = new_tree previous_versions.add(loop_check_tuple) changed = True continue else: # Applying these fixes took us back to a state # which we've seen before. We're in a loop, so # we want to stop. cls._warn_unfixable(crawler.code) # Record rule timing rule_timings.append( (crawler.code, crawler.name, time.monotonic() - t0) ) if fix and not changed: # We did not change the file. Either the file is clean (no # fixes), or any fixes which are present will take us back # to a previous state. linter_logger.info( f"Fix loop complete for {phase} phase. 
Stability " f"achieved after {loop}/{loop_limit} loops." ) break else: if fix: # The linter loop hit the limit before reaching a stable point # (i.e. free of lint errors). If this happens, it's usually # because one or more rules produced fixes which did not address # the original issue **or** created new issues. linter_logger.warning( f"Loop limit on fixes reached [{loop_limit}]." ) # Discard any fixes for the linting errors, since they caused a # loop. IMPORTANT: By doing this, we are telling SQLFluff that # these linting errors are "unfixable". This is important, # because when "sqlfluff fix" encounters unfixable lint errors, # it exits with a "failure" exit code, which is exactly what we # want in this situation. (Reason: Although this is more of an # internal SQLFluff issue, users deserve to know about it, # because it means their file(s) weren't fixed. for violation in initial_linting_errors: if isinstance(violation, SQLLintError): violation.fixes = [] # Return the original parse tree, before any fixes were applied. # Reason: When the linter hits the loop limit, the file is often # messy, e.g. some of the fixes were applied repeatedly, possibly # other weird things. We don't want the user to see this junk! return save_tree, initial_linting_errors, ignore_mask, rule_timings if config.get("ignore_templated_areas", default=True): initial_linting_errors = cls.remove_templated_errors(initial_linting_errors) linter_logger.info("\n###\n#\n# {}\n#\n###".format("Fixed Tree:")) linter_logger.info("\n" + tree.stringify()) return tree, initial_linting_errors, ignore_mask, rule_timings @classmethod def lint_parsed( cls, parsed: ParsedString, rule_pack: RulePack, fix: bool = False, formatter: Any = None, encoding: str = "utf8", ) -> LintedFile: """Lint a ParsedString and return a LintedFile.""" violations = parsed.violations time_dict = parsed.time_dict tree: Optional[BaseSegment] if parsed.tree: t0 = time.monotonic() linter_logger.info("LINTING (%s)", parsed.fname) ( tree, initial_linting_errors, ignore_mask, rule_timings, ) = cls.lint_fix_parsed( parsed.tree, config=parsed.config, rule_pack=rule_pack, fix=fix, fname=parsed.fname, templated_file=parsed.templated_file, formatter=formatter, ) # Update the timing dict time_dict["linting"] = time.monotonic() - t0 # We're only going to return the *initial* errors, rather # than any generated during the fixing cycle. violations += initial_linting_errors else: # If no parsed tree, set to None tree = None ignore_mask = None rule_timings = [] if not parsed.config.get("disable_noqa"): # Templating and/or parsing have failed. Look for "noqa" # comments (the normal path for identifying these comments # requires access to the parse tree, and because of the failure, # we don't have a parse tree). ignore_mask, ignore_violations = IgnoreMask.from_source( parsed.source_str, [ lm for lm in parsed.config.get("dialect_obj").lexer_matchers if lm.name == "inline_comment" ][0], rule_pack.reference_map, ) violations += ignore_violations # We process the ignore config here if appropriate for violation in violations: violation.ignore_if_in(parsed.config.get("ignore")) violation.warning_if_in(parsed.config.get("warnings")) linted_file = LintedFile( parsed.fname, # Deduplicate violations LintedFile.deduplicate_in_source_space(violations), FileTimings(time_dict, rule_timings), tree, ignore_mask=ignore_mask, templated_file=parsed.templated_file, encoding=encoding, ) # This is the main command line output from linting. 
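        # Note that the "noqa" fallback above means a comment such as
        # (an assumed inline example):
        #     SELECT col_a a  -- noqa
        # can still mask violations on its line even when templating or
        # parsing failed, because the IgnoreMask is rebuilt from raw source.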
if formatter: formatter.dispatch_file_violations( parsed.fname, linted_file, only_fixable=fix, warn_unused_ignores=parsed.config.get("warn_unused_ignores"), ) # Safety flag for unset dialects if linted_file.get_violations( fixable=True if fix else None, types=SQLParseError ): if formatter: # pragma: no cover TODO? formatter.dispatch_dialect_warning(parsed.config.get("dialect")) return linted_file @classmethod def lint_rendered( cls, rendered: RenderedFile, rule_pack: RulePack, fix: bool = False, formatter: Any = None, ) -> LintedFile: """Take a RenderedFile and return a LintedFile.""" parsed = cls.parse_rendered(rendered) return cls.lint_parsed( parsed, rule_pack=rule_pack, fix=fix, formatter=formatter, encoding=rendered.encoding, ) # ### Instance Methods # These are tied to a specific instance and so are not necessarily # safe to use in parallel operations. def render_string( self, in_str: str, fname: str, config: FluffConfig, encoding: str ) -> RenderedFile: """Template the file.""" linter_logger.info("TEMPLATING RAW [%s] (%s)", self.templater.name, fname) # Start the templating timer t0 = time.monotonic() # Newlines are normalised to unix-style line endings (\n). # The motivation is that Jinja normalises newlines during templating and # we want consistent mapping between the raw and templated slices. in_str = self._normalise_newlines(in_str) # Since Linter.__init__() does not require a dialect to be specified, # check for one now. (We're processing a string, not a file, so we're # not going to pick up a .sqlfluff or other config file to provide a # missing dialect at this point.) config.verify_dialect_specified() if not config.get("templater_obj") == self.templater: linter_logger.warning( ( f"Attempt to set templater to {config.get('templater_obj').name} " f"failed. Using {self.templater.name} templater. Templater cannot " "be set in a .sqlfluff file in a subdirectory of the current " "working directory. It can be set in a .sqlfluff in the current " "working directory. See Nesting section of the docs for more " "details." ) ) try: templated_file, templater_violations = self.templater.process( in_str=in_str, fname=fname, config=config, formatter=self.formatter ) except SQLFluffSkipFile as s: # pragma: no cover linter_logger.warning(str(s)) templated_file = None templater_violations = [] if templated_file is None: linter_logger.info("TEMPLATING FAILED: %s", templater_violations) # Record time time_dict = {"templating": time.monotonic() - t0} return RenderedFile( templated_file, templater_violations, config, time_dict, fname, encoding, in_str, ) def render_file(self, fname: str, root_config: FluffConfig) -> RenderedFile: """Load and render a file with relevant config.""" # Load the raw file. raw_file, config, encoding = self.load_raw_file_and_config(fname, root_config) # Render the file return self.render_string(raw_file, fname, config, encoding) def parse_string( self, in_str: str, fname: str = "", config: Optional[FluffConfig] = None, encoding: str = "utf-8", parse_statistics: bool = False, ) -> ParsedString: """Parse a string.""" violations: List[SQLBaseError] = [] # Dispatch the output for the template header (including the config diff) if self.formatter: self.formatter.dispatch_template_header(fname, self.config, config) # Just use the local config from here: config = config or self.config # Scan the raw file for config commands. 
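        # "Config commands" here means inline directives embedded in the SQL
        # itself, e.g.:
        #     -- sqlfluff:dialect:postgres
        # which are applied to this file's config before rendering.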
config.process_raw_file_for_config(in_str, fname) rendered = self.render_string(in_str, fname, config, encoding) violations += rendered.templater_violations # Dispatch the output for the parse header if self.formatter: self.formatter.dispatch_parse_header(fname) return self.parse_rendered(rendered, parse_statistics=parse_statistics) def fix( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, ) -> Tuple[BaseSegment, List[SQLBaseError]]: """Return the fixed tree and violations from lintfix when we're fixing.""" config = config or self.config rule_pack = self.get_rulepack(config=config) fixed_tree, violations, _, _ = self.lint_fix_parsed( tree, config, rule_pack, fix=True, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return fixed_tree, violations def lint( self, tree: BaseSegment, config: Optional[FluffConfig] = None, fname: Optional[str] = None, templated_file: Optional["TemplatedFile"] = None, ) -> List[SQLBaseError]: """Return just the violations from lintfix when we're only linting.""" config = config or self.config rule_pack = self.get_rulepack(config=config) _, violations, _, _ = self.lint_fix_parsed( tree, config, rule_pack, fix=False, fname=fname, templated_file=templated_file, formatter=self.formatter, ) return violations def lint_string( self, in_str: str = "", fname: str = "", fix: bool = False, config: Optional[FluffConfig] = None, encoding: str = "utf8", ) -> LintedFile: """Lint a string. Returns: :obj:`LintedFile`: an object representing that linted file. """ # Sort out config, defaulting to the built in config if no override config = config or self.config # Parse the string. parsed = self.parse_string( in_str=in_str, fname=fname, config=config, ) # Get rules as appropriate rule_pack = self.get_rulepack(config=config) # Lint the file and return the LintedFile return self.lint_parsed( parsed, rule_pack, fix=fix, formatter=self.formatter, encoding=encoding, ) def paths_from_path( self, path: str, ignore_file_name: str = ".sqlfluffignore", ignore_non_existent_files: bool = False, ignore_files: bool = True, working_path: str = os.getcwd(), ) -> List[str]: """Return a set of sql file paths from a potentially more ambiguous path string. Here we also deal with the .sqlfluffignore file if present. When a path to a file to be linted is explicitly passed we look for ignore files in all directories that are parents of the file, up to the current directory. If the current directory is not a parent of the file we only look for an ignore file in the direct parent of the file. """ if not os.path.exists(path): if ignore_non_existent_files: return [] else: raise SQLFluffUserError( f"Specified path does not exist. Check it/they exist(s): {path}." 
) # Files referred to exactly are also ignored if # matched, but we warn the users when that happens is_exact_file = os.path.isfile(path) path_walk: WalkableType if is_exact_file: # When the exact file to lint is passed, we # fill path_walk with an input that follows # the structure of `os.walk`: # (root, directories, files) dirpath = os.path.dirname(path) files = [os.path.basename(path)] path_walk = [(dirpath, None, files)] else: path_walk = list(os.walk(path)) ignore_file_paths = ConfigLoader.find_ignore_config_files( path=path, working_path=working_path, ignore_file_name=ignore_file_name ) # Add paths that could contain "ignore files" # to the path_walk list path_walk_ignore_file = [ ( os.path.dirname(ignore_file_path), None, # Only one possible file, since we only # have one "ignore file name" [os.path.basename(ignore_file_path)], ) for ignore_file_path in ignore_file_paths ] path_walk += path_walk_ignore_file # If it's a directory then expand the path! buffer = [] ignores = {} for dirpath, _, filenames in path_walk: for fname in filenames: fpath = os.path.join(dirpath, fname) # Handle potential .sqlfluffignore files if ignore_files and fname == ignore_file_name: with open(fpath) as fh: spec = pathspec.PathSpec.from_lines("gitwildmatch", fh) ignores[dirpath] = spec # We don't need to process the ignore file any further continue # We won't purge files *here* because there's an edge case # that the ignore file is processed after the sql file. # Scan for remaining files for ext in ( self.config.get("sql_file_exts", default=".sql").lower().split(",") ): # is it a sql file? if fname.lower().endswith(ext): buffer.append(fpath) if not ignore_files: return sorted(buffer) # Check the buffer for ignore items and normalise the rest. # It's a set, so we can do natural deduplication. filtered_buffer = set() for fpath in buffer: abs_fpath = os.path.abspath(fpath) for ignore_base, ignore_spec in ignores.items(): abs_ignore_base = os.path.abspath(ignore_base) if abs_fpath.startswith( abs_ignore_base + ( "" if os.path.dirname(abs_ignore_base) == abs_ignore_base else os.sep ) ) and ignore_spec.match_file( os.path.relpath(abs_fpath, abs_ignore_base) ): # This file is ignored, skip it. if is_exact_file: linter_logger.warning( "Exact file path %s was given but " "it was ignored by a %s pattern in %s, " "re-run with `--disregard-sqlfluffignores` to " "skip %s" % ( path, ignore_file_name, ignore_base, ignore_file_name, ) ) break else: npath = os.path.normpath(fpath) # For debugging, log if we already have the file. if npath in filtered_buffer: linter_logger.debug( # pragma: no cover "Developer Warning: Path crawler attempted to " "requeue the same file twice. 
%s is already in " "filtered buffer.", npath, ) filtered_buffer.add(npath) # Return a sorted list return sorted(filtered_buffer) def lint_string_wrapped( self, string: str, fname: str = "", fix: bool = False, ) -> LintingResult: """Lint strings directly.""" result = LintingResult() linted_path = LintedDir(fname) linted_path.add(self.lint_string(string, fname=fname, fix=fix)) result.add(linted_path) result.stop_timer() return result def lint_path( self, path: str, fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: Optional[int] = None, ) -> LintedDir: """Lint a path.""" return self.lint_paths( (path,), fix, ignore_non_existent_files, ignore_files, processes ).paths[0] def lint_paths( self, paths: Tuple[str, ...], fix: bool = False, ignore_non_existent_files: bool = False, ignore_files: bool = True, processes: Optional[int] = None, apply_fixes: bool = False, fixed_file_suffix: str = "", fix_even_unparsable: bool = False, ) -> LintingResult: """Lint an iterable of paths.""" # If no paths specified - assume local if not paths: # pragma: no cover paths = (os.getcwd(),) # Set up the result to hold what we get back result = LintingResult() expanded_paths: List[str] = [] expanded_path_to_linted_dir = {} for path in paths: linted_dir = LintedDir(path) result.add(linted_dir) for fname in self.paths_from_path( path, ignore_non_existent_files=ignore_non_existent_files, ignore_files=ignore_files, ): expanded_paths.append(fname) expanded_path_to_linted_dir[fname] = linted_dir files_count = len(expanded_paths) if processes is None: processes = self.config.get("processes", default=1) # Hard set processes to 1 if only 1 file is queued. # The overhead will never be worth it with one file. if files_count == 1: processes = 1 # to avoid circular import from sqlfluff.core.linter.runner import get_runner runner, effective_processes = get_runner( self, self.config, processes=processes, allow_process_parallelism=self.allow_process_parallelism, ) if self.formatter and effective_processes != 1: self.formatter.dispatch_processing_header(effective_processes) # Show files progress bar only when there is more than one. first_path = expanded_paths[0] if expanded_paths else "" progress_bar_files = tqdm( total=files_count, desc=f"file {first_path}", leave=False, disable=files_count <= 1 or progress_bar_configuration.disable_progress_bar, ) for i, linted_file in enumerate(runner.run(expanded_paths, fix), start=1): linted_dir = expanded_path_to_linted_dir[linted_file.path] linted_dir.add(linted_file) # If any fatal errors, then stop iteration. if any(v.fatal for v in linted_file.violations): # pragma: no cover linter_logger.error("Fatal linting error. Halting further linting.") break # If we're applying fixes, then do that here. if apply_fixes: num_tmp_prs_errors = linted_file.num_violations( types=TMP_PRS_ERROR_TYPES, filter_ignore=False, filter_warning=False, ) if fix_even_unparsable or num_tmp_prs_errors == 0: linted_file.persist_tree( suffix=fixed_file_suffix, formatter=self.formatter ) # Progress bar for files is rendered only when there is more than one file. # Additionally, as it's updated after each loop, we need to get file name # from the next loop. This is why `enumerate` starts with `1` and there # is `i < len` to not exceed files list length. 
progress_bar_files.update(n=1) if i < len(expanded_paths): progress_bar_files.set_description(f"file {expanded_paths[i]}") result.stop_timer() return result def parse_path( self, path: str, parse_statistics: bool = False, ) -> Iterator[ParsedString]: """Parse a path of sql files. NB: This is a generator which will yield the result of each file within the path iteratively. """ for fname in self.paths_from_path(path): if self.formatter: self.formatter.dispatch_path(path) # Load the file with the config and yield the result. try: raw_file, config, encoding = self.load_raw_file_and_config( fname, self.config ) except SQLFluffSkipFile as s: linter_logger.warning(str(s)) continue yield self.parse_string( raw_file, fname=fname, config=config, encoding=encoding, parse_statistics=parse_statistics, ) sqlfluff-2.3.5/src/sqlfluff/core/linter/linting_result.py000066400000000000000000000240741451700765000236310ustar00rootroot00000000000000"""Defines the LintingResult class.""" import csv import time from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union, overload from typing_extensions import Literal from sqlfluff.core.errors import CheckTuple from sqlfluff.core.linter.linted_dir import LintedDir from sqlfluff.core.linter.linted_file import TMP_PRS_ERROR_TYPES from sqlfluff.core.timing import RuleTimingSummary, TimingSummary if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments.base import BaseSegment class LintingResult: """A class to represent the result of a linting operation. Notably this might be a collection of paths, all with multiple potential files within them. """ def __init__(self) -> None: self.paths: List[LintedDir] = [] self._start_time: float = time.monotonic() self.total_time: float = 0.0 @staticmethod def sum_dicts(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]: """Take the union of keys of two dictionaries and sum their values.""" keys = set(d1.keys()) | set(d2.keys()) return {key: d1.get(key, 0) + d2.get(key, 0) for key in keys} @staticmethod def combine_dicts(*d: Dict[str, Any]) -> Dict[str, Any]: """Take any set of dictionaries and combine them.""" dict_buffer: dict = {} for dct in d: dict_buffer.update(dct) return dict_buffer def add(self, path: LintedDir) -> None: """Add a new `LintedDir` to this result.""" self.paths.append(path) def stop_timer(self) -> None: """Stop the linting timer.""" self.total_time = time.monotonic() - self._start_time @overload def check_tuples(self, by_path: Literal[False]) -> List[CheckTuple]: """Return a List of CheckTuples when by_path is False.""" @overload def check_tuples(self, by_path: Literal[True]) -> Dict[LintedDir, List[CheckTuple]]: """Return a Dict of LintedDir and CheckTuples when by_path is True.""" @overload def check_tuples(self, by_path: bool = False): """Default overload method.""" def check_tuples( self, by_path=False ) -> Union[List[CheckTuple], Dict[LintedDir, List[CheckTuple]]]: """Fetch all check_tuples from all contained `LintedDir` objects. Args: by_path (:obj:`bool`, optional): When False, all the check_tuples are aggregated into one flat list. When True, we return a `dict` of paths, each with its own list of check_tuples. Defaults to False.
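        Example (an illustrative sketch; ``linter`` is assumed to be a
        configured ``Linter``, and each check tuple is assumed to take the
        form ``(code, line_no, line_pos)``):

        .. code-block:: python

            result = linter.lint_paths(("models",))
            for code, line_no, line_pos in result.check_tuples(by_path=False):
                print(f"{code} at line {line_no}, position {line_pos}")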
""" if by_path: buff: Dict[LintedDir, List[CheckTuple]] = {} for path in self.paths: buff.update(path.check_tuples(by_path=by_path)) return buff else: tuple_buffer: List[CheckTuple] = [] for path in self.paths: tuple_buffer += path.check_tuples() return tuple_buffer def num_violations(self, **kwargs) -> int: """Count the number of violations in the result.""" return sum(path.num_violations(**kwargs) for path in self.paths) def get_violations(self, **kwargs) -> list: """Return a list of violations in the result.""" buff = [] for path in self.paths: buff += path.get_violations(**kwargs) return buff def violation_dict(self, **kwargs) -> dict: """Return a dict of paths and violations.""" return self.combine_dicts( *(path.violation_dict(**kwargs) for path in self.paths) ) def stats(self, fail_code: int, success_code: int) -> Dict[str, Any]: """Return a stats dictionary of this result.""" all_stats: Dict[str, Any] = dict(files=0, clean=0, unclean=0, violations=0) for path in self.paths: all_stats = self.sum_dicts(path.stats(), all_stats) if all_stats["files"] > 0: all_stats["avg per file"] = ( all_stats["violations"] * 1.0 / all_stats["files"] ) all_stats["unclean rate"] = all_stats["unclean"] * 1.0 / all_stats["files"] else: all_stats["avg per file"] = 0 all_stats["unclean rate"] = 0 all_stats["clean files"] = all_stats["clean"] all_stats["unclean files"] = all_stats["unclean"] all_stats["exit code"] = ( fail_code if all_stats["violations"] > 0 else success_code ) all_stats["status"] = "FAIL" if all_stats["violations"] > 0 else "PASS" return all_stats def timing_summary(self) -> Dict[str, Dict[str, Any]]: """Return a timing summary.""" timing = TimingSummary() rules_timing = RuleTimingSummary() for dir in self.paths: for file in dir.files: if file.timings: timing.add(file.timings.step_timings) rules_timing.add(file.timings.rule_timings) return {**timing.summary(), **rules_timing.summary()} def persist_timing_records(self, filename: str) -> None: """Persist the timing records as a csv for external analysis.""" meta_fields = [ "path", "source_chars", "templated_chars", "segments", "raw_segments", ] timing_fields = ["templating", "lexing", "parsing", "linting"] # Iterate through all the files to get rule timing information so # we know what headings we're going to need. rule_codes: Set[str] = set() file_timing_dicts: Dict[str, dict] = {} for dir in self.paths: for file in dir.files: if not file.timings: # pragma: no cover continue file_timing_dicts[file.path] = file.timings.get_rule_timing_dict() rule_codes.update(file_timing_dicts[file.path].keys()) with open(filename, "w", newline="") as f: writer = csv.DictWriter( f, fieldnames=meta_fields + timing_fields + sorted(rule_codes) ) writer.writeheader() for dir in self.paths: for file in dir.files: if not file.timings: # pragma: no cover continue writer.writerow( { "path": file.path, "source_chars": ( len(file.templated_file.source_str) if file.templated_file else "" ), "templated_chars": ( len(file.templated_file.templated_str) if file.templated_file else "" ), "segments": ( file.tree.count_segments(raw_only=False) if file.tree else "" ), "raw_segments": ( file.tree.count_segments(raw_only=True) if file.tree else "" ), **file.timings.step_timings, **file_timing_dicts[file.path], } ) def as_records(self) -> List[dict]: """Return the result as a list of dictionaries. Each record contains a key specifying the filepath, and a list of violations. This method is useful for serialization as all objects will be builtin python types (ints, strs). 
""" return [ { "filepath": path, "violations": sorted( # Sort violations by line and then position (v.get_info_dict() for v in violations), # The tuple allows sorting by line number, then position, then code key=lambda v: (v["line_no"], v["line_pos"], v["code"]), ), } for LintedDir in self.paths for path, violations in LintedDir.violation_dict().items() if violations ] def persist_changes(self, formatter, fixed_file_suffix: str = "") -> dict: """Run all the fixes for all the files and return a dict.""" return self.combine_dicts( *( path.persist_changes( formatter=formatter, fixed_file_suffix=fixed_file_suffix ) for path in self.paths ) ) @property def tree(self) -> Optional["BaseSegment"]: # pragma: no cover """A convenience method for when there is only one file and we want the tree.""" if len(self.paths) > 1: raise ValueError( ".tree() cannot be called when a LintingResult contains more than one " "path." ) return self.paths[0].tree def count_tmp_prs_errors(self) -> Tuple[int, int]: """Count templating or parse errors before and after filtering.""" total_errors = self.num_violations( types=TMP_PRS_ERROR_TYPES, filter_ignore=False, filter_warning=False, ) num_filtered_errors = 0 for linted_dir in self.paths: for linted_file in linted_dir.files: num_filtered_errors += linted_file.num_violations( types=TMP_PRS_ERROR_TYPES ) return total_errors, num_filtered_errors def discard_fixes_for_lint_errors_in_files_with_tmp_or_prs_errors(self) -> None: """Discard lint fixes for files with templating or parse errors.""" total_errors = self.num_violations( types=TMP_PRS_ERROR_TYPES, filter_ignore=False, filter_warning=False, ) if total_errors: for linted_dir in self.paths: for linted_file in linted_dir.files: linted_file.discard_fixes_if_tmp_or_prs_errors() sqlfluff-2.3.5/src/sqlfluff/core/linter/runner.py000066400000000000000000000224521451700765000220760ustar00rootroot00000000000000"""Implements runner classes used internally by the Linter class. Implements various runner types for SQLFluff: - Serial - Parallel - Multiprocess - Multithread (used only by automated tests) """ import bdb import functools import logging import multiprocessing import multiprocessing.dummy import signal import sys import traceback from abc import ABC from typing import Callable, Iterator, List, Tuple from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.core.linter import LintedFile, RenderedFile from sqlfluff.core.plugin.host import is_main_process linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class BaseRunner(ABC): """Base runner class.""" def __init__( self, linter: Linter, config: FluffConfig, ) -> None: self.linter = linter self.config = config pass_formatter = True def iter_rendered(self, fnames: List[str]) -> Iterator[Tuple[str, RenderedFile]]: """Iterate through rendered files ready for linting.""" for fname in self.linter.templater.sequence_files( fnames, config=self.config, formatter=self.linter.formatter ): try: yield fname, self.linter.render_file(fname, self.config) except SQLFluffSkipFile as s: linter_logger.warning(str(s)) def iter_partials( self, fnames: List[str], fix: bool = False, ) -> Iterator[Tuple[str, Callable]]: """Iterate through partials for linted files. Generates filenames and objects which return LintedFiles. 
""" for fname, rendered in self.iter_rendered(fnames): # Generate a fresh ruleset rule_pack = self.linter.get_rulepack(config=rendered.config) yield ( fname, functools.partial( self.linter.lint_rendered, rendered, rule_pack, fix, # Formatters may or may not be passed. They don't pickle # nicely so aren't appropriate in a multiprocessing world. self.linter.formatter if self.pass_formatter else None, ), ) def run(self, fnames: List[str], fix: bool): """Run linting on the specified list of files.""" raise NotImplementedError # pragma: no cover @classmethod def _init_global(cls, config) -> None: """Initializes any global state. May be overridden by subclasses to apply global configuration, initialize logger state in child processes, etc. """ pass @staticmethod def _handle_lint_path_exception(fname, e) -> None: if isinstance(e, IOError): # IOErrors are caught in commands.py, so propagate it raise (e) # pragma: no cover linter_logger.warning( f"""Unable to lint {fname} due to an internal error. \ Please report this as an issue with your query's contents and stacktrace below! To hide this warning, add the failing file to .sqlfluffignore {traceback.format_exc()}""", ) class SequentialRunner(BaseRunner): """Simple runner that does sequential processing.""" def run(self, fnames: List[str], fix: bool) -> Iterator[LintedFile]: """Sequential implementation.""" for fname, partial in self.iter_partials(fnames, fix=fix): try: yield partial() except (bdb.BdbQuit, KeyboardInterrupt): # pragma: no cover raise except Exception as e: self._handle_lint_path_exception(fname, e) class ParallelRunner(BaseRunner): """Base class for parallel runner implementations (process or thread).""" POOL_TYPE: Callable MAP_FUNCTION_NAME: str # Don't pass the formatter in a parallel world, they # don't pickle well. pass_formatter = False def __init__(self, linter, config, processes) -> None: super().__init__(linter, config) self.processes = processes def run(self, fnames: List[str], fix: bool): """Parallel implementation. Note that the partials are generated one at a time then passed directly into the pool as they're ready. This means the main thread can do the IO work while passing the parsing and linting work out to the threads. """ with self._create_pool( self.processes, self._init_global, (self.config,), ) as pool: try: for lint_result in self._map( pool, self._apply, self.iter_partials(fnames, fix=fix), ): if isinstance(lint_result, DelayedException): try: lint_result.reraise() except Exception as e: self._handle_lint_path_exception(lint_result.fname, e) else: # It's a LintedDir. if self.linter.formatter: self.linter.formatter.dispatch_file_violations( lint_result.path, lint_result, only_fixable=fix, warn_unused_ignores=self.linter.config.get( "warn_unused_ignores" ), ) yield lint_result except KeyboardInterrupt: # pragma: no cover # On keyboard interrupt (Ctrl-C), terminate the workers. # Notify the user we've received the signal and are cleaning up, # in case it takes awhile. print("Received keyboard interrupt. Cleaning up and shutting down...") pool.terminate() @staticmethod def _apply(partial_tuple): """Shim function used in parallel mode.""" # Unpack the tuple and ditch the filename in this case. fname, partial = partial_tuple try: return partial() # Capture any exceptions and return as delayed exception to handle # in the main thread. 
except Exception as e: return DelayedException(e, fname=fname) @classmethod def _init_global(cls, config) -> None: # pragma: no cover """For the parallel runners indicate that we're not in the main thread.""" is_main_process.set(False) super()._init_global(config) @classmethod def _create_pool(cls, *args, **kwargs): return cls.POOL_TYPE(*args, **kwargs) @classmethod def _map(cls, pool, *args, **kwargs): """Runs a class-appropriate version of the general map() function.""" return getattr(pool, cls.MAP_FUNCTION_NAME)(*args, **kwargs) class MultiProcessRunner(ParallelRunner): """Runner that does parallel processing using multiple processes.""" POOL_TYPE = multiprocessing.Pool MAP_FUNCTION_NAME = "imap_unordered" @classmethod def _init_global(cls, config) -> None: # pragma: no cover super()._init_global(config) # Disable signal handling in the child processes to let the parent # control all KeyboardInterrupt handling (Control C). This is # necessary in order for keyboard interrupts to exit quickly and # cleanly. Adapted from this post: # https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python signal.signal(signal.SIGINT, signal.SIG_IGN) class MultiThreadRunner(ParallelRunner): """Runner that does parallel processing using multiple threads. Used only by automated tests. """ POOL_TYPE = multiprocessing.dummy.Pool MAP_FUNCTION_NAME = "imap" class DelayedException(Exception): """Multiprocessing process pool uses this to propagate exceptions.""" def __init__(self, ee, fname=None): self.ee = ee __, __, self.tb = sys.exc_info() self.fname = fname super().__init__(str(ee)) def reraise(self): """Reraise the encapsulated exception.""" raise self.ee.with_traceback(self.tb) def get_runner( linter: Linter, config: FluffConfig, processes: int, allow_process_parallelism: bool = True, ) -> Tuple[BaseRunner, int]: """Generate a runner instance based on parallel and system configuration. The processes argument can be positive or negative. - If positive, the integer is interpreted as the number of processes. - If negative or zero, the integer is interpreted as number_of_cpus + processes, e.g. -1 = all cpus but one, 0 = all cpus, 1 = 1 cpu. """ if processes <= 0: processes = max(multiprocessing.cpu_count() + processes, 1) if processes > 1: # Process parallelism isn't really supported during testing # so this flag allows us to fall back to a threaded runner # in those cases.
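# Worked example of the arithmetic above (a hedged sketch): on an 8-core
# machine, get_runner(linter, config, processes=-1) resolves to
# max(8 + (-1), 1) = 7 worker processes.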
if allow_process_parallelism: return MultiProcessRunner(linter, config, processes=processes), processes else: return MultiThreadRunner(linter, config, processes=processes), processes else: return SequentialRunner(linter, config), processes sqlfluff-2.3.5/src/sqlfluff/core/parser/000077500000000000000000000000001451700765000202055ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/parser/__init__.py000066400000000000000000000041751451700765000223250ustar00rootroot00000000000000"""init file for the parser.""" from sqlfluff.core.parser.grammar import ( AnyNumberOf, AnySetOf, Anything, Bracketed, Conditional, Delimited, Nothing, OneOf, OptionallyBracketed, Ref, Sequence, ) from sqlfluff.core.parser.lexer import Lexer, RegexLexer, StringLexer from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.parser import Parser from sqlfluff.core.parser.parsers import ( MultiStringParser, RegexParser, StringParser, TypedParser, ) from sqlfluff.core.parser.segments import ( BaseFileSegment, BaseSegment, BinaryOperatorSegment, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, Dedent, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralKeywordSegment, LiteralSegment, NewlineSegment, RawSegment, SegmentGenerator, SourceFix, SymbolSegment, UnlexableSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.types import ParseMode __all__ = ( "BaseSegment", "SourceFix", "BaseFileSegment", "BracketedSegment", "RawSegment", "CodeSegment", "UnlexableSegment", "CommentSegment", "WhitespaceSegment", "NewlineSegment", "KeywordSegment", "SymbolSegment", "IdentifierSegment", "LiteralSegment", "LiteralKeywordSegment", "BinaryOperatorSegment", "CompositeBinaryOperatorSegment", "ComparisonOperatorSegment", "CompositeComparisonOperatorSegment", "WordSegment", "Indent", "Dedent", "ImplicitIndent", "SegmentGenerator", "Sequence", "OneOf", "Delimited", "Bracketed", "AnyNumberOf", "AnySetOf", "Ref", "Anything", "Nothing", "OptionallyBracketed", "Conditional", "StringParser", "MultiStringParser", "TypedParser", "RegexParser", "PositionMarker", "Lexer", "StringLexer", "RegexLexer", "Parser", "Matchable", "ParseMode", ) sqlfluff-2.3.5/src/sqlfluff/core/parser/context.py000066400000000000000000000320351451700765000222460ustar00rootroot00000000000000"""The parser context. This mirrors some of the same design of the flask context manager. https://flask.palletsprojects.com/en/1.1.x/ The context acts as a way of keeping track of state, references to common configuration and dialects, logging and also the parse and match depth of the current operation. """ import logging import uuid from collections import defaultdict from contextlib import contextmanager from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Tuple from tqdm import tqdm from sqlfluff.core.config import progress_bar_configuration if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.config import FluffConfig from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable # Get the parser logger parser_logger = logging.getLogger("sqlfluff.parser") class ParseContext: """Object to handle the context at hand during parsing. Holds two tiers of references. 1. Persistent config, like references to the dialect or the current verbosity and logger. 2. 
Stack config, like the parse and match depth. The manipulation of the stack config is done using a context manager and layered config objects inside the context. NOTE: We use context managers here to avoid _copying_ the context, just to mutate it safely. This is significantly more performant than the copy operation, but does require some care to use properly. When fetching elements from the context, we first look at the top level stack config object and the persistent config values (stored as attributes of the ParseContext itself). """ def __init__( self, dialect: "Dialect", indentation_config: Optional[Dict[str, Any]] = None, ) -> None: """Initialize a new instance of the class. Args: dialect (Dialect): The dialect used for parsing. indentation_config (Optional[Dict[str, Any]], optional): The indentation configuration used by Indent and Dedent to control the intended indentation of certain features. Defaults to None. """ self.dialect = dialect # Indentation config is used by Indent and Dedent and used to control # the intended indentation of certain features. Specifically it is # used in the Conditional grammar. self.indentation_config = indentation_config or {} # This is the logger that child objects will latch onto. self.logger = parser_logger # A uuid for this parse context to enable cache invalidation self.uuid = uuid.uuid4() # A dict for parse caching. This is reset for each file, # but persists for the duration of an individual file parse. self._parse_cache: Dict[Tuple[Any, ...], "MatchResult"] = {} # A dictionary for keeping track of some statistics on parsing # for performance optimisation. # Focused around BaseGrammar._longest_trimmed_match(). # Initialise only with "next_counts", the rest will be int # and are dealt with in .increment(). self.parse_stats: Dict[str, Any] = {"next_counts": defaultdict(int)} # The following attributes are only accessible via a copy # and not in the init method. # NOTE: We default to the name `File` which is not # particularly informative, does indicate the root segment. self.match_segment: str = "File" self._match_stack: List[str] = [] self._parse_stack: List[str] = [] self.match_depth = 0 self.parse_depth = 0 # self.terminators is a tuple to afford some level of isolation # and protection from edits to outside the context. This introduces # a little more overhead than a list, but we manage this by only # copying it when necessary. # NOTE: Includes inherited parent terminators. self.terminators: Tuple["Matchable", ...] = () # Value for holding a reference to the progress bar. self._tqdm: Optional[tqdm] = None # Variable to store whether we're tracking progress. When looking # ahead to terminators or suchlike, we set this to False so as not # to confuse the progress bar. self.track_progress = True # The current character, to store where the progress bar is at. self._current_char = 0 @classmethod def from_config(cls, config: "FluffConfig") -> "ParseContext": """Construct a `ParseContext` from a `FluffConfig`. Args: config (FluffConfig): The configuration object. Returns: ParseContext: The constructed ParseContext object. 
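        Example (an illustrative sketch; assumes the ``ansi`` dialect is
        installed):

        .. code-block:: python

            from sqlfluff.core import FluffConfig

            ctx = ParseContext.from_config(
                FluffConfig(overrides={"dialect": "ansi"})
            )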
""" indentation_config = config.get_section("indentation") or {} try: indentation_config = {k: bool(v) for k, v in indentation_config.items()} except TypeError: # pragma: no cover raise TypeError( "One of the configuration keys in the `indentation` section is not " "True or False: {!r}".format(indentation_config) ) return cls( dialect=config.get("dialect_obj"), indentation_config=indentation_config, ) def _set_terminators( self, clear_terminators: bool = False, push_terminators: Optional[Sequence["Matchable"]] = None, ) -> Tuple[int, Tuple["Matchable", ...]]: """Set the terminators used in the class. This private method sets the terminators used in the class. If `clear_terminators` is True and the existing terminators are not already cleared, the method clears the terminators. If `push_terminators` is provided, the method appends them to the existing terminators if they are not already present. Args: clear_terminators (bool, optional): A flag indicating whether to clear the existing terminators. Defaults to False. push_terminators (Optional[Sequence["Matchable"]], optional): A sequence of `Matchable` objects to be added as terminators. Defaults to None. Returns: Tuple[int, Tuple["Matchable", ...]]: A tuple containing the number of terminators appended and the original terminators. """ _appended = 0 # Retain a reference to the original terminators. _terminators = self.terminators # Note: only need to reset if clear _and not already clear_. if clear_terminators and self.terminators: # NOTE: It's really important that we .copy() on the way in, because # we don't know what else has a reference to the input list, and # we rely a lot in this code on having full control over the # list of terminators. self.terminators = tuple(push_terminators) if push_terminators else () elif push_terminators: # Yes, inefficient for now. for terminator in push_terminators: if terminator not in self.terminators: self.terminators += (terminator,) _appended += 1 return _appended, _terminators def _reset_terminators( self, appended: int, terminators: Tuple["Matchable", ...], clear_terminators: bool = False, ) -> None: """Reset the terminators attribute of the class. This method is used to reset the terminators attribute of the class. If the clear_terminators parameter is True, the terminators attribute is set to the provided terminators. If the clear_terminators parameter is False and the appended parameter is non-zero, the terminators attribute is trimmed to its original length minus the value of the appended parameter. Args: appended (int): The number of terminators that were appended. terminators (Tuple["Matchable", ...]): The original terminators. clear_terminators (bool, optional): If True, clear the terminators attribute completely. Defaults to False. """ # If we totally reset them, just reinstate the old object. if clear_terminators: self.terminators = terminators # If we didn't, then trim any added ones. # NOTE: Because we dedupe, just because we had push_terminators # doesn't mean any of them actually got added here - we only trim # the number that actually got appended. elif appended: # Trim back to original length. self.terminators = self.terminators[:-appended] @contextmanager def deeper_match( self, name: str, clear_terminators: bool = False, push_terminators: Optional[Sequence["Matchable"]] = None, track_progress: Optional[bool] = None, ) -> Iterator["ParseContext"]: """Increment match depth. Args: name (:obj:`str`): Name of segment we are starting to parse. 
NOTE: This value is entirely used for tracking and logging purposes. clear_terminators (:obj:`bool`, optional): Whether to force clear any inherited terminators. This is useful in structures like brackets, where outer terminators shouldn't apply while within. Terminators are stashed until we return back out of this context. push_terminators (:obj:`Sequence` of :obj:`Matchable`): Additional terminators to add to the environment while in this context. track_progress (:obj:`bool`, optional): Whether to pause progress tracking for deeper matches. This avoids having the linting progress bar jump forward when performing greedy matches on terminators. """ self._match_stack.append(self.match_segment) self.match_segment = name self.match_depth += 1 _append, _terms = self._set_terminators(clear_terminators, push_terminators) _track_progress = self.track_progress if track_progress is False: self.track_progress = False elif track_progress is True: # pragma: no cover # We can't go from False to True. Raise an issue if not. assert self.track_progress is True, "Cannot set tracking from False to True" try: yield self finally: self._reset_terminators( _append, _terms, clear_terminators=clear_terminators ) self.match_depth -= 1 # Reset back to old name self.match_segment = self._match_stack.pop() # Reset back to old progress tracking. self.track_progress = _track_progress @contextmanager def progress_bar(self, last_char: int) -> Iterator["ParseContext"]: """Set up the progress bar (if it's not already set up). Args: last_char (:obj:`int`): The templated character position of the final segment in the sequence. This is usually populated from the end of `templated_slice` on the final segment. We require this on initialising the progress bar so that we know how far there is to go as we track progress through the file. """ assert not self._tqdm, "Attempted to re-initialise progressbar." self._tqdm = tqdm( # Progress is character by character in the *templated* file. total=last_char, desc="parsing", miniters=1, mininterval=0.2, disable=progress_bar_configuration.disable_progress_bar, leave=False, ) self._current_line = 0 try: yield self finally: self._tqdm.close() def update_progress(self, char_idx: int) -> None: """Update the progress bar if configured. If progress isn't configured, we do nothing. If `track_progress` is false we do nothing. """ if not self._tqdm or not self.track_progress: return None if char_idx <= self._current_char: return None self._tqdm.update(char_idx - self._current_char) self._current_char = char_idx return None def stack(self) -> Tuple[Tuple[str, ...], Tuple[str, ...]]: # pragma: no cover """Return stacks as a tuples so that it can't be edited.""" return tuple(self._parse_stack), tuple(self._match_stack) def check_parse_cache( self, loc_key: Tuple[Any, ...], matcher_key: str ) -> Optional["MatchResult"]: """Check against the parse cache for a pre-existing match. If no match is found in the cache, this returns None. 
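        The intended check-then-store pattern is sketched below (``loc_key``,
        ``matcher_key``, ``matcher``, ``segments`` and ``idx`` are
        illustrative placeholders):

        .. code-block:: python

            match = ctx.check_parse_cache(loc_key, matcher_key)
            if match is None:
                match = matcher.match(segments, idx, ctx)
                ctx.put_parse_cache(loc_key, matcher_key, match)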
""" return self._parse_cache.get((loc_key, matcher_key)) def put_parse_cache( self, loc_key: Tuple[Any, ...], matcher_key: str, match: "MatchResult" ) -> None: """Store a match in the cache for later retrieval.""" self._parse_cache[(loc_key, matcher_key)] = match sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/000077500000000000000000000000001451700765000216335ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/__init__.py000066400000000000000000000011501451700765000237410ustar00rootroot00000000000000"""Definitions of grammars.""" from sqlfluff.core.parser.grammar.anyof import ( AnyNumberOf, AnySetOf, OneOf, OptionallyBracketed, ) from sqlfluff.core.parser.grammar.base import Anything, Nothing, Ref from sqlfluff.core.parser.grammar.conditional import Conditional from sqlfluff.core.parser.grammar.delimited import Delimited from sqlfluff.core.parser.grammar.sequence import Bracketed, Sequence __all__ = ( "Ref", "Anything", "Nothing", "AnyNumberOf", "AnySetOf", "OneOf", "OptionallyBracketed", "Delimited", "Sequence", "Bracketed", "Conditional", ) sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/anyof.py000066400000000000000000000266641451700765000233370ustar00rootroot00000000000000"""AnyNumberOf, OneOf, OptionallyBracketed & AnySetOf.""" from typing import FrozenSet, List, Optional, Tuple, Union, cast from typing import Sequence as SequenceType from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import ( BaseGrammar, cached_method_for_parse_context, ) from sqlfluff.core.parser.grammar.sequence import Bracketed, Sequence from sqlfluff.core.parser.match_algorithms import ( longest_match, skip_start_index_forward_to_code, trim_to_terminator, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, UnparsableSegment from sqlfluff.core.parser.types import ParseMode, SimpleHintType def _parse_mode_match_result( segments: SequenceType[BaseSegment], current_match: MatchResult, max_idx: int, parse_mode: ParseMode, ) -> MatchResult: """A helper function for the return values of AnyNumberOf. This method creates UnparsableSegments as appropriate depending on the parse mode and return values. """ # If we're being strict, just return. if parse_mode == ParseMode.STRICT: return current_match # Nothing in unmatched anyway? 
_stop_idx = current_match.matched_slice.stop if _stop_idx == max_idx or all(not s.is_code for s in segments[_stop_idx:max_idx]): return current_match _trim_idx = skip_start_index_forward_to_code(segments, _stop_idx) # Create an unmatched segment _expected = "Nothing else" if len(segments) > max_idx: _expected += f" before {segments[max_idx].raw!r}" unmatched_match = MatchResult( matched_slice=slice(_trim_idx, max_idx), matched_class=UnparsableSegment, segment_kwargs={"expected": _expected}, ) return current_match.append(unmatched_match) class AnyNumberOf(BaseGrammar): """A more configurable version of OneOf.""" supported_parse_modes = { ParseMode.STRICT, ParseMode.GREEDY, } def __init__( self, *args: Union[Matchable, str], max_times: Optional[int] = None, min_times: int = 0, max_times_per_element: Optional[int] = None, exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: self.max_times = max_times self.min_times = min_times self.max_times_per_element = max_times_per_element # Any patterns to _prevent_ a match. self.exclude = exclude super().__init__( *args, allow_gaps=allow_gaps, optional=optional, terminators=terminators, reset_terminators=reset_terminators, parse_mode=parse_mode, ) @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Does this matcher support an uppercase hash matching route? AnyNumberOf does provide this, as long as *all* the elements *also* do. """ option_simples: List[SimpleHintType] = [ opt.simple(parse_context=parse_context, crumbs=crumbs) for opt in self._elements ] if any(elem is None for elem in option_simples): return None # We now know that there are no Nones. simple_buff = cast(List[Tuple[FrozenSet[str], FrozenSet[str]]], option_simples) # Combine the lists simple_raws = [simple[0] for simple in simple_buff if simple[0]] simple_types = [simple[1] for simple in simple_buff if simple[1]] return ( frozenset.union(*simple_raws) if simple_raws else frozenset(), frozenset.union(*simple_types) if simple_types else frozenset(), ) def is_optional(self) -> bool: """Return whether this element is optional. This is mostly set in the init method, but in this case the grammar is also optional if min_times is zero. """ return self.optional or self.min_times == 0 def match( self, segments: SequenceType["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against any of the elements a relevant number of times. If it matches multiple, it returns the longest, and if any are the same length it returns the first (unless we explicitly just match first). """ if self.exclude: with parse_context.deeper_match( name=self.__class__.__name__ + "-Exclude" ) as ctx: if self.exclude.match(segments, idx, ctx): return MatchResult.empty_at(idx) n_matches = 0 # Keep track of the number of times each option has been matched. option_counter = {elem.cache_key(): 0 for elem in self._elements} # Keep track of how far we've got. matched_idx = idx # The working index is to cover non-code elements which aren't # claimed yet, but we should conditionally claim if the next # match is successful.
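        # (e.g. for "FOO  BAR": after matching FOO, matched_idx stops after
        # FOO while working_idx skips the whitespace; the gap is only claimed
        # if something then matches at BAR.)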
working_idx = idx matched = MatchResult.empty_at(idx) max_idx = len(segments) # What is the limit if self.parse_mode == ParseMode.GREEDY: max_idx = trim_to_terminator( segments, idx, terminators=[*self.terminators, *parse_context.terminators], parse_context=parse_context, ) while True: if n_matches >= self.min_times: if ( # Either nothing left to match... matched_idx >= max_idx # ...Or we've matched as many times as allowed. or (self.max_times and n_matches >= self.max_times) ): # NOTE: For OneOf, this is the matched return path. return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # Is there nothing left to match? if matched_idx >= max_idx: # Return unsuccessful as we didn't meet the hurdle. # The positive exhausted return is above. return MatchResult.empty_at(idx) with parse_context.deeper_match( name=self.__class__.__name__, clear_terminators=self.reset_terminators, push_terminators=self.terminators, ) as ctx: match, matched_option = longest_match( # TODO: Resolve re-slice limit hack segments[:max_idx], self._elements, working_idx, ctx, ) # Did we fail to match? if not match: # If we haven't already met the hurdle rate, act as though # not match at all. if n_matches < self.min_times: matched = MatchResult.empty_at(idx) return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # Otherwise we have a new clean match. assert match assert matched_option # Update counts of each option in case we've hit limits. matched_key = matched_option.cache_key() if matched_option.cache_key() in option_counter: option_counter[matched_key] += 1 # Check if we have matched an option too many times. if ( self.max_times_per_element and option_counter[matched_key] > self.max_times_per_element ): # Return the match so far, without the most recent match. return _parse_mode_match_result( segments, matched, max_idx, self.parse_mode, ) # If we haven't hit limits then consume and move on. matched = matched.append(match) matched_idx = matched.matched_slice.stop working_idx = matched_idx if self.allow_gaps: working_idx = skip_start_index_forward_to_code(segments, matched_idx) parse_context.update_progress(matched_idx) n_matches += 1 # Continue around the loop... class OneOf(AnyNumberOf): """Match any of the elements given once. If it matches multiple, it returns the longest, and if any are the same length it returns the first (unless we explicitly just match first). """ def __init__( self, *args: Union[Matchable, str], exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( *args, max_times=1, min_times=1, exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, parse_mode=parse_mode, ) class OptionallyBracketed(OneOf): """Hybrid of Bracketed and Sequence: allows brackets but they aren't required. NOTE: This class is greedy on brackets so if they *can* be claimed, then they will be. """ def __init__( self, *args: Union[Matchable, str], exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( Bracketed(*args), # In the case that there is only one argument, no sequence is required. 
args[0] if len(args) == 1 else Sequence(*args), exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, optional=optional, parse_mode=parse_mode, ) class AnySetOf(AnyNumberOf): """Match any number of the elements but each element can only be matched once.""" def __init__( self, *args: Union[Matchable, str], max_times: Optional[int] = None, min_times: int = 0, exclude: Optional[Matchable] = None, terminators: SequenceType[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: super().__init__( *args, max_times_per_element=1, max_times=max_times, min_times=min_times, exclude=exclude, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, parse_mode=parse_mode, ) sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/base.py000066400000000000000000000464651451700765000231360ustar00rootroot00000000000000"""Base grammar, Ref, Anything and Nothing.""" import copy from typing import ( TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Set, Tuple, TypeVar, Union, ) from uuid import UUID, uuid4 from sqlfluff.core.helpers.string import curtail_string from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_algorithms import greedy_match from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.parser.types import ParseMode, SimpleHintType if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects.base import Dialect def cached_method_for_parse_context( func: Callable[[Any, ParseContext, Optional[Tuple[str]]], SimpleHintType] ) -> Callable[..., SimpleHintType]: """A decorator to cache the output of this method for a given parse context. This cache automatically invalidates if the uuid of the parse context changes. The value is stored in the __dict__ attribute of the class against a key unique to that function. """ cache_key = "__cache_" + func.__name__ def wrapped_method( self: Any, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Cache the output of the method against a given parse context. Note: kwargs are not taken into account in the caching, but for the current use case of dependency loop debugging that's ok. """ try: cache_tuple: Tuple[UUID, SimpleHintType] = self.__dict__[cache_key] # Is the value for the current context? if cache_tuple[0] == parse_context.uuid: # If so return it. return cache_tuple[1] except KeyError: # Failed to find an item in the cache. pass # If we're here, we either didn't find a match in the cache or it # wasn't valid. Generate a new value, cache it and return it. result = func(self, parse_context, crumbs) self.__dict__[cache_key] = (parse_context.uuid, result) return result return wrapped_method T = TypeVar("T", bound="BaseGrammar") class BaseGrammar(Matchable): """Grammars are a way of composing match statements. Any grammar must implement the `match` function. Segments can also be passed to most grammars. Segments implement `match` as a classmethod. Grammars implement it as an instance method. """ is_meta = False equality_kwargs: Tuple[str, ...] = ("_elements", "optional", "allow_gaps") # All grammars are assumed to support STRICT mode by default. # If they wish to support other modes, they should declare # it by overriding this attribute.
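    # (e.g. AnyNumberOf in anyof.py declares {ParseMode.STRICT,
    # ParseMode.GREEDY} to opt into greedy parsing.)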
supported_parse_modes: Set[ParseMode] = {ParseMode.STRICT} @staticmethod def _resolve_ref(elem: Union[str, Matchable]) -> Matchable: """Resolve potential string references to things we can match against.""" if isinstance(elem, str): return Ref.keyword(elem) elif isinstance(elem, Matchable): # NOTE: BaseSegment types are an instance of Matchable. return elem raise TypeError( "Grammar element [{!r}] was found of unexpected " "type [{}].".format(elem, type(elem)) # pragma: no cover ) def __init__( self, *args: Union[Matchable, str], allow_gaps: bool = True, optional: bool = False, terminators: Sequence[Union[Matchable, str]] = (), reset_terminators: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: """Deal with kwargs common to all grammars. Args: *args: Any number of elements which become the subjects of this grammar. Optionally these elements may also be string references to elements rather than the Matchable elements themselves. allow_gaps (:obj:`bool`, optional): Does this instance of the grammar allow gaps between the elements it matches? This may be exhibited slightly differently in each grammar. See that grammar for details. Defaults to `True`. optional (:obj:`bool`, optional): In the context of a sequence, is this grammar *optional*, i.e. can it be skipped if no match is found. Outside of a Sequence, this option does nothing. Defaults to `False`. terminators (Sequence of :obj:`str` or Matchable): Matchable objects which can terminate the grammar early. These are also used in some parse modes to dictate how many segments to claim when handling unparsable sections. Items passed as :obj:`str` are assumed to refer to keywords and so will be passed to `Ref.keyword()` to be resolved. Terminators are also added to the parse context during deeper matching of child elements. reset_terminators (:obj:`bool`, default `False`): Controls whether any inherited terminators from outer grammars should be cleared before matching child elements. Situations where this might be appropriate are within bracketed expressions, where outer terminators shouldn't apply while within. Terminators are stashed until we return back out of this context. parse_mode (:obj:`ParseMode`): Defines how eager the grammar should be in claiming unmatched segments. By default, grammars usually only claim what they can match, but by setting this to something more eager, grammars can control how unparsable sections are treated to give the user more granular feedback on what can (and what *cannot*) be parsed. """ # We provide a common interface for any grammar that allows positional elements. # If *any* of the elements are a string and not a grammar, then this is a # shortcut to the Ref.keyword grammar by default. self._elements: List[Matchable] = [self._resolve_ref(e) for e in args] # Now we deal with the standard kwargs self.allow_gaps = allow_gaps self.optional: bool = optional # The intent here is that if we match something, and then the _next_ # item is one of these, we can safely conclude it's a "total" match. # In those cases, we return early without considering more options. self.terminators: Sequence[Matchable] = [ self._resolve_ref(t) for t in terminators ] self.reset_terminators = reset_terminators assert parse_mode in self.supported_parse_modes, ( f"{self.__class__.__name__} does not support {parse_mode} " f"(only {self.supported_parse_modes})" ) self.parse_mode = parse_mode # Generate a cache key self._cache_key = uuid4().hex def cache_key(self) -> str: """Get the cache key for this grammar. For grammars these are unique per-instance.
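        Example (an illustrative sketch): two separately constructed but
        otherwise equal grammars still get distinct cache keys.

        .. code-block:: python

            a = Sequence(Ref.keyword("select"))
            b = Sequence(Ref.keyword("select"))
            assert a == b                          # Equal by elements.
            assert a.cache_key() != b.cache_key()  # Keyed per-instance.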
""" return self._cache_key def is_optional(self) -> bool: """Return whether this segment is optional. The optional attribute is set in the __init__ method. """ return self.optional @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Does this matcher support a lowercase hash matching route?""" return None def __str__(self) -> str: # pragma: no cover TODO? """Return a string representation of the object.""" return repr(self) def __repr__(self) -> str: """Return a string representation suitable for debugging.""" return "<{}: [{}]>".format( self.__class__.__name__, curtail_string( ", ".join(curtail_string(repr(elem), 40) for elem in self._elements), 100, ), ) def __eq__(self, other: Any) -> bool: """Two grammars are equal if their elements and types are equal. NOTE: We use the equality_kwargs tuple on the class to define other kwargs which should also be checked so that things like "optional" is also taken into account in considering equality. """ return type(self) is type(other) and all( getattr(self, k, None) == getattr(other, k, None) for k in self.equality_kwargs ) def copy( self: T, insert: Optional[List[Matchable]] = None, at: Optional[int] = None, before: Optional[Any] = None, remove: Optional[List[Matchable]] = None, terminators: List[Union[str, Matchable]] = [], replace_terminators: bool = False, # NOTE: Optionally allow other kwargs to be provided to this # method for type compatibility. Any provided won't be used. **kwargs: Any, ) -> T: """Create a copy of this grammar, optionally with differences. This is mainly used in dialect inheritance. Args: insert (:obj:`list`, optional): Matchable elements to insert. This is inserted pre-expansion so can include unexpanded elements as normal. at (:obj:`int`, optional): The position in the elements to insert the item. Defaults to `None` which means insert at the end of the elements. before (optional): An alternative to _at_ to determine the position of an insertion. Using this inserts the elements immediately before the position of this element. Note that this is not an _index_ but an element to look for (i.e. a Segment or Grammar which will be compared with other elements for equality). remove (:obj:`list`, optional): A list of individual elements to remove from a grammar. Removal is done *after* insertion so that order is preserved. Elements are searched for individually. terminators (:obj:`list` of :obj:`str` or Matchable): New terminators to add to the existing ones. Whether they replace or append is controlled by `append_terminators`. :obj:`str` objects will be interpreted as keywords and passed to `Ref.keyword()`. replace_terminators (:obj:`bool`, default False): When `True` we replace the existing terminators from the copied grammar, otherwise we just append. **kwargs: Optional additional values may be passed to this method for inherited classes, but if unused they will raise an `AssertionError`. """ assert not kwargs, f"Unexpected kwargs to .copy(): {kwargs}" # Copy only the *grammar* elements. The rest comes through # as is because they should just be classes rather than # instances. new_elems = [ elem.copy() if isinstance(elem, BaseGrammar) else elem for elem in self._elements ] if insert: if at is not None and before is not None: # pragma: no cover raise ValueError( "Cannot specify `at` and `before` in BaseGrammar.copy()." 
) if before is not None: try: idx = new_elems.index(before) except ValueError: # pragma: no cover raise ValueError( "Could not insert {} in copy of {}. {} not Found.".format( insert, self, before ) ) new_elems = new_elems[:idx] + insert + new_elems[idx:] elif at is None: new_elems = new_elems + insert else: new_elems = new_elems[:at] + insert + new_elems[at:] if remove: for elem in remove: try: new_elems.remove(elem) except ValueError: # pragma: no cover raise ValueError( "Could not remove {} from copy of {}. Not Found.".format( elem, self ) ) new_grammar = copy.copy(self) new_grammar._elements = new_elems if replace_terminators: # pragma: no cover # Override (NOTE: Not currently used). new_grammar.terminators = [self._resolve_ref(t) for t in terminators] else: # NOTE: This is also safe in the case that neither `terminators` or # `replace_terminators` are set. In that case, nothing will change. new_grammar.terminators = [ *new_grammar.terminators, *(self._resolve_ref(t) for t in terminators), ] return new_grammar class Ref(BaseGrammar): """A kind of meta-grammar that references other grammars by name at runtime.""" equality_kwargs: Tuple[str, ...] = ("_ref", "optional", "allow_gaps") def __init__( self, *args: str, exclude: Optional[Matchable] = None, terminators: Sequence[Union[Matchable, str]] = (), reset_terminators: bool = False, allow_gaps: bool = True, optional: bool = False, ) -> None: # For Ref, there should only be one arg. assert len(args) == 1, ( "Ref grammar can only deal with precisely one element for now. Instead " f"found {args!r}" ) assert isinstance(args[0], str), f"Ref must be string. Found {args}." self._ref = args[0] # Any patterns to _prevent_ a match. self.exclude = exclude super().__init__( # NOTE: Don't pass on any args (we've already handled it with self._ref) allow_gaps=allow_gaps, optional=optional, # Terminators don't take effect directly within this grammar, but # the Ref grammar is an effective place to manage the terminators # inherited via the context. terminators=terminators, reset_terminators=reset_terminators, ) @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Does this matcher support an uppercase hash matching route? A ref is simple if the thing it references is simple. """ if crumbs and self._ref in crumbs: # pragma: no cover loop = " -> ".join(crumbs) raise RecursionError(f"Self referential grammar detected: {loop}") return self._get_elem(dialect=parse_context.dialect).simple( parse_context=parse_context, crumbs=(crumbs or ()) + (self._ref,), ) def _get_elem(self, dialect: "Dialect") -> Matchable: """Get the actual object we're referencing.""" if dialect: # Use the dialect to retrieve the grammar it refers to. return dialect.ref(self._ref) else: # pragma: no cover raise ReferenceError("No Dialect has been provided to Ref grammar!") def __repr__(self) -> str: """Return a string representation of the 'Ref' object.""" return "<Ref: {}{}>".format( repr(self._ref), " [opt]" if self.is_optional() else "" ) def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match a list of segments against this segment. Matching can be done from either the raw or the segments. This raw function can be overridden, or a grammar defined on the underlying class. Args: segments (Tuple[BaseSegment, ...]): The sequence of segments to match against. idx (int): Index of the element in the sequence.
parse_context (ParseContext): The parse context. Returns: MatchResult: The result of the matching process. """ elem = self._get_elem(dialect=parse_context.dialect) # First if we have an *exclude* option, we should check that # which would prevent the rest of this grammar from matching. if self.exclude: with parse_context.deeper_match( name=self._ref + "-Exclude", clear_terminators=self.reset_terminators, push_terminators=self.terminators, ) as ctx: if self.exclude.match(segments, idx, ctx): return MatchResult.empty_at(idx) # Match against that. NB We're not incrementing the match_depth here. # References shouldn't really count as a depth of match. with parse_context.deeper_match( name=self._ref, clear_terminators=self.reset_terminators, push_terminators=self.terminators, ) as ctx: return elem.match(segments, idx, ctx) @classmethod def keyword(cls, keyword: str, optional: bool = False) -> BaseGrammar: """Generate a reference to a keyword by name. This function is entirely syntactic sugar, and designed for more readable dialects. Ref.keyword('select') == Ref('SelectKeywordSegment') Args: keyword (str): The name of the keyword. optional (bool, optional): Whether the keyword is optional or not. Defaults to False. Returns: BaseGrammar: An instance of the BaseGrammar class. """ name = keyword.capitalize() + "KeywordSegment" return cls(name, optional=optional) class Anything(BaseGrammar): """Matches anything.""" def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Matches... Anything. Most useful in match grammars, where a later parse grammar will work out what's inside. NOTE: This grammar does still only match as far as any inherited terminators if they exist. """ terminators = [*self.terminators] if not self.reset_terminators: # Only add context terminators if we're not resetting. terminators.extend(parse_context.terminators) if not terminators: return MatchResult(slice(idx, len(segments))) return greedy_match( segments, idx, parse_context, terminators, # Using the nested match option means that we can match # any bracketed sections we find to persist the structure # even if this grammar is permissive on the meaning. # This preserves backward compatibility with older # parsing behaviour. nested_match=True, ) class Nothing(BaseGrammar): """Matches nothing. Useful for placeholders which might be overwritten by other dialects. """ def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Always return a failed (empty) match.""" return MatchResult.empty_at(idx) sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/conditional.py000066400000000000000000000101631451700765000245110ustar00rootroot00000000000000"""Conditional Grammar.""" from typing import Sequence, Type, Union from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import BaseGrammar from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment, Indent class Conditional(BaseGrammar): """A grammar which is conditional on the parse context. | NOTE: The Conditional grammar is assumed to be operating | within a Sequence grammar, and some of the functionality | may not function within a different context. Args: *args: A meta segment which is instantiated conditionally upon the rules set. config_type: The area of the config that is used when evaluating the status of the given rules.
rules: A set of `rule=boolean` pairs, which are evaluated when understanding whether conditions are met for this grammar to be enabled. Example: .. code-block:: python Conditional(Dedent, config_type="indentation", indented_joins=False) This effectively says that if `indented_joins` in the "indentation" section of the current config is set to `False`, then this grammar will allow a `Dedent` segment to be matched here. If `indented_joins` is set to `True`, it will be as though there was no `Dedent` in this sequence. | NOTE: While the Conditional grammar is set up to allow different | sources of configuration, it relies on configuration keys being | available within the ParseContext. Practically speaking only the | "indentation" keys are currently set up. """ def __init__( self, meta: Type[Indent], config_type: str = "indentation", **rules: Union[str, bool] ): """Initialize a new instance of the class. This method initializes an instance of the class with the provided arguments. Args: meta (Type[Indent]): The meta argument. config_type (str, optional): The config_type argument. Defaults to "indentation". **rules (Union[str, bool]): The rules argument. """ assert issubclass( meta, Indent ), "Conditional is only designed to work with Indent/Dedent segments." self._meta = meta if not config_type: # pragma: no cover raise ValueError("Conditional config_type must be set.") elif config_type not in ("indentation",): # pragma: no cover raise ValueError( "Only 'indentation' is supported as a Conditional config_type." ) if not rules: # pragma: no cover raise ValueError("Conditional requires rules to be set.") self._config_type = config_type self._config_rules = rules super().__init__() def is_enabled(self, parse_context: ParseContext) -> bool: """Evaluate conditionals and return whether enabled.""" # NOTE: Because "indentation" is the only config_type currently # supported, this code is much simpler than would be required in # future if multiple options are available. if self._config_type != "indentation": # pragma: no cover raise ValueError( "Only 'indentation' is supported as a Conditional config_type." ) config_section = parse_context.indentation_config # If any rules fail, return no match. for rule, val in self._config_rules.items(): # Assume False if not set. conf_val = config_section.get(rule, False) # Coerce to boolean. if val != bool(conf_val): return False return True def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """If enabled, return a single insert of the new segment.""" if not self.is_enabled(parse_context): return MatchResult.empty_at(idx) return MatchResult( matched_slice=slice(idx, idx), insert_segments=((idx, self._meta),) ) sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/delimited.py000066400000000000000000000161441451700765000241530ustar00rootroot00000000000000"""Definitions for Grammar.""" from typing import Optional, Sequence, Union from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Ref from sqlfluff.core.parser.grammar.anyof import OneOf from sqlfluff.core.parser.grammar.noncode import NonCodeMatcher from sqlfluff.core.parser.match_algorithms import ( longest_match, skip_start_index_forward_to_code, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment class Delimited(OneOf): """Match an arbitrary number of elements separated by a delimiter.
Note that if multiple elements are passed in, they will be treated as different options of what can be delimited, rather than a sequence. """ equality_kwargs = ( "_elements", "optional", "allow_gaps", "delimiter", "allow_trailing", "terminator", "min_delimiters", ) def __init__( self, *args: Union[Matchable, str], delimiter: Union[Matchable, str] = Ref("CommaSegment"), allow_trailing: bool = False, terminators: Sequence[Union[Matchable, str]] = (), reset_terminators: bool = False, min_delimiters: int = 0, bracket_pairs_set: str = "bracket_pairs", allow_gaps: bool = True, optional: bool = False, ) -> None: """Initialize the class object with the provided arguments. Args: *args (Union[Matchable, str]): Options for elements between delimiters. This is treated as a set of options rather than a sequence. delimiter (Union[Matchable, str], optional): Delimiter used for parsing. Defaults to Ref("CommaSegment"). allow_trailing (bool, optional): Flag indicating whether trailing delimiters are allowed. Defaults to False. terminators (Sequence[Union[Matchable, str]], optional): Sequence of terminators used to match the end of a segment. Defaults to (). reset_terminators (bool, optional): Flag indicating whether terminators should be reset. Defaults to False. min_delimiters (int, optional): Minimum number of delimiters to match. Defaults to 0. bracket_pairs_set (str, optional): Name of the bracket pairs set. Defaults to "bracket_pairs". allow_gaps (bool, optional): Flag indicating whether gaps between segments are allowed. Defaults to True. optional (bool, optional): Flag indicating whether the segment is optional. Defaults to False. """ if delimiter is None: # pragma: no cover raise ValueError("Delimited grammars require a `delimiter`") self.bracket_pairs_set = bracket_pairs_set self.delimiter = self._resolve_ref(delimiter) self.allow_trailing = allow_trailing # Setting min delimiters means we have to match at least this number self.min_delimiters = min_delimiters super().__init__( *args, terminators=terminators, reset_terminators=reset_terminators, allow_gaps=allow_gaps, optional=optional, ) def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match delimited sequences. To achieve this we flip flop between looking for content and looking for delimiters. Individual elements of this grammar are treated as _options_ not as a _sequence_. """ delimiters = 0 seeking_delimiter = False max_idx = len(segments) working_idx = idx working_match = MatchResult.empty_at(idx) delimiter_match: Optional[MatchResult] = None delimiter_matchers = [self.delimiter] # NOTE: If the configured delimiter is in `parse_context.terminators` then # treat it _only_ as a delimiter and not as a terminator. This happens # frequently during nested comma expressions. terminator_matchers = [ *self.terminators, *(t for t in parse_context.terminators if t not in delimiter_matchers), ] # If gaps aren't allowed, a gap (or non-code segment) acts like a terminator. if not self.allow_gaps: terminator_matchers.append(NonCodeMatcher()) while True: # If we're past the start and allowed gaps, work forward # through any gaps. if self.allow_gaps and working_idx > idx: working_idx = skip_start_index_forward_to_code(segments, working_idx) # Do we have anything left to match on?
if working_idx >= max_idx: break # Check whether there is a terminator before checking for content with parse_context.deeper_match(name="Delimited-Term") as ctx: match, _ = longest_match( segments=segments, matchers=terminator_matchers, idx=working_idx, parse_context=ctx, ) if match: break # Then match for content/delimiter as appropriate. _push_terminators = [] if delimiter_matchers and not seeking_delimiter: _push_terminators = delimiter_matchers with parse_context.deeper_match( name="Delimited", push_terminators=_push_terminators ) as ctx: match, _ = longest_match( segments=segments, matchers=delimiter_matchers if seeking_delimiter else self._elements, idx=working_idx, parse_context=ctx, ) if not match: # Failed to match next element, stop here. break # Otherwise we _did_ match. Handle it. if seeking_delimiter: # It's a delimiter delimiter_match = match else: # It's content. Add both the last delimiter and the content to the # working match. if delimiter_match: # NOTE: This should happen on every loop _except_ the first. delimiters += 1 working_match = working_match.append(delimiter_match) working_match = working_match.append(match) # Prep for going back around the loop... working_idx = match.matched_slice.stop seeking_delimiter = not seeking_delimiter parse_context.update_progress(working_idx) if self.allow_trailing and delimiter_match and not seeking_delimiter: delimiters += 1 working_match = working_match.append(delimiter_match) if delimiters < self.min_delimiters: return MatchResult.empty_at(idx) return working_match sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/noncode.py000066400000000000000000000035021451700765000236320ustar00rootroot00000000000000"""A non-code matcher. This is a stub of a grammar, intended for use entirely as a terminator or similar alongside other matchers. """ from typing import Optional, Sequence, Tuple from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.parser.types import SimpleHintType class NonCodeMatcher(Matchable): """An object which behaves like a matcher to match non-code.""" def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None ) -> SimpleHintType: """This element doesn't work with simple.""" return None def is_optional(self) -> bool: # pragma: no cover """Not optional. NOTE: The NonCodeMatcher is only normally only used as a terminator or other special instance matcher. As such the `.simple()` method is unlikely to be used. """ return False def cache_key(self) -> str: """Get the cache key for the matcher. NOTE: In this case, this class is a bit of a singleton and so we don't need a unique UUID in the same way as other classes. """ return "non-code-matcher" def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match any starting non-code segments.""" matched_idx = idx for matched_idx in range(idx, len(segments)): if segments[matched_idx].is_code: break if matched_idx > idx: return MatchResult(matched_slice=slice(idx, matched_idx)) # Otherwise return no match return MatchResult.empty_at(idx) sqlfluff-2.3.5/src/sqlfluff/core/parser/grammar/sequence.py000066400000000000000000000636521451700765000240310ustar00rootroot00000000000000"""Sequence and Bracketed Grammars.""" # NOTE: We rename the typing.Sequence here so it doesn't collide # with the grammar class that we're defining. 
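# -- Editor's aside: an illustrative sketch (not part of the sqlfluff source) -
# showing how the `Delimited` grammar defined above is typically used when
# building a dialect. The referenced segment names ("ColumnReferenceSegment",
# "CommaSegment") are assumed to exist in the dialect in use.

from sqlfluff.core.parser import Delimited, Ref

# Matches e.g. "col_a, col_b" or just "col_a" (min_delimiters defaults to 0).
# With allow_trailing=False (the default), a trailing comma in
# "col_a, col_b," would simply not be claimed by the match.
example_column_list = Delimited(
    Ref("ColumnReferenceSegment"),
    delimiter=Ref("CommaSegment"),
    allow_trailing=False,
)
# ------------------------------------------------------------------------------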
from os import getenv from typing import Optional, Set, Tuple, Type, Union, cast from typing import Sequence as SequenceType from sqlfluff.core.helpers.slice import is_zero_slice from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import ( BaseGrammar, cached_method_for_parse_context, ) from sqlfluff.core.parser.grammar.conditional import Conditional from sqlfluff.core.parser.match_algorithms import ( resolve_bracket, skip_start_index_forward_to_code, skip_stop_index_backward_to_code, trim_to_terminator, ) from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import ( BaseSegment, Indent, MetaSegment, TemplateSegment, UnparsableSegment, ) from sqlfluff.core.parser.types import ParseMode, SimpleHintType def _flush_metas( pre_nc_idx: int, post_nc_idx: int, meta_buffer: SequenceType[Type["MetaSegment"]], segments: SequenceType[BaseSegment], ) -> Tuple[Tuple[int, Type[MetaSegment]], ...]: """Position any new meta segments relative to the non code section. It's important that we position the new meta segments appropriately around any templated sections and any whitespace so that indentation behaviour works as expected. There are four valid locations (which may overlap). 1. Before any non-code 2. Before the first block templated section (if it's a block opener). 3. After the last block templated section (if it's a block closer). 4. After any non code. If all the metas have a positive indent value then they should go in position 1 or 3, otherwise we're in position 2 or 4. Within each of those scenarios it depends on whether an appropriate block end exists. """ if all(m.indent_val >= 0 for m in meta_buffer): for _idx in range(post_nc_idx, pre_nc_idx, -1): if segments[_idx - 1].is_type("placeholder"): _seg = cast(TemplateSegment, segments[_idx - 1]) if _seg.block_type == "block_end": meta_idx = _idx else: meta_idx = pre_nc_idx break else: meta_idx = pre_nc_idx else: for _idx in range(pre_nc_idx, post_nc_idx): if segments[_idx].is_type("placeholder"): _seg = cast(TemplateSegment, segments[_idx]) if _seg.block_type == "block_start": meta_idx = _idx else: meta_idx = post_nc_idx break else: meta_idx = post_nc_idx return tuple((meta_idx, meta) for meta in meta_buffer) class Sequence(BaseGrammar): """Match a specific sequence of elements.""" supported_parse_modes = { ParseMode.STRICT, ParseMode.GREEDY, ParseMode.GREEDY_ONCE_STARTED, } test_env = getenv("SQLFLUFF_TESTENV", "") @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Does this matcher support an uppercase hash matching route? Sequence does provide this, as long as the *first* non-optional element does, *AND* any optional elements which precede it also do. """ simple_raws: Set[str] = set() simple_types: Set[str] = set() for opt in self._elements: simple = opt.simple(parse_context=parse_context, crumbs=crumbs) if not simple: return None simple_raws.update(simple[0]) simple_types.update(simple[1]) if not opt.is_optional(): # We found our first non-optional element! return frozenset(simple_raws), frozenset(simple_types) # If *all* elements are optional AND simple, I guess it's also simple. return frozenset(simple_raws), frozenset(simple_types) def match( self, segments: SequenceType["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match a specific sequence of elements.
When returning incomplete matches in one of the greedy parse modes, we don't return any new meta segments (whether from conditionals or otherwise). This is because we meta segments (typically indents) may only make sense in the context of a full sequence, as their corresponding pair may be later (and yet unrendered). Partial matches should however still return the matched (mutated) versions of any segments which _have_ been processed to provide better feedback to the user. """ start_idx = idx # Where did we start matched_idx = idx # Where have we got to max_idx = len(segments) # What is the limit insert_segments: Tuple[Tuple[int, Type[MetaSegment]], ...] = () child_matches: Tuple[MatchResult, ...] = () first_match = True # Metas with a negative indent value come AFTER # the whitespace. Positive or neutral come BEFORE. # HOWEVER: If one is already there, we must preserve # the order. This forced ordering is fine if there's # a positive followed by a negative in the sequence, # but if by design a positive arrives *after* a # negative then we should insert it after the positive # instead. # https://github.com/sqlfluff/sqlfluff/issues/3836 meta_buffer = [] if self.parse_mode == ParseMode.GREEDY: # In the GREEDY mode, we first look ahead to find a terminator # before matching any code. max_idx = trim_to_terminator( segments, idx, terminators=[*self.terminators, *parse_context.terminators], parse_context=parse_context, ) # Iterate elements for elem in self._elements: # 1. Handle any metas or conditionals. # We do this first so that it's the same whether we've run # out of segments or not. # If it's a conditional, evaluate it. # In both cases, we don't actually add them as inserts yet # because their position will depend on what types we accrue. if isinstance(elem, Conditional): # A conditional grammar will only ever return insertions. # If it's not enabled it returns an empty match. # NOTE: No deeper match here, it seemed unnecessary. _match = elem.match(segments, matched_idx, parse_context) # Rather than taking them as a match at this location, we # requeue them for addition later. for _, submatch in _match.insert_segments: meta_buffer.append(submatch) continue # If it's a raw meta, just add it to our list. elif isinstance(elem, type) and issubclass(elem, Indent): meta_buffer.append(elem) continue # 2. Match Segments. # At this point we know there are segments left to match # on and that the current element isn't a meta or conditional. _idx = matched_idx # TODO: Need test cases to cover overmatching non code properly # especially around optional elements. if self.allow_gaps: # First, if we're allowing gaps, consume any non-code. # NOTE: This won't consume from the end of a sequence # because this happens only in the run up to matching # another element. This is as designed. _idx = skip_start_index_forward_to_code(segments, matched_idx, max_idx) # Have we prematurely run out of segments? if _idx >= max_idx: # If the current element is optional, carry on. if elem.is_optional(): continue # Otherwise we have a problem. We've already consumed # any metas, optionals and conditionals. # This is a failed match because we couldn't complete # the sequence. if ( # In a strict mode, running out a segments to match # on means that we don't match anything. self.parse_mode == ParseMode.STRICT # If nothing has been matched _anyway_ then just bail out. 
or matched_idx == start_idx ): return MatchResult.empty_at(idx) # On any of the other modes (GREEDY or GREEDY_ONCE_STARTED) # we've effectively already claimed the segments, we've # just failed to match. In which case it's unparsable. insert_segments += tuple((matched_idx, meta) for meta in meta_buffer) return MatchResult( matched_slice=slice(start_idx, matched_idx), insert_segments=insert_segments, child_matches=child_matches, ).wrap( UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} after {segments[matched_idx - 1]}. Found nothing." ) }, ) # Match the current element against the current position. with parse_context.deeper_match(name=f"Sequence-@{idx}") as ctx: # HACK: Segment slicing hack to limit elem_match = elem.match(segments[:max_idx], _idx, ctx) # Did we fail to match? (totally or un-cleanly) if not elem_match: # If we can't match an element, we should ascertain whether it's # required. If so then fine, move on, but otherwise we should # crash out without a match. We have not matched the sequence. if elem.is_optional(): # Pass this one and move onto the next element. continue if self.parse_mode == ParseMode.STRICT: # In a strict mode, failing to match an element means that # we don't match anything. return MatchResult.empty_at(idx) if ( self.parse_mode == ParseMode.GREEDY_ONCE_STARTED and matched_idx == start_idx ): # If it's only greedy once started, and we haven't matched # anything yet, then we also don't match anything. return MatchResult.empty_at(idx) # On any of the other modes (GREEDY or GREEDY_ONCE_STARTED) # we've effectively already claimed the segments, we've # just failed to match. In which case it's unparsable. # Handle the simple case where we haven't even started the # sequence yet first: if matched_idx == start_idx: return MatchResult( matched_slice=slice(start_idx, max_idx), matched_class=UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} to start sequence. Found {segments[_idx]}" ) }, ) # Then handle the case of a partial match. _start_idx = skip_start_index_forward_to_code( segments, matched_idx, max_idx ) return MatchResult( # NOTE: We use the already matched segments in the # return value so that if any have already been # matched, the user can see that. Those are not # part of the unparsable section. # NOTE: The unparsable section is _included_ in the span # of the parent match. # TODO: Make tests to assert that child matches sit within # the parent!!! matched_slice=slice(start_idx, max_idx), insert_segments=insert_segments, child_matches=child_matches + ( MatchResult( # The unparsable section is just the remaining # segments we were unable to match from the # sequence. matched_slice=slice(_start_idx, max_idx), matched_class=UnparsableSegment, segment_kwargs={ "expected": ( f"{elem} after {segments[matched_idx - 1]}. " f"Found {segments[_idx]}" ) }, ), ), ) # Flush any metas... insert_segments += _flush_metas(matched_idx, _idx, meta_buffer, segments) meta_buffer = [] # Otherwise we _do_ have a match. Update the position. matched_idx = elem_match.matched_slice.stop parse_context.update_progress(matched_idx) if first_match and self.parse_mode == ParseMode.GREEDY_ONCE_STARTED: # In the GREEDY_ONCE_STARTED mode, we first look ahead to find a # terminator after the first match (and only the first match). 
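# -- Editor's aside (illustrative, not part of the source) --------------------
# Roughly, for a Sequence of A then B run over the input "A X":
#   * STRICT:              no match at all (B never matches "X").
#   * GREEDY:              the span up to any terminator is claimed, and the
#                          unexpected "X" is wrapped as an UnparsableSegment.
#   * GREEDY_ONCE_STARTED: as GREEDY, but only once "A" has matched; if even
#                          "A" fails to match, nothing is claimed.
# ------------------------------------------------------------------------------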
max_idx = trim_to_terminator( segments, matched_idx, terminators=[*self.terminators, *parse_context.terminators], parse_context=parse_context, ) first_match = False # How we deal with child segments depends on whether it had a matched # class or not. # If it did, then just add it as a child match and we're done. Move on. if elem_match.matched_class: child_matches += (elem_match,) continue # Otherwise, we un-nest the returned structure, adding any inserts and # children into the inserts and children of this sequence. child_matches += elem_match.child_matches insert_segments += elem_match.insert_segments # If we get to here, we've matched all of the elements (or skipped them). insert_segments += tuple((matched_idx, meta) for meta in meta_buffer) # Finally if we're in one of the greedy modes, and there's anything # left as unclaimed, mark it as unparsable. if self.parse_mode in (ParseMode.GREEDY, ParseMode.GREEDY_ONCE_STARTED): if max_idx > matched_idx: _idx = skip_start_index_forward_to_code(segments, matched_idx, max_idx) _stop_idx = skip_stop_index_backward_to_code(segments, max_idx, _idx) if _stop_idx > _idx: child_matches += ( MatchResult( # The unparsable section is just the remaining # segments we were unable to match from the # sequence. matched_slice=slice(_idx, _stop_idx), matched_class=UnparsableSegment, # TODO: We should come up with a better "expected" string # than this segment_kwargs={"expected": "Nothing here."}, ), ) # Match up to the end. matched_idx = _stop_idx return MatchResult( matched_slice=slice(start_idx, matched_idx), insert_segments=insert_segments, child_matches=child_matches, ) class Bracketed(Sequence): """Match if this is a bracketed sequence, with content that matches one of the elements. Note that the contents of the Bracketed Expression are treated as an expected sequence. Changelog: - Post 0.3.2: Bracketed inherits from Sequence and anything within the `Bracketed()` expression is treated as a sequence. For the content of the Brackets, we call the `match()` method of the sequence grammar. - Post 0.1.0: Bracketed was separate from sequence, and the content of the expression was treated as options (like OneOf). - Pre 0.1.0: Bracketed inherited from Sequence and simply added brackets to that sequence. """ def __init__( self, *args: Union[Matchable, str], bracket_type: str = "round", bracket_pairs_set: str = "bracket_pairs", start_bracket: Optional[Matchable] = None, end_bracket: Optional[Matchable] = None, allow_gaps: bool = True, optional: bool = False, parse_mode: ParseMode = ParseMode.STRICT, ) -> None: """Initialize the object. Args: *args (Union[Matchable, str]): Variable length arguments which can be of type 'Matchable' or 'str'. bracket_type (str, optional): The type of bracket used. Defaults to 'round'. bracket_pairs_set (str, optional): The set of bracket pairs. Defaults to 'bracket_pairs'. start_bracket (Optional[Matchable], optional): The start bracket. Defaults to None. end_bracket (Optional[Matchable], optional): The end bracket. Defaults to None. allow_gaps (bool, optional): Whether to allow gaps. Defaults to True. optional (bool, optional): Whether optional. Defaults to False. parse_mode (ParseMode, optional): The parse mode. Defaults to ParseMode.STRICT. """ # Store the bracket type. NB: This is only # hydrated into segments at runtime.
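# -- Editor's aside: an illustrative sketch (not part of the source) ----------
# A dialect would typically use this class along these lines, with the
# bracketed contents parsed as a Sequence. The segment names referenced
# below are assumed to be defined by the dialect:
#
#     Bracketed(
#         Delimited(Ref("ExpressionSegment")),
#         bracket_type="round",
#         parse_mode=ParseMode.GREEDY,
#     )
# ------------------------------------------------------------------------------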
self.bracket_type = bracket_type self.bracket_pairs_set = bracket_pairs_set # Allow optional override for special bracket-like things self.start_bracket = start_bracket self.end_bracket = end_bracket super().__init__( *args, allow_gaps=allow_gaps, optional=optional, parse_mode=parse_mode, ) @cached_method_for_parse_context def simple( self, parse_context: ParseContext, crumbs: Optional[Tuple[str]] = None ) -> SimpleHintType: """Check if the matcher supports an uppercase hash matching route. Bracketed does this easily, we just look for the bracket. """ start_bracket, _, _ = self.get_bracket_from_dialect(parse_context) return start_bracket.simple(parse_context=parse_context, crumbs=crumbs) def get_bracket_from_dialect( self, parse_context: ParseContext ) -> Tuple[Matchable, Matchable, bool]: """Rehydrate the bracket segments in question.""" bracket_pairs = parse_context.dialect.bracket_sets(self.bracket_pairs_set) for bracket_type, start_ref, end_ref, persists in bracket_pairs: if bracket_type == self.bracket_type: start_bracket = parse_context.dialect.ref(start_ref) end_bracket = parse_context.dialect.ref(end_ref) break else: # pragma: no cover raise ValueError( "bracket_type {!r} not found in bracket_pairs of {!r} dialect.".format( self.bracket_type, parse_context.dialect.name ) ) return start_bracket, end_bracket, persists def match( self, segments: SequenceType["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match a bracketed sequence of elements. Once we've confirmed the existence of the initial opening bracket, this grammar delegates to `resolve_bracket()` to recursively close any brackets we find until the initial opening bracket has been closed. After the closing point of the bracket has been established, we then match the content against the elements of this grammar (as options, not as a sequence). How the grammar behaves on different content depends on the `parse_mode`: - If the parse mode is `GREEDY`, this always returns a match if the opening and closing brackets are found. Anything unexpected within the brackets is marked as `unparsable`. - If the parse mode is `STRICT`, then this only returns a match if the content of the brackets matches (and matches *completely*) one of the elements of the grammar. Otherwise no match. """ # Rehydrate the bracket segments in question. # bracket_persists controls whether we make a BracketedSegment or not. start_bracket, end_bracket, bracket_persists = self.get_bracket_from_dialect( parse_context ) # Allow optional override for special bracket-like things start_bracket = self.start_bracket or start_bracket end_bracket = self.end_bracket or end_bracket # Otherwise try and match the segments directly. # Look for the first bracket with parse_context.deeper_match(name="Bracketed-Start") as ctx: start_match = start_bracket.match(segments, idx, ctx) if not start_match: # Can't find the opening bracket. No Match. return MatchResult.empty_at(idx) # NOTE: Ideally we'd match on the _content_ next, providing we were sure # we wouldn't hit the end. But it appears the terminator logic isn't # robust enough for that yet. Until then, we _first_ look for the closing # bracket and _then_ match on the inner content.
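# -- Editor's aside (illustrative, not part of the source) --------------------
# For input like "(a + (b * c))" starting at the first "(", resolve_bracket()
# walks forward matching openers and closers, recursing into the nested
# "(b * c)" pair, and only returns once the *initial* opening bracket has
# been balanced -- i.e. at the final ")".
# ------------------------------------------------------------------------------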
bracketed_match = resolve_bracket( segments, opening_match=start_match, opening_matcher=start_bracket, start_brackets=[start_bracket], end_brackets=[end_bracket], bracket_persists=[bracket_persists], parse_context=parse_context, ) # If the brackets couldn't be resolved, then it will raise a parsing error # that means we can assert that brackets have been matched if there is no # error. assert bracketed_match # The bracketed_match will also already have been wrapped as a # BracketedSegment including the references to start and end brackets. # We only need to add content. # Work forward through any gaps at the start and end. # NOTE: We assume that all brackets are single segment. _idx = start_match.matched_slice.stop _end_idx = bracketed_match.matched_slice.stop - 1 if self.allow_gaps: _idx = skip_start_index_forward_to_code(segments, _idx) _end_idx = skip_stop_index_backward_to_code(segments, _end_idx, _idx) # Try and match content, clearing and adding the closing bracket # to the terminators. with parse_context.deeper_match( name="Bracketed", clear_terminators=True, push_terminators=[end_bracket] ) as ctx: # NOTE: This slice is a bit of a hack, but it's the only # reliable way so far to make sure we don't "over match" when # presented with a potential terminating bracket. content_match = super().match(segments[:_end_idx], _idx, ctx) # No complete match within the brackets? Stop here and return unmatched. if ( not content_match.matched_slice.stop == _end_idx and self.parse_mode == ParseMode.STRICT ): return MatchResult.empty_at(idx) # What's between the final match and the content. Hopefully just gap? intermediate_slice = slice( # NOTE: Assumes that brackets are always of size 1. content_match.matched_slice.stop, bracketed_match.matched_slice.stop - 1, ) if not self.allow_gaps and not is_zero_slice(intermediate_slice): # NOTE: In this clause, content_match will never have matched. Either # we're in STRICT mode, and would have exited in the `return` above, # or we're in GREEDY mode and the `super().match()` will have already # claimed the whole sequence with nothing left. This clause is # effectively only accessible in a bracketed section which doesn't # allow whitespace but nonetheless has some, which is fairly rare. expected = str(self._elements) # Whatever is in the gap should be marked as an UnparsableSegment. child_match = MatchResult( intermediate_slice, UnparsableSegment, segment_kwargs={"expected": expected}, ) content_match = content_match.append(child_match) # We now have content and bracketed matches. Depending on whether the intent # is to wrap or not we should construct the response. _content_matches: Tuple[MatchResult, ...] if content_match.matched_class: _content_matches = bracketed_match.child_matches + (content_match,) else: _content_matches = ( bracketed_match.child_matches + content_match.child_matches ) # NOTE: Whether a bracket is wrapped or unwrapped (i.e. 
the effect of # `bracket_persists`, is controlled by `resolve_bracket`) return MatchResult( matched_slice=bracketed_match.matched_slice, matched_class=bracketed_match.matched_class, segment_kwargs=bracketed_match.segment_kwargs, insert_segments=bracketed_match.insert_segments, child_matches=_content_matches, ) sqlfluff-2.3.5/src/sqlfluff/core/parser/helpers.py000066400000000000000000000034771451700765000222340ustar00rootroot00000000000000"""Helpers for the parser module.""" from typing import TYPE_CHECKING, Tuple from sqlfluff.core.errors import SQLParseError if TYPE_CHECKING: from sqlfluff.core.parser.segments import BaseSegment # pragma: no cover def join_segments_raw(segments: Tuple["BaseSegment", ...]) -> str: """Make a string from the joined `raw` attributes of an iterable of segments.""" return "".join(s.raw for s in segments) def check_still_complete( segments_in: Tuple["BaseSegment", ...], matched_segments: Tuple["BaseSegment", ...], unmatched_segments: Tuple["BaseSegment", ...], ) -> bool: """Check that the segments in are the same as the segments out.""" initial_str = join_segments_raw(segments_in) current_str = join_segments_raw(matched_segments + unmatched_segments) if initial_str != current_str: # pragma: no cover segment = unmatched_segments[0] if unmatched_segments else None raise SQLParseError( f"Parse completeness check fail: {current_str!r} != {initial_str!r}", segment=segment, ) return True def trim_non_code_segments( segments: Tuple["BaseSegment", ...] ) -> Tuple[ Tuple["BaseSegment", ...], Tuple["BaseSegment", ...], Tuple["BaseSegment", ...] ]: """Take segments and split off surrounding non-code segments as appropriate. We use slices to avoid creating too many unnecessary tuples. """ pre_idx = 0 seg_len = len(segments) post_idx = seg_len if segments: seg_len = len(segments) # Trim the start while pre_idx < seg_len and not segments[pre_idx].is_code: pre_idx += 1 # Trim the end while post_idx > pre_idx and not segments[post_idx - 1].is_code: post_idx -= 1 return segments[:pre_idx], segments[pre_idx:post_idx], segments[post_idx:] sqlfluff-2.3.5/src/sqlfluff/core/parser/lexer.py000066400000000000000000001063331451700765000217040ustar00rootroot00000000000000"""The code for the Lexer.""" import logging from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple, Type, Union from uuid import UUID, uuid4 import regex from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLLexError from sqlfluff.core.helpers.slice import is_zero_slice, offset_slice, to_tuple from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, Dedent, EndOfFile, Indent, MetaSegment, RawSegment, TemplateLoop, TemplateSegment, UnlexableSegment, ) from sqlfluff.core.templaters import TemplatedFile from sqlfluff.core.templaters.base import TemplatedFileSlice # Instantiate the lexer logger lexer_logger = logging.getLogger("sqlfluff.lexer") class BlockTracker: """This is an object for keeping track of templating blocks. Using the .enter() and .exit() methods on opening and closing blocks, we can match up tags of the same level so that later it's easier to treat them the same way in the linting engine. In case looping means that we encounter the same block more than once, we use cache uuids against their source location so that if we try to re-enter the block again, it will get the same uuid on the second pass. 
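    For example (an illustrative sketch, not taken from the test suite):

    .. code-block:: python

        tracker = BlockTracker()
        tracker.enter(slice(10, 20))  # first pass through the block
        first_uuid = tracker.top()
        tracker.exit()
        # Looping back over the same source location...
        tracker.enter(slice(10, 20))
        assert tracker.top() == first_uuid  # same uuid on the second pass
        tracker.exit()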
""" _stack: List[UUID] = [] _map: Dict[Tuple[int, int], UUID] = {} def enter(self, src_slice: slice) -> None: """Add a block to the stack.""" key = to_tuple(src_slice) uuid = self._map.get(key, None) if not uuid: uuid = uuid4() self._map[key] = uuid lexer_logger.debug( " Entering block stack @ %s: %s (fresh)", src_slice, uuid, ) else: lexer_logger.debug( " Entering block stack @ %s: %s (cached)", src_slice, uuid, ) self._stack.append(uuid) def exit(self) -> None: """Pop a block from the stack.""" uuid = self._stack.pop() lexer_logger.debug( " Exiting block stack: %s", uuid, ) def top(self) -> UUID: """Get the uuid on top of the stack.""" return self._stack[-1] class LexedElement(NamedTuple): """An element matched during lexing.""" raw: str matcher: "StringLexer" class TemplateElement(NamedTuple): """A LexedElement, bundled with it's position in the templated file.""" raw: str template_slice: slice matcher: "StringLexer" @classmethod def from_element( cls, element: LexedElement, template_slice: slice ) -> "TemplateElement": """Make a TemplateElement from a LexedElement.""" return cls( raw=element.raw, template_slice=template_slice, matcher=element.matcher ) def to_segment( self, pos_marker: PositionMarker, subslice: Optional[slice] = None ) -> RawSegment: """Create a segment from this lexed element.""" return self.matcher.construct_segment( self.raw[subslice] if subslice else self.raw, pos_marker=pos_marker ) class LexMatch(NamedTuple): """A class to hold matches from the Lexer.""" forward_string: str elements: List[LexedElement] def __bool__(self) -> bool: """A LexMatch is truthy if it contains a non-zero number of matched elements.""" return len(self.elements) > 0 LexerType = Union["RegexLexer", "StringLexer"] class StringLexer: """This singleton matcher matches strings exactly. This is the simplest usable matcher, but it also defines some of the mechanisms for more complicated matchers, which may simply override the `_match` function rather than the public `match` function. This acts as the base class for matchers. """ def __init__( self, name: str, template: str, segment_class: Type[RawSegment], subdivider: Optional[LexerType] = None, trim_post_subdivide: Optional[LexerType] = None, segment_kwargs: Optional[Dict[str, Any]] = None, ) -> None: self.name = name self.template = template self.segment_class = segment_class self.subdivider = subdivider self.trim_post_subdivide = trim_post_subdivide self.segment_kwargs = segment_kwargs or {} self.__post_init__() def __repr__(self) -> str: return f"<{self.__class__.__name__}: {self.name}>" def __post_init__(self) -> None: """Optional post-init method called after __init__(). Designed for subclasses to use. """ pass def _match(self, forward_string: str) -> Optional[LexedElement]: """The private match function. Just look for a literal string.""" if forward_string.startswith(self.template): return LexedElement(self.template, self) else: return None def search(self, forward_string: str) -> Optional[Tuple[int, int]]: """Use string methods to find a substring.""" loc = forward_string.find(self.template) if loc >= 0: return loc, loc + len(self.template) else: return None def _trim_match(self, matched_str: str) -> List[LexedElement]: """Given a string, trim if we are allowed to. Returns: :obj:`tuple` of LexedElement """ elem_buff: List[LexedElement] = [] content_buff = "" str_buff = matched_str if self.trim_post_subdivide: while str_buff: # Iterate through subdividing as appropriate trim_pos = self.trim_post_subdivide.search(str_buff) # No match? 
Break if not trim_pos: break # Start match? elif trim_pos[0] == 0: elem_buff.append( LexedElement( str_buff[: trim_pos[1]], self.trim_post_subdivide, ) ) str_buff = str_buff[trim_pos[1] :] # End Match? elif trim_pos[1] == len(str_buff): elem_buff += [ LexedElement( content_buff + str_buff[: trim_pos[0]], self, ), LexedElement( str_buff[trim_pos[0] : trim_pos[1]], self.trim_post_subdivide, ), ] content_buff, str_buff = "", "" # Mid Match? (carry on) else: content_buff += str_buff[: trim_pos[1]] str_buff = str_buff[trim_pos[1] :] # Do we have anything left? (or did nothing happen) if content_buff + str_buff: elem_buff.append( LexedElement(content_buff + str_buff, self), ) return elem_buff def _subdivide(self, matched: LexedElement) -> List[LexedElement]: """Given a string, subdivide if we are allowed to. Returns: :obj:`tuple` of segments """ # Do we have to subdivide? if self.subdivider: # Yes subdivision elem_buff: List[LexedElement] = [] str_buff = matched.raw while str_buff: # Iterate through subdividing as appropriate div_pos = self.subdivider.search(str_buff) if div_pos: # Found a division trimmed_elems = self._trim_match(str_buff[: div_pos[0]]) div_elem = LexedElement( str_buff[div_pos[0] : div_pos[1]], self.subdivider ) elem_buff += trimmed_elems + [div_elem] str_buff = str_buff[div_pos[1] :] else: # No more division matches. Trim? trimmed_elems = self._trim_match(str_buff) elem_buff += trimmed_elems break return elem_buff else: return [matched] def match(self, forward_string: str) -> LexMatch: """Given a string, match what we can and return the rest. Returns: :obj:`LexMatch` """ if len(forward_string) == 0: # pragma: no cover raise ValueError("Unexpected empty string!") matched = self._match(forward_string) if matched: # Handle potential subdivision elsewhere. new_elements = self._subdivide(matched) return LexMatch( forward_string[len(matched.raw) :], new_elements, ) else: return LexMatch(forward_string, []) def construct_segment(self, raw: str, pos_marker: PositionMarker) -> RawSegment: """Construct a segment using the given class and properties. Unless an override `type` is provided in the `segment_kwargs`, it is assumed that the `name` of the lexer is designated as the intended `type` of the segment. """ # NOTE: Using a private attribute here feels a bit wrong. _segment_class_types = self.segment_class._class_types _kwargs = self.segment_kwargs assert not ( "type" in _kwargs and "instance_types" in _kwargs ), f"Cannot set both `type` and `instance_types` in segment kwargs: {_kwargs}" if "type" in _kwargs: # TODO: At some point we should probably deprecate this API and only # allow setting `instance_types`. assert _kwargs["type"] _kwargs["instance_types"] = (_kwargs.pop("type"),) elif "instance_types" not in _kwargs and self.name not in _segment_class_types: _kwargs["instance_types"] = (self.name,) return self.segment_class(raw=raw, pos_marker=pos_marker, **_kwargs) class RegexLexer(StringLexer): """This RegexLexer matches based on regular expressions.""" def __post_init__(self) -> None: """Handle setup for RegexLexer.""" # We might want to configure this at some point, but for now, newlines # do get matched by .
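# -- Editor's aside (illustrative, not part of the source) --------------------
# DOTALL makes `.` also match newlines, which matters for multi-line tokens
# such as block comments:
#
#     import regex
#     assert regex.match(r"/\*.*\*/", "/* a\nb */", regex.DOTALL)
#     assert not regex.match(r"/\*.*\*/", "/* a\nb */")
# ------------------------------------------------------------------------------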
flags = regex.DOTALL self._compiled_regex = regex.compile(self.template, flags) def _match(self, forward_string: str) -> Optional[LexedElement]: """Use regexes to match chunks.""" match = self._compiled_regex.match(forward_string) if match: # We can only match strings with length match_str = match.group(0) if match_str: return LexedElement(match_str, self) else: # pragma: no cover lexer_logger.warning( f"Zero length Lex item returned from {self.name!r}. Report this as " "a bug." ) return None def search(self, forward_string: str) -> Optional[Tuple[int, int]]: """Use regex to find a substring.""" match = self._compiled_regex.search(forward_string) if match: # We can only match strings with length if match.group(0): return match.span() else: # pragma: no cover lexer_logger.warning( f"Zero length Lex item returned from {self.name!r}. Report this as " "a bug." ) return None def _handle_zero_length_slice( tfs: TemplatedFileSlice, next_tfs: Optional[TemplatedFileSlice], block_stack: BlockTracker, templated_file: TemplatedFile, add_indents: bool, ) -> Iterator[MetaSegment]: """Generate placeholders and loop segments from a zero length slice. This method checks for: 1. Backward jumps (inserting :obj:`TemplateLoop`). 2. Forward jumps (inserting :obj:`TemplateSegment`). 3. Blocks (inserting :obj:`TemplateSegment`). 4. Unrendered template elements(inserting :obj:`TemplateSegment`). For blocks and loops, :obj:`Indent` and :obj:`Dedent` segments are yielded around them as appropriate. NOTE: block_stack is _mutated_ by this method. """ assert is_zero_slice(tfs.templated_slice) # First check for jumps. Backward initially, because in the backward # case we don't render the element we find first. # That requires being able to look past to the next element. if tfs.slice_type.startswith("block") and next_tfs: # Look for potential backward jump if next_tfs.source_slice.start < tfs.source_slice.start: lexer_logger.debug(" Backward jump detected. Inserting Loop Marker") # If we're here remember we're on the tfs which is the block end # i.e. not the thing we want to render. pos_marker = PositionMarker.from_point( tfs.source_slice.start, tfs.templated_slice.start, templated_file, ) if add_indents: yield Dedent( is_template=True, pos_marker=pos_marker, ) yield TemplateLoop(pos_marker=pos_marker, block_uuid=block_stack.top()) if add_indents: yield Indent( is_template=True, pos_marker=pos_marker, ) # Move on to the next templated slice. Don't render this directly. return # Then handle blocks (which aren't jumps backward) if tfs.slice_type.startswith("block"): # It's a block. Yield a placeholder with potential indents. # Update block stack or add indents if tfs.slice_type == "block_start": block_stack.enter(tfs.source_slice) elif add_indents and tfs.slice_type in ("block_end", "block_mid"): yield Dedent( is_template=True, pos_marker=PositionMarker.from_point( tfs.source_slice.start, tfs.templated_slice.start, templated_file, ), # NOTE: We mark the dedent with the block uuid too. block_uuid=block_stack.top(), ) yield TemplateSegment.from_slice( tfs.source_slice, tfs.templated_slice, block_type=tfs.slice_type, templated_file=templated_file, block_uuid=block_stack.top(), ) # Update block stack or add indents if tfs.slice_type == "block_end": block_stack.exit() elif add_indents and tfs.slice_type in ("block_start", "block_mid"): yield Indent( is_template=True, pos_marker=PositionMarker.from_point( tfs.source_slice.stop, tfs.templated_slice.stop, templated_file, ), # NOTE: We mark the indent with the block uuid too. 
block_uuid=block_stack.top(), ) # Before we move on, we might have a _forward_ jump to the next # element. That element can handle itself, but we'll add a # placeholder for it here before we move on. if next_tfs: # Identify whether we have a skip. skipped_chars = next_tfs.source_slice.start - tfs.source_slice.stop placeholder_str = "" if skipped_chars >= 10: placeholder_str = ( f"... [{skipped_chars} unused template " "characters] ..." ) elif skipped_chars: placeholder_str = "..." # Handle it if we do. if placeholder_str: lexer_logger.debug(" Forward jump detected. Inserting placeholder") yield TemplateSegment( pos_marker=PositionMarker( slice(tfs.source_slice.stop, next_tfs.source_slice.start), # Zero slice in the template. tfs.templated_slice, templated_file, ), source_str=placeholder_str, block_type="skipped_source", ) # Move on return # Always return the slice, even if the source slice was also zero length. Some # templaters might want to pass through totally zero length slices as a way of # marking locations in the middle of templated output. yield TemplateSegment.from_slice( tfs.source_slice, tfs.templated_slice, tfs.slice_type, templated_file, ) def _iter_segments( lexed_elements: List[TemplateElement], templated_file: TemplatedFile, add_indents: bool = True, ) -> Iterator[RawSegment]: # An index to track where we've got to in the templated file. tfs_idx = 0 # We keep a map of previous block locations in case they re-occur. block_stack = BlockTracker() templated_file_slices = templated_file.sliced_file # Now work out source slices, and add in template placeholders. for idx, element in enumerate(lexed_elements): # We're working through elements in the rendered file. # When they enter this code they don't have a position in the source. # We already have a map of how templated elements map to the source file # so we work through them to work out what's going on. In theory we can # step through the two lists in lock step. # i.e. we worked through the lexed elements, but check off the templated # file slices as we go. # Output the slice as we lex. lexer_logger.debug(" %s: %s. [tfs_idx = %s]", idx, element, tfs_idx) # All lexed elements, by definition, have a position in the templated # file. That means we've potentially got zero-length elements we also # need to consider. We certainly need to consider templated slices # at tfs_idx. But we should consider some others after that which we # might also need to consider. # A lexed element is either a literal in the raw file or the result # (or part of the result) of a template placeholder. We don't make # placeholders for any variables which return a non-zero length of # code. We do add placeholders for others. # The amount of the current element which has already been consumed. consumed_element_length = 0 # The position in the source which we still need to yield from. stashed_source_idx = None for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx): lexer_logger.debug(" %s: %s", tfs_idx, tfs) # Is it a zero slice? if is_zero_slice(tfs.templated_slice): next_tfs = ( templated_file_slices[tfs_idx + 1] if tfs_idx + 1 < len(templated_file_slices) else None ) yield from _handle_zero_length_slice( tfs, next_tfs, block_stack, templated_file, add_indents ) continue if tfs.slice_type == "literal": # There's a literal to deal with here. Yield as much as we can. # Can we cover this whole lexed element with the current templated # slice without moving on? 
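# -- Editor's aside (illustrative, not part of the source) --------------------
# For a literal slice, source and templated positions differ by a fixed
# offset. E.g. with templated_slice=slice(5, 15) and source_slice=slice(25, 35)
# the offset is 25 - 5 = 20, so a lexed element starting at templated
# position 7 starts at source position 7 + 20 = 27.
# ------------------------------------------------------------------------------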
tfs_offset = tfs.source_slice.start - tfs.templated_slice.start # NOTE: Greater than OR EQUAL, to include the case of it matching # length exactly. if element.template_slice.stop <= tfs.templated_slice.stop: lexer_logger.debug( " Consuming whole from literal. Existing Consumed: %s", consumed_element_length, ) # If we have a stashed start use that. Otherwise infer start. if stashed_source_idx is not None: slice_start = stashed_source_idx else: slice_start = ( element.template_slice.start + consumed_element_length + tfs_offset ) yield element.to_segment( pos_marker=PositionMarker( slice( slice_start, element.template_slice.stop + tfs_offset, ), element.template_slice, templated_file, ), subslice=slice(consumed_element_length, None), ) # If it was an exact match, consume the templated element too. if element.template_slice.stop == tfs.templated_slice.stop: tfs_idx += 1 # In any case, we're done with this element. Move on break elif element.template_slice.start == tfs.templated_slice.stop: # Did we forget to move on from the last tfs and there's # overlap? # NOTE: If the rest of the logic works, this should never # happen. lexer_logger.debug(" NOTE: Missed Skip") # pragma: no cover continue # pragma: no cover else: # This means that the current lexed element spans across # multiple templated file slices. lexer_logger.debug(" Consuming whole spanning literal") # This almost certainly means there's a templated element # in the middle of a whole lexed element. # What we do here depends on whether we're allowed to split # lexed elements. This is basically only true if it's whitespace. # NOTE: We should probably make this configurable on the # matcher object, but for now we're going to look for the # name of the lexer. if element.matcher.name == "whitespace": # We *can* split it! # Consume what we can from this slice and move on. lexer_logger.debug( " Consuming split whitespace from literal. " "Existing Consumed: %s", consumed_element_length, ) if stashed_source_idx is not None: raise NotImplementedError( # pragma: no cover "Found literal whitespace with stashed idx!" ) incremental_length = ( tfs.templated_slice.stop - element.template_slice.start ) yield element.to_segment( pos_marker=PositionMarker( slice( element.template_slice.start + consumed_element_length + tfs_offset, tfs.templated_slice.stop + tfs_offset, ), element.template_slice, templated_file, ), # Subdivide the existing segment. subslice=offset_slice( consumed_element_length, incremental_length, ), ) consumed_element_length += incremental_length continue else: # We can't split it. We're going to end up yielding a segment # which spans multiple slices. Stash the type, and if we haven't # set the start yet, stash it too. lexer_logger.debug(" Spilling over literal slice.") if stashed_source_idx is None: stashed_source_idx = ( element.template_slice.start + tfs_offset ) lexer_logger.debug( " Stashing a source start. %s", stashed_source_idx ) continue elif tfs.slice_type in ("templated", "block_start"): # Found a templated slice. Does it have length in the templated file? # If it doesn't, then we'll pick it up next. if not is_zero_slice(tfs.templated_slice): # If it's a block_start. Append to the block stack. # NOTE: This is rare, but call blocks do occasionally # have length (and so don't get picked up by # _handle_zero_length_slice) if tfs.slice_type == "block_start": block_stack.enter(tfs.source_slice) # Is our current element totally contained in this slice? 
if element.template_slice.stop <= tfs.templated_slice.stop: lexer_logger.debug(" Contained templated slice.") # Yes it is. Add lexed element with source slices as the whole # span of the source slice for the file slice. # If we've got an existing stashed source start, use that # as the start of the source slice. if stashed_source_idx is not None: slice_start = stashed_source_idx else: slice_start = ( tfs.source_slice.start + consumed_element_length ) yield element.to_segment( pos_marker=PositionMarker( slice( slice_start, # The end in the source is the end of the templated # slice. We can't subdivide any better. tfs.source_slice.stop, ), element.template_slice, templated_file, ), subslice=slice(consumed_element_length, None), ) # If it was an exact match, consume the templated element too. if element.template_slice.stop == tfs.templated_slice.stop: tfs_idx += 1 # Carry on to the next lexed element break # We've got an element which extends beyond this templated slice. # This means that a _single_ lexed element claims both some # templated elements and some non-templated elements. That could # include all kinds of things (and from here we don't know what # else is yet to come, comments, blocks, literals etc...). # In the `literal` version of this code we would consider # splitting the literal element here, but in the templated # side we don't. That's because the way that templated tokens # are lexed, means that they should arrive "pre-split". else: # Stash the source idx for later when we do make a segment. lexer_logger.debug(" Spilling over templated slice.") if stashed_source_idx is None: stashed_source_idx = tfs.source_slice.start lexer_logger.debug( " Stashing a source start as lexed element spans " "over the end of a template slice. %s", stashed_source_idx, ) # Move on to the next template slice continue raise NotImplementedError( f"Unable to process slice: {tfs}" ) # pragma: no cover # If templated elements are left, yield them. # We can assume they're all zero length if we're here. for tfs_idx, tfs in enumerate(templated_file_slices[tfs_idx:], tfs_idx): next_tfs = ( templated_file_slices[tfs_idx + 1] if tfs_idx + 1 < len(templated_file_slices) else None ) yield from _handle_zero_length_slice( tfs, next_tfs, block_stack, templated_file, add_indents ) class Lexer: """The Lexer class actually does the lexing step.""" def __init__( self, config: Optional[FluffConfig] = None, last_resort_lexer: Optional[StringLexer] = None, dialect: Optional[str] = None, ): # Allow optional config and dialect self.config = FluffConfig.from_kwargs(config=config, dialect=dialect) # Store the matchers self.lexer_matchers = self.config.get("dialect_obj").get_lexer_matchers() self.last_resort_lexer = last_resort_lexer or RegexLexer( "<unlexable>", r"[^\t\n\ ]*", UnlexableSegment, ) def lex( self, raw: Union[str, TemplatedFile] ) -> Tuple[Tuple[BaseSegment, ...], List[SQLLexError]]: """Take a string or TemplatedFile and return segments. If we fail to match the *whole* string, then we must have found something that we cannot lex. If that happens we should package it up as unlexable and keep track of the exceptions. """ # Make sure we've got a string buffer and a template # regardless of what was passed in.
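# -- Editor's aside: an illustrative sketch (not part of the source) ----------
# Typical external usage of this method looks something like:
#
#     from sqlfluff.core import FluffConfig
#     from sqlfluff.core.parser import Lexer
#
#     lexer = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
#     segments, violations = lexer.lex("SELECT 1")
# ------------------------------------------------------------------------------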
if isinstance(raw, str): template = TemplatedFile.from_string(raw) str_buff = raw else: template = raw str_buff = str(template) # Lex the string to get a tuple of LexedElement element_buffer: List[LexedElement] = [] while True: res = self.lex_match(str_buff, self.lexer_matchers) element_buffer += res.elements if res.forward_string: resort_res = self.last_resort_lexer.match(res.forward_string) if not resort_res: # pragma: no cover # If we STILL can't match, then just panic out. raise SQLLexError( "Fatal. Unable to lex characters: {0!r}".format( res.forward_string[:10] + "..." if len(res.forward_string) > 9 else res.forward_string ) ) str_buff = resort_res.forward_string element_buffer += resort_res.elements else: # pragma: no cover TODO? break # Map tuple LexedElement to list of TemplateElement. # This adds the template_slice to the object. templated_buffer = self.map_template_slices(element_buffer, template) # Turn lexed elements into segments. segments: Tuple[RawSegment, ...] = self.elements_to_segments( templated_buffer, template ) # Generate any violations violations: List[SQLLexError] = self.violations_from_segments(segments) return segments, violations def elements_to_segments( self, elements: List[TemplateElement], templated_file: TemplatedFile ) -> Tuple[RawSegment, ...]: """Convert a tuple of lexed elements into a tuple of segments.""" lexer_logger.info("Elements to Segments.") add_indents = self.config.get("template_blocks_indent", "indentation") # Delegate to _iter_segments segment_buffer: List[RawSegment] = list( _iter_segments(elements, templated_file, add_indents) ) # Add an end of file marker segment_buffer.append( EndOfFile( pos_marker=segment_buffer[-1].pos_marker.end_point_marker() if segment_buffer else PositionMarker.from_point(0, 0, templated_file) ) ) # Convert to tuple before return return tuple(segment_buffer) @staticmethod def violations_from_segments(segments: Tuple[RawSegment, ...]) -> List[SQLLexError]: """Generate any lexing errors for any unlexables.""" violations = [] for segment in segments: if segment.is_type("unlexable"): violations.append( SQLLexError( "Unable to lex characters: {!r}".format( segment.raw[:10] + "..." if len(segment.raw) > 9 else segment.raw ), pos=segment.pos_marker, ) ) return violations @staticmethod def lex_match(forward_string: str, lexer_matchers: List[StringLexer]) -> LexMatch: """Iteratively match strings using the selection of submatchers.""" elem_buff: List[LexedElement] = [] while True: if len(forward_string) == 0: return LexMatch(forward_string, elem_buff) for matcher in lexer_matchers: res = matcher.match(forward_string) if res.elements: # If we have new segments then whoop! elem_buff += res.elements forward_string = res.forward_string # Cycle back around again and start with the top # matcher again. break else: # We've got so far, but now can't match. Return return LexMatch(forward_string, elem_buff) @staticmethod def map_template_slices( elements: List[LexedElement], template: TemplatedFile ) -> List[TemplateElement]: """Create a tuple of TemplateElement from a tuple of LexedElement. This adds slices in the templated file to the original lexed elements. We'll need this to work out the position in the source file. 
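
        For example (illustrative values): an element with raw "SELECT" at
        running index 0 is assigned template_slice(0, 6), and the next
        element's slice starts at index 6, keeping the buffer contiguous.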
""" idx = 0 templated_buff: List[TemplateElement] = [] for element in elements: template_slice = offset_slice(idx, len(element.raw)) idx += len(element.raw) templated_buff.append(TemplateElement.from_element(element, template_slice)) if ( template.templated_str[template_slice] != element.raw ): # pragma: no cover raise ValueError( "Template and lexed elements do not match. This should never " f"happen {element.raw!r} != " f"{template.templated_str[template_slice]!r}" ) return templated_buff sqlfluff-2.3.5/src/sqlfluff/core/parser/markers.py000066400000000000000000000224751451700765000222350ustar00rootroot00000000000000"""Implements the PositionMarker class. This class is a construct to keep track of positions within a file. """ from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Optional, Tuple from sqlfluff.core.helpers.slice import zero_slice if TYPE_CHECKING: from sqlfluff.core.templaters import TemplatedFile # pragma: no cover @dataclass(frozen=True) class PositionMarker: """A reference to a position in a file. Things to note: - This combines the previous functionality of FilePositionMarker and EnrichedFilePositionMarker. Additionally it contains a reference to the original templated file. - It no longer explicitly stores a line number or line position in the source or template. This is extrapolated from the templated file as required. - Positions in the source and template are with slices and therefore identify ranges. - Positions within the fixed file are identified with a line number and line position, which identify a point. - Arithmetic comparisons are on the location in the fixed file. """ source_slice: slice templated_slice: slice templated_file: "TemplatedFile" # If not set, these will be initialised in the post init. working_line_no: int = -1 working_line_pos: int = -1 def __post_init__(self) -> None: # If the working position has not been explicitly set # then infer it from the position in the templated file. # This is accurate up until the point that any fixes have # been applied. 
if self.working_line_no == -1 or self.working_line_pos == -1: line_no, line_pos = self.templated_position() # Use the base method because we're working with a frozen class object.__setattr__(self, "working_line_no", line_no) object.__setattr__(self, "working_line_pos", line_pos) def __str__(self) -> str: return self.to_source_string() def __gt__(self, other: "PositionMarker") -> bool: return self.working_loc > other.working_loc def __lt__(self, other: "PositionMarker") -> bool: return self.working_loc < other.working_loc def __ge__(self, other: "PositionMarker") -> bool: return self.working_loc >= other.working_loc def __le__(self, other: "PositionMarker") -> bool: return self.working_loc <= other.working_loc def __eq__(self, other: Any) -> bool: if not isinstance(other, PositionMarker): return False # pragma: no cover return self.working_loc == other.working_loc @property def working_loc(self) -> Tuple[int, int]: """Location tuple for the working position.""" return self.working_line_no, self.working_line_pos def working_loc_after(self, raw: str) -> Tuple[int, int]: """Location tuple for the working position.""" return self.infer_next_position( raw, self.working_line_no, self.working_line_pos, ) @classmethod def from_point( cls, source_point: int, templated_point: int, templated_file: "TemplatedFile", **kwargs: int, # kwargs can only contain working_line positions ) -> "PositionMarker": """Convenience method for creating point markers.""" return cls( zero_slice(source_point), zero_slice(templated_point), templated_file, **kwargs, ) @classmethod def from_points( cls, start_point_marker: "PositionMarker", end_point_marker: "PositionMarker", ) -> "PositionMarker": """Construct a position marker from the section between two points.""" return cls( slice( start_point_marker.source_slice.start, end_point_marker.source_slice.stop, ), slice( start_point_marker.templated_slice.start, end_point_marker.templated_slice.stop, ), # The templated file references from the point markers # should be the same, so we're just going to pick one. # TODO: If we assert that in this function, it's actually not # true - but preliminary debugging on this did not reveal why. start_point_marker.templated_file, # Line position should be of the _start_ of the section. 
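            # (The working line/pos of the start marker is carried over,
            # since the combined marker begins where the start point begins.)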
            start_point_marker.working_line_no,
            start_point_marker.working_line_pos,
        )

    @classmethod
    def from_child_markers(
        cls, *markers: Optional["PositionMarker"]
    ) -> "PositionMarker":
        """Create a parent marker from its children."""
        source_slice = slice(
            min(m.source_slice.start for m in markers if m),
            max(m.source_slice.stop for m in markers if m),
        )
        templated_slice = slice(
            min(m.templated_slice.start for m in markers if m),
            max(m.templated_slice.stop for m in markers if m),
        )
        templated_files = {m.templated_file for m in markers if m}
        if len(templated_files) != 1:  # pragma: no cover
            raise ValueError("Attempted to make a parent marker from multiple files.")
        templated_file = templated_files.pop()
        return cls(source_slice, templated_slice, templated_file)

    def source_position(self) -> Tuple[int, int]:
        """Return the line and position of this marker in the source."""
        return self.templated_file.get_line_pos_of_char_pos(
            self.source_slice.start, source=True
        )

    def templated_position(self) -> Tuple[int, int]:
        """Return the line and position of this marker in the templated file."""
        return self.templated_file.get_line_pos_of_char_pos(
            self.templated_slice.start, source=False
        )

    @property
    def line_no(self) -> int:
        """Return the line number in the source."""
        return self.source_position()[0]

    @property
    def line_pos(self) -> int:
        """Return the line position in the source."""
        return self.source_position()[1]

    def to_source_string(self) -> str:
        """Make a formatted string of this position."""
        line, pos = self.source_position()
        return f"[L:{line:3d}, P:{pos:3d}]"

    def start_point_marker(self) -> "PositionMarker":
        """Get a point marker from the start."""
        return self.__class__.from_point(
            self.source_slice.start,
            self.templated_slice.start,
            templated_file=self.templated_file,
            # Start points also pass on the working position.
            working_line_no=self.working_line_no,
            working_line_pos=self.working_line_pos,
        )

    def end_point_marker(self) -> "PositionMarker":
        """Get a point marker from the end."""
        return self.__class__.from_point(
            self.source_slice.stop,
            self.templated_slice.stop,
            templated_file=self.templated_file,
        )

    @staticmethod
    def slice_is_point(test_slice: slice) -> bool:
        """Is this slice a point."""
        is_point: bool = test_slice.start == test_slice.stop
        return is_point

    def is_point(self) -> bool:
        """A marker is a point if it has zero length in templated and source file."""
        return self.slice_is_point(self.source_slice) and self.slice_is_point(
            self.templated_slice
        )

    @staticmethod
    def infer_next_position(raw: str, line_no: int, line_pos: int) -> Tuple[int, int]:
        """Use the raw string provided to infer the position of the next element.

        NB: Line position is 1-indexed.
        """
        # No content?
        if not raw:
            return line_no, line_pos
        split = raw.split("\n")
        return (
            line_no + len(split) - 1,
            line_pos + len(raw) if len(split) == 1 else len(split[-1]) + 1,
        )

    def with_working_position(self, line_no: int, line_pos: int) -> "PositionMarker":
        """Copy this position and replace the working position."""
        return self.__class__(
            source_slice=self.source_slice,
            templated_slice=self.templated_slice,
            templated_file=self.templated_file,
            working_line_no=line_no,
            working_line_pos=line_pos,
        )

    def is_literal(self) -> bool:
        """Infer literalness from context.

        is_literal should return True if a fix can be applied across this area
        in the templated file while being confident that the fix is still
        appropriate in the source file. This obviously applies to any slices
        which are the same in the source and the templated files.
Slices which are zero-length in the source are also "literal" because they can't be "broken" by any fixes, because they don't exist in the source. This includes meta segments and any segments added during the fixing process. This value is used for: - Ignoring linting errors in templated sections. - Whether `iter_patches` can return without recursing. - Whether certain rules (such as JJ01) are triggered. """ return self.templated_file.is_source_slice_literal(self.source_slice) def source_str(self) -> str: """Returns the string in the source at this position.""" return self.templated_file.source_str[self.source_slice] sqlfluff-2.3.5/src/sqlfluff/core/parser/match_algorithms.py000066400000000000000000000672541451700765000241220ustar00rootroot00000000000000"""Matching algorithms. These are mostly extracted from the body of either BaseSegment or BaseGrammar to un-bloat those classes. """ from collections import defaultdict from typing import DefaultDict, FrozenSet, List, Optional, Sequence, Tuple, cast from sqlfluff.core.errors import SQLParseError from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, BracketedSegment, Dedent, Indent def skip_start_index_forward_to_code( segments: Sequence[BaseSegment], start_idx: int, max_idx: Optional[int] = None ) -> int: """Move an index forward through segments until segments[index] is code.""" if max_idx is None: max_idx = len(segments) for _idx in range(start_idx, max_idx): if segments[_idx].is_code: break else: _idx = max_idx return _idx def skip_stop_index_backward_to_code( segments: Sequence[BaseSegment], stop_idx: int, min_idx: int = 0 ) -> int: """Move an index backward through segments until segments[index - 1] is code.""" for _idx in range(stop_idx, min_idx, -1): if segments[_idx - 1].is_code: break else: _idx = min_idx return _idx def first_trimmed_raw(seg: BaseSegment) -> str: """Trim whitespace off a whole element raw. Used as a helper function in BaseGrammar._look_ahead_match. For existing compound segments, we should assume that within that segment, things are internally consistent, that means rather than enumerating all the individual segments of a longer one we just dump out the whole segment, but splitting off the first element separated by whitespace. This is a) faster and also b) prevents some really horrible bugs with bracket matching. See https://github.com/sqlfluff/sqlfluff/issues/433 This fetches the _whole_ raw of a potentially compound segment to match against, trimming off any whitespace. This is the most efficient way to get at the first element of a potentially longer segment. """ s = seg.raw_upper.split(maxsplit=1) return s[0] if s else "" def first_non_whitespace( segments: Sequence[BaseSegment], start_idx: int = 0, ) -> Optional[Tuple[str, FrozenSet[str]]]: """Return the upper first non-whitespace segment in the iterable.""" for i in range(start_idx, len(segments)): _segment = segments[i] if _segment.first_non_whitespace_segment_raw_upper: return ( _segment.first_non_whitespace_segment_raw_upper, _segment.class_types, ) return None def prune_options( options: Sequence[Matchable], segments: Sequence[BaseSegment], parse_context: ParseContext, start_idx: int = 0, ) -> List[Matchable]: """Use the simple matchers to prune which options to match on. Works in the context of a grammar making choices between options such as AnyOf or the content of Delimited. 
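
    For example (an illustrative sketch): if the first code segment is
    "SELECT", an option whose simple hint is ({"INSERT"}, frozenset()) is
    pruned without a full match attempt, while any non-simple option is
    always kept.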
""" available_options = [] prune_buff = [] # Find the first code element to match against. first = first_non_whitespace(segments, start_idx=start_idx) # If we don't have an appropriate option to match against, # then we should just return immediately. Nothing will match. if not first: return list(options) first_raw, first_types = first for opt in options: simple = opt.simple(parse_context=parse_context) if simple is None: # This element is not simple, we have to do a # full match with it... available_options.append(opt) continue # Otherwise we have a simple option, so let's use # it for pruning. simple_raws, simple_types = simple matched = False # We want to know if the first meaningful element of the str_buff # matches the option, based on either simple _raw_ matching or # simple _type_ matching. # Match Raws if simple_raws and first_raw in simple_raws: # If we get here, it's matched the FIRST element of the string buffer. available_options.append(opt) matched = True # Match Types if simple_types and not matched and first_types.intersection(simple_types): # If we get here, it's matched the FIRST element of the string buffer. available_options.append(opt) matched = True if not matched: # Ditch this option, the simple match has failed prune_buff.append(opt) continue return available_options def longest_match( segments: Sequence[BaseSegment], matchers: Sequence[Matchable], idx: int, parse_context: ParseContext, ) -> Tuple[MatchResult, Optional[Matchable]]: """Return longest match from a selection of matchers. Priority is: 1. The first total match, which means we've matched all available segments or that we've hit a valid terminator. 2. The longest clean match. 3. The longest unclean match. 4. An empty match. If for #2 and #3, there's a tie for the longest match, priority is given to the first in the iterable. Returns: `tuple` of (match_object, matcher). NOTE: This matching method is the workhorse of the parser. It drives the functionality of the AnyOf & AnyNumberOf grammars, and therefore by extension the degree of branching within the parser. It's performance can be monitored using the `parse_stats` object on the context. The things which determine the performance of this method are: 1. Pruning. This method uses `prune_options()` to filter down which matchable options proceed to the full matching step. Ideally only very few do and this can handle the majority of the filtering. 2. Caching. This method uses the parse cache (`check_parse_cache` and `put_parse_cache`) on the ParseContext to speed up repetitive matching operations. As we make progress through a file there will often not be a cached value already available, and so this cache has the greatest impact within poorly optimised (or highly nested) expressions. 3. Terminators. By default, _all_ the options are evaluated, and then the longest (the `best`) is returned. The exception to this is when the match is `complete` (i.e. it matches _all_ the remaining segments), or when a match is followed by a valid terminator (i.e. a segment which indicates that the match is _effectively_ complete). In these latter scenarios, the _first_ complete or terminated match is returned. In the ideal case, the only matcher which is evaluated should be the "correct" one, and then no others should be attempted. """ max_idx = len(segments) # What is the limit # No matchers or no segments? No match. if not matchers or idx == max_idx: return MatchResult.empty_at(idx), None # Prune available options, based on their simple representation for efficiency. 
    # TODO: Given we don't allow trimming here we should be able to remove
    # some complexity from this function so that we just take the first segment.
    # Maybe that's just small potatoes though.
    available_options = prune_options(
        matchers, segments, parse_context=parse_context, start_idx=idx
    )

    # If no available options, return no match.
    if not available_options:
        return MatchResult.empty_at(idx), None

    terminators = parse_context.terminators or ()
    terminated = False
    # At parse time we should be able to count on there being a position marker.
    _cache_position = segments[idx].pos_marker
    assert _cache_position

    # Characterise this location.
    # Initial segment raw, loc, type and length of segment series.
    loc_key = (
        segments[idx].raw,
        _cache_position.working_loc,
        segments[idx].get_type(),
        # The reason that the max_idx is part of the cache key is to
        # account for scenarios where the end of the segment sequence
        # has been trimmed and we don't want to assume we can match
        # things which have now been trimmed off.
        max_idx,
    )

    best_match = MatchResult.empty_at(idx)
    best_matcher: Optional[Matchable] = None
    # iterate at this position across all the matchers
    for matcher_idx, matcher in enumerate(available_options):
        # Check parse cache.
        matcher_key = matcher.cache_key()
        res_match: Optional[MatchResult] = parse_context.check_parse_cache(
            loc_key, matcher_key
        )
        # If cache miss, match fresh and repopulate.
        # NOTE: By comparing with None, "failed" matches can still be used
        # from cache. They're falsy, but not None.
        if res_match is None:
            # Match fresh if no cache hit
            res_match = matcher.match(segments, idx, parse_context)
            # Cache it for later for performance.
            parse_context.put_parse_cache(loc_key, matcher_key, res_match)

        # Have we matched all available segments?
        if res_match and res_match.matched_slice.stop == max_idx:
            return res_match, matcher

        # Is this the best match so far?
        if res_match.is_better_than(best_match):
            best_match = res_match
            best_matcher = matcher

            # If we've got a terminator next, it's an opportunity to
            # end earlier, and claim an effectively "complete" match.
            # NOTE: This means that by specifying terminators, we can
            # significantly increase performance.
            if matcher_idx == len(available_options) - 1:
                # If it's the last option - no need to check terminators.
                # We're going to end anyway, so we can skip that step.
                terminated = True
                break
            elif terminators:
                _next_code_idx = skip_start_index_forward_to_code(
                    segments, best_match.matched_slice.stop
                )
                if _next_code_idx == len(segments):
                    # We've run out of segments, we're effectively terminated.
                    terminated = True
                    break
                for terminator in terminators:
                    terminator_match: MatchResult = terminator.match(
                        segments, _next_code_idx, parse_context
                    )
                    if terminator_match:
                        terminated = True
                        break

        if terminated:
            break

    # Return the best we found.
    return best_match, best_matcher


def next_match(
    segments: Sequence[BaseSegment],
    idx: int,
    matchers: Sequence[Matchable],
    parse_context: ParseContext,
) -> Tuple[MatchResult, Optional[Matchable]]:
    """Look ahead for matches beyond the first element of the segments list.

    NOTE: Returns *only clean* matches.

    This function also contains the performance improved hash-matching approach to
    searching for matches, which should significantly improve performance.

    Prioritise the first match, and if multiple match at the same point the longest.
    If two matches of the same length match at the same time, then it's the first in
    the iterable of matchers.

    Returns:
        `tuple` of (match_object, matcher).
""" max_idx = len(segments) # Have we got any segments to match on? if idx >= max_idx: # No? Return empty. return MatchResult.empty_at(idx), None # This next section populates a lookup of the simple matchers. # TODO: This should really be populated on instantiation of the # host grammar. # NOTE: We keep the index of the matcher so we can prioritise # later. Mathchers themselves are obtained through direct lookup. raw_simple_map: DefaultDict[str, List[int]] = defaultdict(list) type_simple_map: DefaultDict[str, List[int]] = defaultdict(list) for _idx, matcher in enumerate(matchers): simple = matcher.simple(parse_context=parse_context) if not simple: # pragma: no cover # NOTE: For all bundled dialects, this clause is true, but until # the RegexMatcher is completely deprecated (and therefore that # `.simple()` must provide a result), it is still _possible_ # to end up here. raise NotImplementedError( "All matchers passed to `._next_match()` are " "assumed to have a functioning `.simple()` option. " "In a future release it will be compulsory for _all_ " "matchables to implement `.simple()`. Please report " "this as a bug on GitHub along with your current query " f"and dialect.\nProblematic matcher: {matcher}" ) for simple_raw in simple[0]: raw_simple_map[simple_raw].append(_idx) for simple_type in simple[1]: type_simple_map[simple_type].append(_idx) # TODO: There's an optimisation we could do here where we don't iterate # through them one by one, but we use a lookup which we pre-calculate # at the start of the whole matching process. for _idx in range(idx, max_idx): seg = segments[_idx] _matcher_idxs = [] # Raw matches first. _matcher_idxs.extend(raw_simple_map[first_trimmed_raw(seg)]) # Type matches second. _type_overlap = seg.class_types.intersection(type_simple_map.keys()) for _type in _type_overlap: _matcher_idxs.extend(type_simple_map[_type]) # If no matchers to work with, continue if not _matcher_idxs: continue # If we do have them, sort them and then do the full match. _matcher_idxs.sort() for _matcher_idx in _matcher_idxs: _matcher = matchers[_matcher_idx] _match = _matcher.match(segments, _idx, parse_context) # NOTE: We're only going to consider clean matches from this method. if _match: # This will do. Return. return _match, _matcher # If we finish the loop, we didn't find a match. Return empty. return MatchResult.empty_at(idx), None def resolve_bracket( segments: Sequence[BaseSegment], opening_match: MatchResult, opening_matcher: Matchable, start_brackets: List[Matchable], end_brackets: List[Matchable], bracket_persists: List[bool], parse_context: ParseContext, nested_match: bool = False, ) -> MatchResult: """Recursive match to resolve an opened bracket. If `nested_match` is True, then inner bracket matches are also returned as child matches. Otherwise only the outer match is returned. Returns when the opening bracket is resolved. """ assert opening_match assert opening_matcher in start_brackets type_idx = start_brackets.index(opening_matcher) matched_idx = opening_match.matched_slice.stop child_matches: Tuple[MatchResult, ...] = (opening_match,) while True: # Look for the next relevant bracket. match, matcher = next_match( segments, matched_idx, matchers=start_brackets + end_brackets, parse_context=parse_context, ) # Was it a failed match? if not match: # If it was failed, then this is a problem, we started an # opening bracket but never found the end. 
            raise SQLParseError(
                "Couldn't find closing bracket for opening bracket.",
                segment=segments[opening_match.matched_slice.start],
            )

        # Did we find a closing bracket?
        if matcher in end_brackets:
            closing_idx = end_brackets.index(matcher)
            if closing_idx == type_idx:
                _persists = bracket_persists[type_idx]
                # We're closing the opening type.
                # Add the closing bracket match to the result as a child.
                child_matches += (match,)
                _match = MatchResult(
                    # Slice should span from the first to the second.
                    slice(opening_match.matched_slice.start, match.matched_slice.stop),
                    child_matches=child_matches,
                    insert_segments=(
                        (opening_match.matched_slice.stop, Indent),
                        (match.matched_slice.start, Dedent),
                    ),
                )
                # NOTE: This is how we exit the loop.
                if not _persists:
                    return _match
                return _match.wrap(
                    BracketedSegment,
                    segment_kwargs={
                        # TODO: This feels a bit weird.
                        # Could we infer it on construction?
                        "start_bracket": (segments[opening_match.matched_slice.start],),
                        "end_bracket": (segments[match.matched_slice.start],),
                    },
                )

            # Otherwise we're closing an unexpected type. This is less good.
            raise SQLParseError(
                "Found unexpected end bracket! "
                f"Was expecting {end_brackets[type_idx]}, "
                f"but got {matcher}",
                segment=segments[match.matched_slice.stop - 1],
            )

        # Otherwise we found a new opening bracket.
        assert matcher in start_brackets
        # Recurse into a new bracket matcher.
        inner_match = resolve_bracket(
            segments,
            opening_match=match,
            opening_matcher=matcher,
            start_brackets=start_brackets,
            end_brackets=end_brackets,
            bracket_persists=bracket_persists,
            parse_context=parse_context,
        )
        # This will either error, or only return once we're back out of the
        # bracket which started it. The return value will be a match result for
        # the inner BracketedSegment. We ignore the inner and don't return it
        # as we only want to mutate the outer brackets.
        matched_idx = inner_match.matched_slice.stop
        if nested_match:
            child_matches += (inner_match,)

        # Head back around the loop again to see if we can find the end...


def next_ex_bracket_match(
    segments: Sequence[BaseSegment],
    idx: int,
    matchers: Sequence[Matchable],
    parse_context: ParseContext,
    bracket_pairs_set: str = "bracket_pairs",
) -> Tuple[MatchResult, Optional[Matchable], Tuple[MatchResult, ...]]:
    """Same as `next_match` but with bracket counting.

    NB: Given we depend on `next_match` we can also utilise
    the same performance optimisations which are implemented there.

    bracket_pairs_set: Allows specific segments to override the available
        bracket pairs. See the definition of "angle_bracket_pairs" in the
        BigQuery dialect for additional context on why this exists.

    Returns:
        `tuple` of (match_object, matcher, `tuple` of inner bracketed matches).
    """
    max_idx = len(segments)

    # Have we got any segments to match on?
    if idx >= max_idx:  # No? Return empty.
        return MatchResult.empty_at(idx), None, ()

    # Get hold of the bracket matchers from the dialect, and append them
    # to the list of matchers. We get them from the relevant set on the
    # dialect.
    _, start_bracket_refs, end_bracket_refs, bracket_persists = zip(
        *parse_context.dialect.bracket_sets(bracket_pairs_set)
    )
    # These are matchables, probably StringParsers.
    start_brackets = [
        parse_context.dialect.ref(seg_ref) for seg_ref in start_bracket_refs
    ]
    end_brackets = [parse_context.dialect.ref(seg_ref) for seg_ref in end_bracket_refs]
    bracket_matchers = start_brackets + end_brackets
    _matchers = list(matchers) + bracket_matchers

    # Make some buffers
    matched_idx = idx
    child_matches: Tuple[MatchResult, ...] = ()

    while True:
        match, matcher = next_match(
            segments,
            matched_idx,
            _matchers,
            parse_context=parse_context,
        )
        # Did we match? If so, is it a target or a bracket?
        if not match or matcher in matchers:
            # If there's either no match, or we hit a target, just pass the result.
            # NOTE: This method returns the same as `next_match` in a "no match"
            # scenario, which is why we can simplify like this.
            return match, matcher, child_matches
        # If it's a _closing_ bracket, then we also return no match.
        if matcher in end_brackets:
            # Unexpected end bracket! Return no match.
            return MatchResult.empty_at(idx), None, ()

        # Otherwise we found an opening bracket before finding a target.
        # We now call the recursive function because there might be more
        # brackets inside.
        assert matcher, "If there's a match, there should be a matcher."
        # NOTE: This only returns on resolution of the opening bracket.
        bracket_match = resolve_bracket(
            segments,
            opening_match=match,
            opening_matcher=matcher,
            start_brackets=start_brackets,
            end_brackets=end_brackets,
            bracket_persists=cast(List[bool], bracket_persists),
            parse_context=parse_context,
            # Do keep the nested brackets in case the calling method
            # wants to use them.
            nested_match=True,
        )
        matched_idx = bracket_match.matched_slice.stop
        child_matches += (bracket_match,)
        # Head back around the loop and keep looking.


def greedy_match(
    segments: Sequence[BaseSegment],
    idx: int,
    parse_context: ParseContext,
    matchers: Sequence[Matchable],
    include_terminator: bool = False,
    nested_match: bool = False,
) -> MatchResult:
    """Match anything up to some defined terminator."""
    working_idx = idx
    # NOTE: _stop_idx is always reset below (after matching, before being
    # referenced), but mypy is unhappy unless we set a default value here.
    _stop_idx = idx
    # NOTE: child_matches is always tracked, but it will only ever have
    # _content_ if `nested_match` is True. It otherwise remains an empty tuple.
    child_matches: Tuple[MatchResult, ...] = ()

    while True:
        with parse_context.deeper_match(name="GreedyUntil") as ctx:
            match, matcher, inner_matches = next_ex_bracket_match(
                segments,
                idx=working_idx,
                matchers=matchers,
                parse_context=ctx,
            )
        if nested_match:
            child_matches += inner_matches

        # No match? That means we've not found any terminators.
        if not match:
            # Claim everything left.
            return MatchResult(slice(idx, len(segments)), child_matches=child_matches)

        _start_idx = match.matched_slice.start
        _stop_idx = match.matched_slice.stop
        # NOTE: For some terminators we only count them if they're preceded
        # by whitespace, and others we don't. In principle, we aim that for
        # _keywords_ we require whitespace, and for symbols we don't.
        # We do this by looking at the `simple` method of the returned
        # matcher, and if it's entirely alphabetical (as defined by
        # str.isalpha()) then we infer that it's a keyword, and therefore
        # _does_ require whitespace before it.
        assert matcher, f"Match without matcher: {match}"
        _simple = matcher.simple(parse_context)
        assert _simple, f"Terminators require a simple method: {matcher}"
        _strings, _types = _simple
        # NOTE: Typed matchers aren't common here, but we assume that they
        # _don't_ require preceding whitespace.
        # Do we need to enforce whitespace preceding?
        if all(_s.isalpha() for _s in _strings) and not _types:
            allowable_match = False
            # NOTE: Edge case - if we're matching the _first_ element (i.e. that
            # there are no `pre` segments) then we _do_ allow it.
            # TODO: Review whether this is as designed, but it is consistent
            # with past behaviour.
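            # An illustrative sketch (assumed fragment): with "FROM" as a
            # keyword terminator, "... 1 FROM t" may terminate the match here,
            # but ")FROM t" would not, because the keyword is not preceded by
            # whitespace or a newline.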
            if _start_idx == working_idx:
                allowable_match = True
            # Work backward through previous segments looking for whitespace.
            for _idx in range(_start_idx, working_idx, -1):
                if segments[_idx - 1].is_meta:
                    continue
                elif segments[_idx - 1].is_type("whitespace", "newline"):
                    allowable_match = True
                    break
                else:
                    # Found something other than metas and whitespace.
                    break

            # If this match isn't preceded by whitespace and that is
            # a requirement, then we can't use it. Carry on...
            if not allowable_match:
                working_idx = _stop_idx
                # Loop around, don't return yet
                continue

        # Otherwise, it's allowable!
        break

    # Return without any child matches or inserts. Greedy Matching
    # shouldn't be used for mutation.
    if include_terminator:
        return MatchResult(slice(idx, _stop_idx), child_matches=child_matches)

    # If we're _not_ including the terminator, we need to work back a little.
    # If it's preceded by any non-code, we can't claim that.
    # Work backwards so we don't include it.
    _stop_idx = skip_stop_index_backward_to_code(
        segments, match.matched_slice.start, idx
    )

    # If we went all the way back to `idx`, then ignore the _stop_idx.
    # There isn't any code in the gap _anyway_ - so there's no point trimming.
    if idx == _stop_idx:
        # TODO: I don't really like this rule, it feels like a hack.
        # Review whether it should be here.
        return MatchResult(
            slice(idx, match.matched_slice.start), child_matches=child_matches
        )

    # Otherwise return the trimmed version.
    return MatchResult(slice(idx, _stop_idx), child_matches=child_matches)


def trim_to_terminator(
    segments: Sequence[BaseSegment],
    idx: int,
    terminators: Sequence[Matchable],
    parse_context: ParseContext,
) -> int:
    """Trim forward segments based on terminators.

    Given a forward set of segments, trim elements from `segments` to
    `tail` by using a `greedy_match()` to identify terminators.

    If no terminators are found, no change is made.

    NOTE: This method is designed to replace a `max_idx`:

    .. code-block:: python

        max_idx = _trim_to_terminator(segments[:max_idx], idx, ...)
    """
    # Is there anything left to match on?
    if idx >= len(segments):
        # Nope. No need to trim.
        return len(segments)

    # NOTE: If there is a terminator _immediately_, then greedy
    # match will appear to not match (because there's "nothing" before
    # the terminator). To resolve that case, we first match immediately
    # on the terminators and handle that case explicitly if it occurs.
    with parse_context.deeper_match(name="Trim-GreedyA-@0") as ctx:
        pruned_terms = prune_options(
            terminators, segments, start_idx=idx, parse_context=ctx
        )
        for term in pruned_terms:
            if term.match(segments, idx, ctx):
                # One matched immediately. Claim everything to the tail.
                return idx

    # If the above case didn't match then we proceed as expected.
    with parse_context.deeper_match(
        name="Trim-GreedyB-@0", track_progress=False
    ) as ctx:
        term_match = greedy_match(
            segments,
            idx,
            parse_context=ctx,
            matchers=terminators,
        )

    # Greedy match always returns.
    # Skip backward from wherever it got to (either a terminator, or
    # the end of the sequence).
    return skip_stop_index_backward_to_code(
        segments, term_match.matched_slice.stop, idx
    )
sqlfluff-2.3.5/src/sqlfluff/core/parser/match_result.py000066400000000000000000000300261451700765000232520ustar00rootroot00000000000000"""Source for the MatchResult class.

This should be the default response from any `match` method.
""" from collections import defaultdict from dataclasses import dataclass, field from typing import ( TYPE_CHECKING, Any, DefaultDict, Dict, List, Optional, Sequence, Tuple, Type, Union, cast, ) from sqlfluff.core.helpers.slice import slice_length from sqlfluff.core.parser.markers import PositionMarker if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments import BaseSegment, MetaSegment, RawSegment def _get_point_pos_at_idx( segments: Sequence["BaseSegment"], idx: int ) -> PositionMarker: if idx < len(segments): _next_pos = segments[idx].pos_marker assert _next_pos, "Segments passed to .apply() should all have position." return _next_pos.start_point_marker() else: _prev_pos = segments[idx - 1].pos_marker assert _prev_pos, "Segments passed to .apply() should all have position." return _prev_pos.end_point_marker() @dataclass(frozen=True) class MatchResult: """This should be the default response from any `match` method. All references and indices are in reference to a single root tuple of segments. This result contains enough information to actually create the nested tree structure, but shouldn't actually contain any new segments itself. That means keeping information about: 1. Ranges of segments which should be included segments to be created. 2. References to the segment classes which we would create. 3. Information about any _new_ segments to add in the process, such as MetaSegment classes. Given the segments aren't yet "nested", the structure of this result *will* need to be nested, ideally self nested. In the case of finding unparsable locations, we should return the "best" result, referencing the furthest that we got. That allows us to identify those parsing issues and create UnparsableSegment classes later. """ # Slice in the reference tuple matched_slice: slice # Reference to the kind of segment to create. # NOTE: If this is null, it means we've matched a sequence of segments # but not yet created a container to put them in. matched_class: Optional[Type["BaseSegment"]] = None # kwargs to pass to the segment on creation. segment_kwargs: Dict[str, Any] = field(default_factory=dict) # Types and indices to add in new segments (they'll be meta segments) insert_segments: Tuple[Tuple[int, Type["MetaSegment"]], ...] = field( default_factory=tuple ) # Child segment matches (this is the recursive bit) child_matches: Tuple["MatchResult", ...] = field(default_factory=tuple) def __post_init__(self) -> None: """Do some lightweight validation post instantiation.""" if not slice_length(self.matched_slice): # Zero length matches with inserts are allowed, but not with # matched_class or child_matches. assert not self.matched_class, ( "Tried to create zero length MatchResult with " "`matched_class`. This MatchResult is invalid. " f"{self.matched_class} @{self.matched_slice}" ) assert not self.child_matches, ( "Tried to create zero length MatchResult with " "`child_matches`. Is this allowed?! 
" f"Result: {self}" ) def __len__(self) -> int: return slice_length(self.matched_slice) def __bool__(self) -> bool: """A MatchResult is truthy if it has length or inserts.""" return len(self) > 0 or bool(self.insert_segments) def stringify(self, indent: str = "") -> str: """Pretty print a match for debugging.""" prefix = f"Match ({self.matched_class}): {self.matched_slice}" buffer = prefix for key, value in self.segment_kwargs.items(): buffer += f"\n {indent}-{key}: {value!r}" if self.insert_segments: for idx, insert in self.insert_segments: buffer += f"\n {indent}+{idx}: {insert}" if self.child_matches: for child in self.child_matches: buffer += f"\n {indent}+{child.stringify(indent + ' ')}" return buffer @classmethod def empty_at(cls, idx: int) -> "MatchResult": """Create an empty match at a particular index.""" return cls(slice(idx, idx)) def is_better_than(self, other: "MatchResult") -> bool: """A match is better compared on length.""" return len(self) > len(other) def append( self, other: "MatchResult", insert_segments: Tuple[Tuple[int, Type["MetaSegment"]], ...] = (), ) -> "MatchResult": """Combine another subsequent match onto this one. NOTE: Because MatchResult is frozen, this returns a new match. """ # If the current match is empty, just return the other. if not len(self) and not self.insert_segments: return other # If the same is true of the other, just return self. if not len(other) and not other.insert_segments: return self # pragma: no cover # Otherwise the two must follow each other. # NOTE: A gap is allowed, but is assumed to be included in the # match. assert self.matched_slice.stop <= other.matched_slice.start new_slice = slice(self.matched_slice.start, other.matched_slice.stop) child_matches: Tuple[MatchResult, ...] = () for match in (self, other): # If it's got a matched class, add it as a child. if match.matched_class: child_matches += (match,) # Otherwise incorporate else: # Note: We're appending to the optional insert segments # provided in the kwargs. insert_segments += match.insert_segments child_matches += match.child_matches return MatchResult( new_slice, insert_segments=insert_segments, child_matches=child_matches, ) def wrap( self, outer_class: Type["BaseSegment"], insert_segments: Tuple[Tuple[int, Type["MetaSegment"]], ...] = (), segment_kwargs: Dict[str, Any] = {}, ) -> "MatchResult": """Wrap this result with an outer class. NOTE: Because MatchResult is frozen, this returns a new match. """ # If it's a failed (empty) match, then just pass straight # through. It's not valid to add a matched class to an empty # result. if not slice_length(self.matched_slice) and not self.insert_segments: assert not insert_segments, "Cannot wrap inserts onto an empty match." return self child_matches: Tuple[MatchResult, ...] if self.matched_class: # If the match already has a class, then make # the current one and child match and clear the # other buffers. child_matches = (self,) else: # Otherwise flatten the existing match into # the new one. insert_segments = self.insert_segments + insert_segments child_matches = self.child_matches # Otherwise flatten the content return MatchResult( self.matched_slice, matched_class=outer_class, segment_kwargs=segment_kwargs, insert_segments=insert_segments, child_matches=child_matches, ) def apply(self, segments: Tuple["BaseSegment", ...]) -> Tuple["BaseSegment", ...]: """Actually this match to segments to instantiate. This turns a theoretical match into a nested structure of segments. 
We handle child segments _first_ so that we can then include them when creating the parent. That means sequentially working through the children and any inserts. If there are overlaps, then we have a problem, and we should abort. """ result_segments: Tuple["BaseSegment", ...] = () if not slice_length(self.matched_slice): assert not self.matched_class, ( "Tried to apply zero length MatchResult with " "`matched_class`. This MatchResult is invalid. " f"{self.matched_class} @{self.matched_slice}" ) assert not self.child_matches, ( "Tried to apply zero length MatchResult with " "`child_matches`. This MatchResult is invalid. " f"Result: {self}" ) if self.insert_segments: assert segments, "Cannot insert segments without reference position." for idx, seg in self.insert_segments: assert idx == self.matched_slice.start, ( f"Tried to insert @{idx} outside of matched " f"slice {self.matched_slice}" ) _pos = _get_point_pos_at_idx(segments, idx) result_segments += (seg(pos_marker=_pos),) return result_segments assert len(segments) >= self.matched_slice.stop, ( f"Matched slice ({self.matched_slice}) sits outside segment " f"bounds: {len(segments)}" ) # Which are the locations we need to care about? trigger_locs: DefaultDict[ int, List[Union[MatchResult, Type["MetaSegment"]]] ] = defaultdict(list) # Add the inserts first... for insert in self.insert_segments: trigger_locs[insert[0]].append(insert[1]) # ...and then the matches for match in self.child_matches: trigger_locs[match.matched_slice.start].append(match) # Then work through creating any subsegments. max_idx = self.matched_slice.start for idx in sorted(trigger_locs.keys()): # Have we passed any untouched segments? if idx > max_idx: # If so, add them in unchanged. result_segments += segments[max_idx:idx] max_idx = idx elif idx < max_idx: # pragma: no cover raise ValueError( "Segment skip ahead error. An outer match contains " "overlapping child matches. This MatchResult was " "wrongly constructed." ) # Then work through each of the triggers. for trigger in trigger_locs[idx]: # If it's a match, apply it. if isinstance(trigger, MatchResult): result_segments += trigger.apply(segments=segments) # Update the end slice. max_idx = trigger.matched_slice.stop continue # Otherwise it's a segment. # Get the location from the next segment unless there isn't one. _pos = _get_point_pos_at_idx(segments, idx) result_segments += (trigger(pos_marker=_pos),) # If we finish working through the triggers and there's # still something left, then add that too. if max_idx < self.matched_slice.stop: result_segments += segments[max_idx : self.matched_slice.stop] if not self.matched_class: return result_segments # Otherwise construct the subsegment new_seg: "BaseSegment" if self.matched_class.class_is_type("raw"): _raw_type = cast(Type["RawSegment"], self.matched_class) assert len(result_segments) == 1 # TODO: Should this be a generic method on BaseSegment and RawSegment? # It feels a little strange to be this specific here. 
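            # A raw match wraps exactly one raw segment, so it's rebuilt from
            # that segment's raw string and position marker (plus any stored
            # segment kwargs).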
new_seg = _raw_type( raw=result_segments[0].raw, pos_marker=result_segments[0].pos_marker, **self.segment_kwargs, ) else: new_seg = self.matched_class( segments=result_segments, **self.segment_kwargs ) return (new_seg,) sqlfluff-2.3.5/src/sqlfluff/core/parser/matchable.py000066400000000000000000000050421451700765000225000ustar00rootroot00000000000000"""The definition of a matchable interface.""" import copy from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any, FrozenSet, Optional, Sequence, Tuple, TypeVar if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment T = TypeVar("T", bound="Matchable") class Matchable(ABC): """A base object defining the matching interface.""" # Matchables are also not meta unless otherwise defined is_meta = False @abstractmethod def is_optional(self) -> bool: """Return whether this element is optional.""" @abstractmethod def simple( self, parse_context: "ParseContext", crumbs: Optional[Tuple[str, ...]] = None ) -> Optional[Tuple[FrozenSet[str], FrozenSet[str]]]: """Try to obtain a simple response from the matcher. Returns: None - if not simple. Tuple of two sets of strings if simple. The first is a set of uppercase raw strings which would match. The second is a set of segment types that would match. NOTE: the crumbs kwarg is designed to be used by Ref to detect recursion. """ @abstractmethod def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> "MatchResult": """Match against this matcher.""" def copy(self: T, **kwargs: Any) -> T: # pragma: no cover """Copy this Matchable. Matchable objects are usually copied during dialect inheritance. One dialect might make a copy (usually with some modifications) to a dialect element of a parent dialect which it can then use itself. This provides a little more modularity in dialect definition. NOTE: This method on the base class is not usually used, as the base matchable doesn't have any options for customisation. It is more frequently used by grammar objects such as Sequence, which provide more options for customisation. Those grammar objects should redefine this method accordingly. """ return copy.copy(self) @abstractmethod def cache_key(self) -> str: """A string to use for cache keying. This string should be unique at the parsing stage such that if there has already been a match against this key for a set of segments, that we can reuse that match. 
""" sqlfluff-2.3.5/src/sqlfluff/core/parser/parser.py000066400000000000000000000060271451700765000220600ustar00rootroot00000000000000"""Defines the Parser class.""" from typing import TYPE_CHECKING, Optional, Sequence, Type from sqlfluff.core.config import FluffConfig from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.helpers import check_still_complete if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.segments import BaseFileSegment, BaseSegment class Parser: """Instantiates parsed queries from a sequence of lexed raw segments.""" def __init__( self, config: Optional[FluffConfig] = None, dialect: Optional[str] = None ): # Allow optional config and dialect self.config = FluffConfig.from_kwargs(config=config, dialect=dialect) self.RootSegment: Type[BaseFileSegment] = self.config.get( "dialect_obj" ).get_root_segment() def parse( self, segments: Sequence["BaseSegment"], fname: Optional[str] = None, parse_statistics: bool = False, ) -> Optional["BaseSegment"]: """Parse a series of lexed tokens using the current dialect.""" if not segments: # pragma: no cover # This should normally never happen because there will usually # be an end_of_file segment. It would probably only happen in # api use cases. return None # NOTE: This is the only time we use the parse context not in the # context of a context manager. That's because it's the initial # instantiation. ctx = ParseContext.from_config(config=self.config) # Kick off parsing with the root segment. The BaseFileSegment has # a unique entry point to facilitate exaclty this. All other segments # will use the standard .match() route. root = self.RootSegment.root_parse( tuple(segments), fname=fname, parse_context=ctx ) # Basic Validation, that we haven't dropped anything. check_still_complete(tuple(segments), (root,), ()) if parse_statistics: # pragma: no cover # NOTE: We use ctx.logger.warning here to output the statistics. # It's not particularly beautiful, but for the users who do utilise # this functionality, I don't think they mind. ¯\_(ツ)_/¯ # In the future, this clause might become unnecessary. ctx.logger.warning("==== Parse Statistics ====") for key in ctx.parse_stats: if key == "next_counts": continue ctx.logger.warning(f"{key}: {ctx.parse_stats[key]}") ctx.logger.warning("## Tokens following un-terminated matches") ctx.logger.warning( "Adding terminator clauses to catch these may improve performance." ) for key, val in sorted( ctx.parse_stats["next_counts"].items(), reverse=True, key=lambda item: item[1], ): ctx.logger.warning(f"{val}: {key!r}") ctx.logger.warning("==== End Parse Statistics ====") return root sqlfluff-2.3.5/src/sqlfluff/core/parser/parsers.py000066400000000000000000000251061451700765000222420ustar00rootroot00000000000000"""Individual segment parsers. Matchable objects which return individual segments. """ from abc import abstractmethod from typing import Any, Collection, Dict, Optional, Sequence, Tuple, Type from uuid import uuid4 import regex from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments import BaseSegment, RawSegment from sqlfluff.core.parser.types import SimpleHintType class BaseParser(Matchable): """An abstract class from which other Parsers should inherit.""" # Meta segments are handled separately. All Parser elements # are assumed to be not meta. 
    is_meta: bool = False

    @abstractmethod
    def __init__(
        self,
        raw_class: Type[RawSegment],
        type: Optional[str] = None,
        optional: bool = False,
        # The following kwargs are passed on to the segment:
        trim_chars: Optional[Tuple[str, ...]] = None,
    ) -> None:
        self.raw_class = raw_class
        # Store instance_types rather than just type to allow
        # for multiple possible types to be supported in derivative
        # classes.
        self._instance_types: Tuple[str, ...] = (type or raw_class.type,)
        self.optional = optional
        self._trim_chars = trim_chars
        # Generate a cache key
        self._cache_key = uuid4().hex

    def cache_key(self) -> str:
        """Get the cache key for this parser.

        For parsers, they're unique per-instance.
        """
        return self._cache_key

    def is_optional(self) -> bool:
        """Return whether this element is optional."""
        return self.optional

    def _match_at(self, idx: int) -> MatchResult:
        """Construct a MatchResult at a given index.

        This is a helper function for reuse by other parsers.
        """
        segment_kwargs: Dict[str, Any] = {}
        if self._instance_types:
            segment_kwargs["instance_types"] = self._instance_types
        if self._trim_chars:
            segment_kwargs["trim_chars"] = self._trim_chars
        return MatchResult(
            matched_slice=slice(idx, idx + 1),
            matched_class=self.raw_class,
            segment_kwargs=segment_kwargs,
        )


class TypedParser(BaseParser):
    """An object which matches and returns raw segments based on types."""

    def __init__(
        self,
        template: str,
        raw_class: Type[RawSegment],
        type: Optional[str] = None,
        optional: bool = False,
        trim_chars: Optional[Tuple[str, ...]] = None,
    ) -> None:
        """Initialize a new instance of the class.

        Args:
            template (str): The template type.
            raw_class (Type[RawSegment]): The raw segment class.
            type (Optional[str]): The type of the instance.
            optional (bool): Whether the instance is optional.
            trim_chars (Optional[Tuple[str, ...]]): The characters to trim.

        Returns:
            None
        """
        # NB: the template in this case is the _target_ type.
        # The type kwarg is the eventual type.
        self.template = template
        # Pre-calculate the appropriate frozenset for matching later.
        self._target_types = frozenset((template,))
        super().__init__(
            raw_class=raw_class,
            optional=optional,
            trim_chars=trim_chars,
        )
        # NOTE: We override the instance types after initialising the base
        # class. We want to ensure that re-matching is possible by ensuring that
        # the `type` pre-matching is still present post-match even if it's not
        # part of the natural type hierarchy for the new `raw_class`.
        # The new `type` becomes the "primary" type, but the template will still
        # be part of the resulting `class_types`.
        # We do this here rather than in the base class to keep the dialect-facing
        # API the same.
        self._instance_types: Tuple[str, ...] = ()
        # Primary type if set.
        if type is not None:
            self._instance_types += (type,)
        # New root types
        if type != raw_class.type:
            self._instance_types += (raw_class.type,)
        # Template type (if it's not in the subclasses of the raw_class).
        if not raw_class.class_is_type(template):
            self._instance_types += (template,)

    def __repr__(self) -> str:
        """Return a string representation of the TypedParser object."""
        return f"<TypedParser: {self.template!r}>"

    def simple(
        self, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None
    ) -> SimpleHintType:
        """Check if the matcher supports the uppercase hash matching route.

        The TypedParser segment does not support matching against raw strings,
        but it does support matching against types. Matching is done against both the
        template and the resulting type, to support re-matching.

        Args:
            parse_context (ParseContext): The parse context.
            crumbs (Optional[Tuple[str, ...]], optional): The crumbs.
                Defaults to None.

        Returns:
            SimpleHintType: A set of target types.
        """
        return frozenset(), self._target_types

    def match(
        self,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match against this matcher."""
        if segments[idx].is_type(self.template):
            return self._match_at(idx)
        return MatchResult.empty_at(idx)


class StringParser(BaseParser):
    """An object which matches and returns raw segments based on strings."""

    def __init__(
        self,
        template: str,
        raw_class: Type[RawSegment],
        type: Optional[str] = None,
        optional: bool = False,
        trim_chars: Optional[Tuple[str, ...]] = None,
    ):
        self.template = template.upper()
        # Create list version upfront to avoid recreating it multiple times.
        self._simple = frozenset((self.template,))
        super().__init__(
            raw_class=raw_class,
            type=type,
            optional=optional,
            trim_chars=trim_chars,
        )

    def __repr__(self) -> str:
        return f"<StringParser: {self.template!r}>"

    def simple(
        self, parse_context: "ParseContext", crumbs: Optional[Tuple[str, ...]] = None
    ) -> SimpleHintType:
        """Return simple options for this matcher.

        Because string matchers are not case sensitive we can
        just return the template here.
        """
        return self._simple, frozenset()

    def match(
        self,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match against this matcher.

        NOTE: We check that the segment is also code to avoid matching
        unexpected comments.
        """
        if segments[idx].raw_upper == self.template and segments[idx].is_code:
            return self._match_at(idx)
        return MatchResult.empty_at(idx)


class MultiStringParser(BaseParser):
    """An object which matches and returns raw segments on a collection of strings."""

    def __init__(
        self,
        templates: Collection[str],
        raw_class: Type[RawSegment],
        type: Optional[str] = None,
        optional: bool = False,
        trim_chars: Optional[Tuple[str, ...]] = None,
    ):
        self.templates = {template.upper() for template in templates}
        # Create list version upfront to avoid recreating it multiple times.
        self._simple = frozenset(self.templates)
        super().__init__(
            raw_class=raw_class,
            type=type,
            optional=optional,
            trim_chars=trim_chars,
        )

    def __repr__(self) -> str:
        return f"<MultiStringParser: {self.templates!r}>"

    def simple(
        self, parse_context: "ParseContext", crumbs: Optional[Tuple[str, ...]] = None
    ) -> SimpleHintType:
        """Return simple options for this matcher.

        Because string matchers are not case sensitive we can
        just return the templates here.
        """
        return self._simple, frozenset()

    def match(
        self,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Match against this matcher.

        NOTE: We check that the segment is also code to avoid matching
        unexpected comments.
""" if segments[idx].is_code and segments[idx].raw_upper in self.templates: return self._match_at(idx) return MatchResult.empty_at(idx) class RegexParser(BaseParser): """An object which matches and returns raw segments based on a regex.""" def __init__( self, template: str, raw_class: Type[RawSegment], type: Optional[str] = None, optional: bool = False, anti_template: Optional[str] = None, trim_chars: Optional[Tuple[str, ...]] = None, ): # Store the optional anti-template self.template = template self.anti_template = anti_template # Compile regexes upfront to avoid repeated overhead self._anti_template = regex.compile(anti_template or r"", regex.IGNORECASE) self._template = regex.compile(template, regex.IGNORECASE) super().__init__( raw_class=raw_class, type=type, optional=optional, trim_chars=trim_chars, ) def __repr__(self) -> str: return f"" def simple( cls, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None ) -> None: """Does this matcher support a uppercase hash matching route? Regex segment does NOT for now. We might need to later for efficiency. """ return None def match( self, segments: Sequence["BaseSegment"], idx: int, parse_context: "ParseContext", ) -> MatchResult: """Match against this matcher. NOTE: This method uses .raw_upper and so case sensitivity is not supported. """ _raw = segments[idx].raw_upper result = self._template.match(_raw) if result: result_string = result.group(0) # Check that we've fully matched if result_string == _raw: # Check that the anti_template (if set) hasn't also matched if not self.anti_template or not self._anti_template.match(_raw): return self._match_at(idx) return MatchResult.empty_at(idx) sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/000077500000000000000000000000001451700765000220325ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/__init__.py000066400000000000000000000032631451700765000241470ustar00rootroot00000000000000"""Definitions of the segment classes.""" from sqlfluff.core.parser.segments.base import ( BaseSegment, SourceFix, UnparsableSegment, ) from sqlfluff.core.parser.segments.bracketed import BracketedSegment from sqlfluff.core.parser.segments.common import ( BinaryOperatorSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, IdentifierSegment, LiteralSegment, NewlineSegment, SymbolSegment, UnlexableSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.segments.file import BaseFileSegment from sqlfluff.core.parser.segments.generator import SegmentGenerator from sqlfluff.core.parser.segments.keyword import KeywordSegment, LiteralKeywordSegment from sqlfluff.core.parser.segments.meta import ( Dedent, EndOfFile, ImplicitIndent, Indent, MetaSegment, TemplateLoop, TemplateSegment, ) from sqlfluff.core.parser.segments.raw import RawSegment __all__ = ( "BaseSegment", "BaseFileSegment", "UnparsableSegment", "BracketedSegment", "SegmentGenerator", "RawSegment", "CodeSegment", "UnlexableSegment", "CommentSegment", "WhitespaceSegment", "NewlineSegment", "KeywordSegment", "LiteralKeywordSegment", "SymbolSegment", "MetaSegment", "Indent", "Dedent", "ImplicitIndent", "TemplateSegment", "EndOfFile", "TemplateLoop", "SourceFix", "IdentifierSegment", "LiteralSegment", "BinaryOperatorSegment", "CompositeBinaryOperatorSegment", "ComparisonOperatorSegment", "CompositeComparisonOperatorSegment", "WordSegment", ) 
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/base.py000066400000000000000000001374421451700765000233270ustar00rootroot00000000000000"""Base segment definitions.

Here we define:

- BaseSegment. This is the root class for all segments, and is designed
  to hold other subsegments.
- UnparsableSegment. A special wrapper to indicate that the parse function
  failed on this block of segments and to prevent further analysis.
"""

# Import annotations for py 3.7 to allow `weakref.ReferenceType["BaseSegment"]`
from __future__ import annotations

import logging
import weakref
from dataclasses import dataclass
from io import StringIO
from itertools import chain
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    ClassVar,
    Dict,
    FrozenSet,
    Iterator,
    List,
    Optional,
    Sequence,
    Set,
    Tuple,
    Type,
    Union,
    cast,
)
from uuid import uuid4

from sqlfluff.core.cached_property import cached_property
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.helpers import trim_non_code_segments
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.matchable import Matchable
from sqlfluff.core.parser.types import SimpleHintType

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.dialects import Dialect
    from sqlfluff.core.parser.segments.raw import RawSegment

# Instantiate the linter logger (only for use in methods involved with fixing.)
linter_logger = logging.getLogger("sqlfluff.linter")

TupleSerialisedSegment = Tuple[str, Union[str, Tuple["TupleSerialisedSegment", ...]]]
RecordSerialisedSegment = Dict[
    str, Union[None, str, "RecordSerialisedSegment", List["RecordSerialisedSegment"]]
]


@dataclass(frozen=True)
class SourceFix:
    """A stored reference to a fix in the non-templated file."""

    edit: str
    source_slice: slice
    # TODO: It might be possible to refactor this to not require
    # a templated_slice (because in theory it's unnecessary).
    # However much of the fix handling code assumes we need
    # a position in the templated file to interpret it.
    # More work required to achieve that if desired.
    templated_slice: slice

    def __hash__(self) -> int:
        # Only hash based on the source slice, not the
        # templated slice (which might change)
        return hash((self.edit, self.source_slice.start, self.source_slice.stop))


@dataclass(frozen=True)
class PathStep:
    """An element of the response to BaseSegment.path_to().

    Attributes:
        segment (:obj:`BaseSegment`): The segment in the chain.
        idx (int): The index of the target within its `segment`.
        len (int): The number of children `segment` has.
        code_idxs (:obj:`tuple` of int): The indices which contain code.
    """

    segment: "BaseSegment"
    idx: int
    len: int
    code_idxs: Tuple[int, ...]


def _iter_base_types(
    new_type: Optional[str], bases: Tuple[Type["BaseSegment"]]
) -> Iterator[str]:
    """Iterate types for a new segment class.

    This is a helper method used in the construction of SegmentMetaclass
    so that we can construct a frozenset directly off the results.
    """
    if new_type is not None:
        yield new_type
    for base in bases:
        yield from base._class_types


class SegmentMetaclass(type, Matchable):
    """The metaclass for segments.

    This metaclass provides pre-computed class attributes
    based on the defined attributes of specific classes.

    Segments as a *type* should also implement the Matchable interface.
    Once instantiated they no longer need to, but we should be able to
    treat the BaseSegment class as a Matchable interface.
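
    As an illustrative sketch (the class name here is hypothetical), a
    definition like:

    .. code-block:: python

        class ExampleSegment(BaseSegment):
            type = "example"

    would be created with ``_class_types == frozenset({"example", "base"})``,
    computed once at class definition time rather than per instance.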
""" def __new__( mcs: Type[type], name: str, bases: Tuple[Type["BaseSegment"]], class_dict: Dict[str, Any], ) -> SegmentMetaclass: """Generate a new class. We use the `type` class attribute for the class and it's parent base classes to build up a `set` of types on construction to use in type checking later in the process. Doing it on construction here saves calculating it at runtime for each instance of the class. """ # Create a cache uuid on definition. # We do it here so every _definition_ of a segment # gets a unique UUID regardless of dialect. class_dict["_cache_key"] = uuid4().hex # Populate the `_class_types` property on creation. added_type = class_dict.get("type", None) class_dict["_class_types"] = frozenset(_iter_base_types(added_type, bases)) return cast(Type["BaseSegment"], type.__new__(mcs, name, bases, class_dict)) class BaseSegment(metaclass=SegmentMetaclass): """The base segment element. This defines the base element which drives both Lexing, Parsing and Linting. A large chunk of the logic which defines those three operations are centered here. Much of what is defined in the BaseSegment is also used by its many subclasses rather than directly here. For clarity, the `BaseSegment` is mostly centered around a segment which contains other subsegments. For segments which don't have *children*, refer to the `RawSegment` class (which still inherits from this one). Segments are used both as instances to hold chunks of text, but also as classes themselves where they function a lot like grammars, and return instances of themselves when they match. The many classmethods in this class are usually to serve their purpose as a matcher. """ # `type` should be the *category* of this kind of segment type: ClassVar[str] = "base" _class_types: ClassVar[FrozenSet[str]] # NOTE: Set by SegmentMetaclass # We define the type here but no value. Subclasses must provide a value. match_grammar: Matchable comment_separate = False is_meta = False # Are we able to have non-code at the start or end? can_start_end_non_code = False # Can we allow it to be empty? Usually used in combination # with the can_start_end_non_code. allow_empty = False # What other kwargs need to be copied when applying fixes. additional_kwargs: List[str] = [] pos_marker: Optional[PositionMarker] # NOTE: Cache key is generated by the SegmentMetaclass _cache_key: str # _preface_modifier used in ._preface() _preface_modifier: str = "" # Optional reference to the parent. Stored as a weakref. _parent: Optional[weakref.ReferenceType["BaseSegment"]] = None _parent_idx: Optional[int] = None def __init__( self, segments: Tuple["BaseSegment", ...], pos_marker: Optional[PositionMarker] = None, uuid: Optional[int] = None, ) -> None: if len(segments) == 0: # pragma: no cover raise RuntimeError( "Setting {} with a zero length segment set. This shouldn't " "happen.".format(self.__class__) ) if not pos_marker: # If no pos given, work it out from the children. if all(seg.pos_marker for seg in segments): pos_marker = PositionMarker.from_child_markers( *(seg.pos_marker for seg in segments) ) assert not hasattr(self, "parse_grammar"), "parse_grammar is deprecated." self.pos_marker = pos_marker self.segments: Tuple["BaseSegment", ...] = segments # Tracker for matching when things start moving. # NOTE: We're storing the .int attribute so that it's swifter # for comparisons. 
        self.uuid = uuid or uuid4().int

        self.set_as_parent(recurse=False)
        self.validate_non_code_ends()
        self._recalculate_caches()

    def __setattr__(self, key: str, value: Any) -> None:
        try:
            if key == "segments":
                self._recalculate_caches()
        except (AttributeError, KeyError):  # pragma: no cover
            pass

        super().__setattr__(key, value)

    def __eq__(self, other: Any) -> bool:
        # NB: this should also work for RawSegment
        if not isinstance(other, BaseSegment):
            return False  # pragma: no cover
        # If the uuids match, then we can easily return early.
        if self.uuid == other.uuid:
            return True
        return (
            # Same class NAME. (could be constructed elsewhere)
            self.__class__.__name__ == other.__class__.__name__
            and (self.raw == other.raw)
            # Both must have a non-null position marker to compare.
            and self.pos_marker is not None
            and other.pos_marker is not None
            # We only match that the *start* is the same. This means we can
            # still effectively construct searches to look for segments.
            # This is important for .apply_fixes().
            # NOTE: `.working_loc` is much more performant than creating
            # a new start point marker for comparison.
            and (self.pos_marker.working_loc == other.pos_marker.working_loc)
        )

    @cached_property
    def _hash(self) -> int:
        """Cache the hash property to avoid recalculating it often."""
        return hash(
            (
                self.__class__.__name__,
                self.raw,
                # NOTE: We use the start of the source slice because it's
                # the lowest cost way of getting a reliable location in the
                # source file for deduplication.
                self.pos_marker.source_slice.start if self.pos_marker else None,
            )
        )

    def __hash__(self) -> int:
        return self._hash

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: ({self.pos_marker})>"

    def __getstate__(self) -> Dict[str, Any]:
        """Get the current state to allow pickling."""
        s = self.__dict__.copy()
        # Kill the parent ref. It won't pickle well.
        s["_parent"] = None
        return s

    def __setstate__(self, state: Dict[str, Any]) -> None:
        """Set state during process of unpickling."""
        self.__dict__ = state.copy()
        # Once state is ingested - repopulate, NOT recursing.
        # Child segments will do it for themselves on unpickling.
        self.set_as_parent(recurse=False)

    # ################ PRIVATE PROPERTIES

    @property
    def _comments(self) -> List["BaseSegment"]:
        """Returns only the comment elements of this segment."""
        return [seg for seg in self.segments if seg.is_type("comment")]

    @property
    def _non_comments(self) -> List["BaseSegment"]:  # pragma: no cover TODO?
        """Returns only the non-comment elements of this segment."""
        return [seg for seg in self.segments if not seg.is_type("comment")]

    # ################ PUBLIC PROPERTIES

    @cached_property
    def is_code(self) -> bool:
        """Return True if this segment contains any code."""
        return any(seg.is_code for seg in self.segments)

    @cached_property
    def _code_indices(self) -> Tuple[int, ...]:
        """The indices of code elements.

        This is used in the path_to algorithm for tree traversal.
        """
        return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)

    @cached_property
    def is_comment(self) -> bool:  # pragma: no cover TODO?
"""Return True if this is entirely made of comments.""" return all(seg.is_comment for seg in self.segments) @cached_property def is_whitespace(self) -> bool: """Return True if this segment is entirely whitespace.""" return all(seg.is_whitespace for seg in self.segments) @cached_property def raw(self) -> str: """Make a string from the segments of this segment.""" return "".join(seg.raw for seg in self.segments) @property def class_types(self) -> FrozenSet[str]: """The set of types for this segment.""" # NOTE: This version is simple, but some dependent classes # (notably RawSegment) override this with something more # custom. return self._class_types @cached_property def descendant_type_set(self) -> FrozenSet[str]: """The set of all contained types. This is used for rule crawling. NOTE: Does not include the types of the parent segment itself. """ return frozenset( chain.from_iterable( seg.descendant_type_set | seg.class_types for seg in self.segments ) ) @cached_property def direct_descendant_type_set(self) -> Set[str]: """The set of all directly child types. This is used for rule crawling. NOTE: Does not include the types of the parent segment itself. """ return set(chain.from_iterable(seg.class_types for seg in self.segments)) @cached_property def raw_upper(self) -> str: """Make an uppercase string from the segments of this segment.""" return self.raw.upper() @cached_property def raw_segments(self) -> List["RawSegment"]: """Returns a list of raw segments in this segment.""" return self.get_raw_segments() @cached_property def raw_segments_with_ancestors( self, ) -> List[Tuple["RawSegment", List[PathStep]]]: """Returns a list of raw segments in this segment with the ancestors.""" buffer = [] code_idxs = tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code) for idx, seg in enumerate(self.segments): # If it's a raw, yield it with this segment as the parent new_step = [PathStep(self, idx, len(self.segments), code_idxs)] if seg.is_type("raw"): buffer.append((cast("RawSegment", seg), new_step)) # If it's not, recurse - prepending self to the ancestor stack else: buffer.extend( [ (raw_seg, new_step + stack) for raw_seg, stack in seg.raw_segments_with_ancestors ] ) return buffer @cached_property def source_fixes(self) -> List[SourceFix]: """Return any source fixes as list.""" return list(chain.from_iterable(s.source_fixes for s in self.segments)) @cached_property def first_non_whitespace_segment_raw_upper(self) -> Optional[str]: """Returns the first non-whitespace subsegment of this segment.""" for seg in self.raw_segments: if seg.raw_upper.strip(): return seg.raw_upper return None # return [seg.raw_upper for seg in self.raw_segments] @cached_property def is_templated(self) -> bool: """Returns True if the segment includes any templated code. This is a simple, very efficient check that doesn't require looking up the RawFileSlices for the segment. NOTE: A segment returning a True result may still have some literal code as well (i.e. a mixture of literal and templated). """ # We check two things: # * Source slice not empty: If it's empty, this means it doesn't appear # in the source, e.g. because it is new code generated by a lint fix. # Return False for these. # * It's not a literal slice. If it's a literal and has size then it's # not templated. 
        assert self.pos_marker
        return (
            self.pos_marker.source_slice.start != self.pos_marker.source_slice.stop
            and not self.pos_marker.is_literal()
        )

    # ################ STATIC METHODS

    def _suffix(self) -> str:
        """Return any extra output required at the end when logging.

        NB Override this for specific subclasses if we want extra output.
        """
        return ""

    @classmethod
    def _position_segments(
        cls,
        segments: Tuple["BaseSegment", ...],
        parent_pos: PositionMarker,
    ) -> Tuple["BaseSegment", ...]:
        """Refresh positions of segments within a span.

        This does two things:
        - Assign positions to any segments without them.
        - Updates the working line_no and line_pos for all segments
          during fixing.

        New segments are assumed to be metas or insertions and therefore
        have a zero-length position in the source and templated file.
        """
        assert segments, "_position_segments called on empty sequence."
        line_no = parent_pos.working_line_no
        line_pos = parent_pos.working_line_pos

        # Use the index so that we can look forward
        # and backward.
        segment_buffer: Tuple["BaseSegment", ...] = ()
        for idx, segment in enumerate(segments):
            # Get hold of the current position.
            old_position = segment.pos_marker
            new_position = segment.pos_marker
            # Fill any that don't have a position.
            if not old_position:
                # Can we get a position from the previous?
                start_point = None
                if idx > 0:
                    prev_seg = segment_buffer[idx - 1]
                    # Given we're going back in the buffer we should
                    # have set the position marker for everything already
                    # in there. This is mostly a hint to mypy.
                    assert prev_seg.pos_marker
                    start_point = prev_seg.pos_marker.end_point_marker()
                # Can we get it from the parent?
                elif parent_pos:
                    start_point = parent_pos.start_point_marker()

                # Search forward for the end point.
                end_point = None
                for fwd_seg in segments[idx + 1 :]:
                    if fwd_seg.pos_marker:
                        # NOTE: Use raw segments because it's more reliable.
                        end_point = fwd_seg.raw_segments[
                            0
                        ].pos_marker.start_point_marker()
                        break

                if start_point and end_point and start_point != end_point:
                    # We should construct a wider position marker.
                    new_position = PositionMarker.from_points(
                        start_point,
                        end_point,
                    )
                # If we have start point (or if they were equal above),
                # just apply start point.
                elif start_point:
                    new_position = start_point
                # Do we have an end?
                elif end_point:  # pragma: no cover
                    new_position = end_point
                else:  # pragma: no cover
                    raise ValueError("Unable to position new segment")

            assert new_position

            # Regardless of whether we change the position, we still need to
            # update the working location and keep track of it.
            new_position = new_position.with_working_position(line_no, line_pos)
            line_no, line_pos = new_position.infer_next_position(
                segment.raw, line_no, line_pos
            )

            # NOTE: If the position is already correct, we still
            # need to copy, but we don't need to reposition any further.
            if segment.segments and old_position != new_position:
                # Recurse to work out the child segments FIRST, before
                # copying the parent so we don't double the work.
                child_segments = cls._position_segments(
                    segment.segments, parent_pos=new_position
                )
                new_seg = segment.copy(segments=child_segments)
            else:
                new_seg = segment.copy()

            new_seg.pos_marker = new_position
            segment_buffer += (new_seg,)

        return segment_buffer

    # ################ CLASS METHODS

    @classmethod
    def simple(
        cls, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None
    ) -> Optional["SimpleHintType"]:
        """Does this matcher support an uppercase hash matching route?
        This should be true if the MATCH grammar is simple. Most more
        complicated segments will be assumed to overwrite this method
        if they wish to be considered simple.
        """
        if cls.match_grammar:
            return cls.match_grammar.simple(parse_context=parse_context, crumbs=crumbs)
        else:  # pragma: no cover TODO?
            # Other segments will either override this method, or aren't
            # simple.
            return None

    @classmethod
    def cache_key(cls) -> str:
        """Return the cache key for this segment definition.

        NOTE: The key itself is generated on _definition_ by the metaclass.
        """
        return cls._cache_key

    @classmethod
    def is_optional(cls) -> bool:  # pragma: no cover
        """Returns False because Segments are never optional.

        This is used _only_ in the `Sequence` & `Bracketed` grammars
        to indicate optional elements in a sequence which may not be
        present while still returning a valid match.

        Typically in dialect definition, Segments are rarely referred to
        directly, but normally are referenced via a `Ref()` grammar.
        The `Ref()` grammar supports optional referencing and so we
        recommend wrapping a segment in an optional `Ref()` to take
        advantage of optional sequence elements as this is not supported
        directly on the Segment itself.
        """
        return False

    @classmethod
    def class_is_type(cls, *seg_type: str) -> bool:
        """Is this segment class (or its parent) of the given type."""
        # Use set intersection
        if cls._class_types.intersection(seg_type):
            return True
        return False

    @classmethod
    def structural_simplify(
        cls, elem: TupleSerialisedSegment
    ) -> RecordSerialisedSegment:
        """Simplify the structure recursively so it serializes nicely in json/yaml.

        This is used in the .as_record() method.
        """
        assert len(elem) == 2
        key, value = elem
        assert isinstance(key, str)
        if isinstance(value, str):
            return {key: value}
        assert isinstance(value, tuple)
        # If it's an empty tuple return a dict with None.
        if not value:
            return {key: None}
        # Otherwise value is a tuple with length.
        # Simplify all the child elements
        contents = [cls.structural_simplify(e) for e in value]
        # Any duplicate elements?
        subkeys: List[str] = []
        for _d in contents:
            subkeys.extend(_d.keys())
        if len(set(subkeys)) != len(subkeys):
            # Yes: use a list of single dicts.
            # Recurse directly.
            return {key: contents}
        # Otherwise there aren't duplicates, un-nest the list into a dict:
        content_dict = {}
        for record in contents:
            for k, v in record.items():
                content_dict[k] = v
        return {key: content_dict}

    @classmethod
    def match(
        cls, segments: Sequence["BaseSegment"], idx: int, parse_context: ParseContext
    ) -> MatchResult:
        """Match a list of segments against this segment.

        Note: Match for segments is done in the ABSTRACT.
        When dealing with concrete then we're always in parse.
        Parse is what happens during expand.

        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.
        """
        if idx >= len(segments):  # pragma: no cover
            return MatchResult.empty_at(idx)

        # Is this already the right kind of segment?
        if isinstance(segments[idx], cls):
            # Very simple "consume one" result.
            return MatchResult(slice(idx, idx + 1))

        assert cls.match_grammar, f"{cls.__name__} has no match grammar."

        with parse_context.deeper_match(name=cls.__name__) as ctx:
            match = cls.match_grammar.match(segments, idx, ctx)

        # Wrap and return regardless of success.
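        # NOTE: On success, wrapping means that when the result is later
        # applied, the matched segments will be parented under an instance
        # of this class; an empty (unsuccessful) match passes through
        # unchanged.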
        return match.wrap(cls)

    # ################ PRIVATE INSTANCE METHODS

    def _recalculate_caches(self) -> None:
        for key in [
            "is_code",
            "is_comment",
            "is_whitespace",
            "raw",
            "raw_upper",
            "matched_length",
            "raw_segments",
            "raw_segments_with_ancestors",
            "first_non_whitespace_segment_raw_upper",
            "source_fixes",
            "full_type_set",
            "descendant_type_set",
            "direct_descendant_type_set",
            "_code_indices",
            "_hash",
        ]:
            self.__dict__.pop(key, None)

    def _preface(self, ident: int, tabsize: int) -> str:
        """Returns the preamble to any logging."""
        padded_type = "{padding}{modifier}{type}".format(
            padding=" " * (ident * tabsize),
            modifier=self._preface_modifier,
            type=self.get_type() + ":",
        )
        preface = "{pos:20}|{padded_type:60} {suffix}".format(
            pos=str(self.pos_marker) if self.pos_marker else "-",
            padded_type=padded_type,
            suffix=self._suffix() or "",
        )
        # Trim unnecessary whitespace before returning
        return preface.rstrip()

    # ################ PUBLIC INSTANCE METHODS

    def set_as_parent(self, recurse: bool = True) -> None:
        """Set this segment as parent for all child segments."""
        for idx, seg in enumerate(self.segments):
            seg.set_parent(self, idx)
            # Recurse if not disabled
            if recurse:
                seg.set_as_parent(recurse=recurse)

    def set_parent(self, parent: "BaseSegment", idx: int) -> None:
        """Set the weak reference to the parent.

        We keep a reference to the index within the parent too as that
        is often used at the same point in the operation.

        NOTE: Don't validate on set, because we might not have fully
        initialised the parent yet (because we call this method during
        the instantiation of the parent).
        """
        self._parent = weakref.ref(parent)
        self._parent_idx = idx

    def get_parent(self) -> Optional[Tuple["BaseSegment", int]]:
        """Get the parent segment, with some validation.

        This is provided as a performance optimisation when searching
        through the syntax tree. Any methods which depend on this should
        have an alternative way of assessing position, and ideally also
        set the parent of any segments found without them. As a performance
        optimisation, we also store the index of the segment within the
        parent to avoid needing to recalculate that.

        NOTE: We only store a weak reference to the parent so it might not
        be present. We also validate here that it's _still_ the parent and
        potentially also return None if those checks fail.
        """
        if not self._parent:
            return None
        _parent = self._parent()
        if not _parent or self not in _parent.segments:
            return None
        assert self._parent_idx is not None
        return _parent, self._parent_idx

    def get_type(self) -> str:
        """Returns the type of this segment as a string."""
        return self.type

    def count_segments(self, raw_only: bool = False) -> int:
        """Returns the number of segments in this segment."""
        if self.segments:
            self_count = 0 if raw_only else 1
            return self_count + sum(
                seg.count_segments(raw_only=raw_only) for seg in self.segments
            )
        else:
            return 1

    def is_type(self, *seg_type: str) -> bool:
        """Is this segment (or its parent) of the given type."""
        return self.class_is_type(*seg_type)

    def invalidate_caches(self) -> None:
        """Invalidate the cached properties.

        This should be called whenever the segments within this
        segment are mutated.
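
        For example (an illustrative sketch), code which mutates
        ``seg.segments`` directly, rather than constructing a new segment,
        should call ``seg.invalidate_caches()`` afterwards so that cached
        properties such as ``raw`` and ``raw_upper`` are recomputed on
        next access.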
""" for seg in self.segments: seg.invalidate_caches() self._recalculate_caches() def get_start_point_marker(self) -> PositionMarker: # pragma: no cover """Get a point marker at the start of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.start_point_marker() def get_end_point_marker(self) -> PositionMarker: """Get a point marker at the end of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.end_point_marker() def get_start_loc(self) -> Tuple[int, int]: """Get a location tuple at the start of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.working_loc def get_end_loc(self) -> Tuple[int, int]: """Get a location tuple at the end of this segment.""" assert self.pos_marker, f"{self} has no PositionMarker" return self.pos_marker.working_loc_after( self.raw, ) def stringify( self, ident: int = 0, tabsize: int = 4, code_only: bool = False ) -> str: """Use indentation to render this segment and its children as a string.""" buff = StringIO() preface = self._preface(ident=ident, tabsize=tabsize) buff.write(preface + "\n") if not code_only and self.comment_separate and len(self._comments) > 0: if self._comments: # pragma: no cover TODO? buff.write((" " * ((ident + 1) * tabsize)) + "Comments:" + "\n") for seg in self._comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) if self._non_comments: # pragma: no cover TODO? buff.write((" " * ((ident + 1) * tabsize)) + "Code:" + "\n") for seg in self._non_comments: buff.write( seg.stringify( ident=ident + 2, tabsize=tabsize, code_only=code_only, ) ) else: for seg in self.segments: # If we're in code_only, only show the code segments, otherwise always # true if not code_only or seg.is_code: buff.write( seg.stringify( ident=ident + 1, tabsize=tabsize, code_only=code_only, ) ) return buff.getvalue() def to_tuple( self, code_only: bool = False, show_raw: bool = False, include_meta: bool = False, ) -> TupleSerialisedSegment: """Return a tuple structure from this segment.""" # works for both base and raw if show_raw and not self.segments: return (self.get_type(), self.raw) elif code_only: return ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if seg.is_code and not seg.is_meta ), ) else: return ( self.get_type(), tuple( seg.to_tuple( code_only=code_only, show_raw=show_raw, include_meta=include_meta, ) for seg in self.segments if include_meta or not seg.is_meta ), ) def copy( self, segments: Optional[Tuple["BaseSegment", ...]] = None, parent: Optional["BaseSegment"] = None, parent_idx: Optional[int] = None, ) -> "BaseSegment": """Copy the segment recursively, with appropriate copying of references. Optionally provide child segments which have already been dealt with to avoid another copy operation. NOTE: In the copy operation it's really important that we get a clean segregation so that we can't go backward and mutate the source object, but at the same time we should be mindful of what _needs_ to be copied to avoid a deep copy where one isn't required. """ cls = self.__class__ new_segment = cls.__new__(cls) # Position markers are immutable, and it's important that we keep # a reference to the same TemplatedFile, so keep the same position # marker. By updating from the source dict, we achieve that. 
        # By using the __dict__ object we also transfer the _cache_,
        # which is stored there by @cached_property.
        new_segment.__dict__.update(self.__dict__)

        # Reset the parent if provided.
        if parent:
            assert parent_idx is not None, "parent_idx must be provided if parent is."
            new_segment.set_parent(parent, parent_idx)

        # If the segment doesn't have a segments property, we're done.
        # NOTE: This is a proxy way of understanding whether it's a RawSegment
        # or not. Typically it will _have_ a `segments` attribute, but it's an
        # empty tuple.
        if not self.__dict__.get("segments", None):
            assert (
                not segments
            ), f"Cannot provide `segments` argument to {cls.__name__} `.copy()`\n"
        # If segments were provided, use them.
        elif segments:
            new_segment.segments = segments
        # Otherwise we should handle recursive segment copying.
        # We use the native .copy() method (this method!) appropriately
        # so that the same logic is applied in recursion.
        # We set the parent for children directly on the copy method
        # to ensure those line up properly.
        else:
            new_segment.segments = tuple(
                seg.copy(parent=new_segment, parent_idx=idx)
                for idx, seg in enumerate(self.segments)
            )

        return new_segment

    def as_record(self, **kwargs: bool) -> Optional[RecordSerialisedSegment]:
        """Return the segment as a structurally simplified record.

        This is useful for serialization to yaml or json.
        kwargs passed to to_tuple
        """
        return self.structural_simplify(self.to_tuple(**kwargs))

    def get_raw_segments(self) -> List["RawSegment"]:
        """Iterate raw segments, mostly for searching."""
        return [item for s in self.segments for item in s.raw_segments]

    def iter_segments(
        self, expanding: Optional[Sequence[str]] = None, pass_through: bool = False
    ) -> Iterator["BaseSegment"]:
        """Iterate segments, optionally expanding some children."""
        for s in self.segments:
            if expanding and s.is_type(*expanding):
                yield from s.iter_segments(
                    expanding=expanding if pass_through else None
                )
            else:
                yield s

    def iter_unparsables(self) -> Iterator["UnparsableSegment"]:
        """Iterate through any unparsables this segment may contain."""
        for s in self.segments:
            yield from s.iter_unparsables()

    def type_set(self) -> Set[str]:
        """Return a set of the types contained, mostly for testing."""
        typs = {self.type}
        for s in self.segments:
            typs |= s.type_set()
        return typs

    def is_raw(self) -> bool:
        """Return True if this segment has no children."""
        return len(self.segments) == 0

    def get_child(self, *seg_type: str) -> Optional[BaseSegment]:
        """Retrieve the first of the children of this segment with matching type."""
        for seg in self.segments:
            if seg.is_type(*seg_type):
                return seg
        return None

    def get_children(self, *seg_type: str) -> List[BaseSegment]:
        """Retrieve all of the children of this segment with matching type."""
        buff = []
        for seg in self.segments:
            if seg.is_type(*seg_type):
                buff.append(seg)
        return buff

    def select_children(
        self,
        start_seg: Optional["BaseSegment"] = None,
        stop_seg: Optional["BaseSegment"] = None,
        select_if: Optional[Callable[["BaseSegment"], Any]] = None,
        loop_while: Optional[Callable[["BaseSegment"], Any]] = None,
    ) -> List["BaseSegment"]:
        """Retrieve a subset of children based on range and filters.

        Often useful for linter rules when generating fixes, e.g. to find
        whitespace segments between two already known segments.
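
        As an illustrative sketch (the variable names are hypothetical):

        .. code-block:: python

            whitespace = parent.select_children(
                start_seg=first_known_seg,
                stop_seg=second_known_seg,
                select_if=lambda s: s.is_type("whitespace"),
            )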
""" start_index = self.segments.index(start_seg) if start_seg else -1 stop_index = self.segments.index(stop_seg) if stop_seg else len(self.segments) buff = [] for seg in self.segments[start_index + 1 : stop_index]: if loop_while and not loop_while(seg): break if not select_if or select_if(seg): buff.append(seg) return buff def recursive_crawl_all(self, reverse: bool = False) -> Iterator[BaseSegment]: """Recursively crawl all descendant segments.""" if reverse: for seg in reversed(self.segments): yield from seg.recursive_crawl_all(reverse=reverse) yield self if not reverse: for seg in self.segments: yield from seg.recursive_crawl_all(reverse=reverse) def recursive_crawl( self, *seg_type: str, recurse_into: bool = True, no_recursive_seg_type: Optional[str] = None, allow_self: bool = True, ) -> Iterator[BaseSegment]: """Recursively crawl for segments of a given type. Args: seg_type: :obj:`str`: one or more type of segment to look for. recurse_into: :obj:`bool`: When an element of type "seg_type" is found, whether to recurse into it. no_recursive_seg_type: :obj:`str`: a type of segment not to recurse further into. It is highly recommended to set this argument where possible, as it can significantly narrow the search pattern. allow_self: :obj:`bool`: Whether to allow the initial segment this is called on to be one of the results. """ # Assuming there is a segment to be found, first check self (if allowed): if allow_self and self.is_type(*seg_type): match = True yield self else: match = False # Check whether the types we're looking for are in this segment # at all. If not, exit early. if not self.descendant_type_set.intersection(seg_type): # Terminate iteration. return None # Then handle any recursion. if recurse_into or not match: for seg in self.segments: # Don't recurse if the segment is of a type we shouldn't # recurse into. # NOTE: Setting no_recursive_seg_type can significantly # improve performance in many cases. if not no_recursive_seg_type or not seg.is_type(no_recursive_seg_type): yield from seg.recursive_crawl( *seg_type, recurse_into=recurse_into, no_recursive_seg_type=no_recursive_seg_type, ) def path_to(self, other: "BaseSegment") -> List[PathStep]: """Given a segment which is assumed within self, get the intermediate segments. Returns: :obj:`list` of :obj:`PathStep`, not including the segment we're looking for. If `other` is not found, then empty list. This includes if called on self. The result of this should be interpreted as *the path from `self` to `other`*. If the return value is `[]` (an empty list), that implies there is no path from `self` to `other`. This would include the case where the two are the same segment, as there is no path from a segment to itself. Technically this could be seen as a "half open interval" of the path between two segments: in that it includes the root segment, but not the leaf. We first use any existing parent references to work upward, and then if that doesn't take us far enough we fill in from the top (setting any missing references as we go). This tries to be as efficient in that process as possible. """ # Return empty if they are the same segment. if self is other: return [] # pragma: no cover # Do we have any child segments at all? if not self.segments: return [] # Identifying the highest parent we can using any preset parent values. midpoint = other lower_path = [] while True: _higher = midpoint.get_parent() # If we've run out of parents, stop for now. 
            if not _higher:
                break
            _seg, _idx = _higher
            # If the higher doesn't have a position we'll run into problems.
            # Check that in advance.
            assert _seg.pos_marker, (
                f"`path_to()` found segment {_seg} without position. "
                "This shouldn't happen post-parse."
            )
            lower_path.append(
                PathStep(
                    _seg,
                    _idx,
                    len(_seg.segments),
                    _seg._code_indices,
                )
            )
            midpoint = _seg
            # If we've found the target segment we can also stop.
            if midpoint == self:
                break

        # Reverse the path so far
        lower_path.reverse()

        # Have we already found the parent?
        if midpoint == self:
            return lower_path
        # Have we gone all the way up to the file segment?
        elif midpoint.class_is_type("file"):
            return []  # pragma: no cover
        # Are we in the right ballpark?
        # NOTE: Comparisons have a higher precedence than `not`.
        elif not self.get_start_loc() <= midpoint.get_start_loc() <= self.get_end_loc():
            return []

        # From here, we've worked "up" as far as we can, we now work "down".
        # When working down, we only need to go as far as the `midpoint`.

        # Check through each of the child segments
        for idx, seg in enumerate(self.segments):
            # Set the parent if it's not already set.
            seg.set_parent(self, idx)
            # Build the step.
            step = PathStep(self, idx, len(self.segments), self._code_indices)
            # Have we found the target?
            # NOTE: Check for _equality_ not _identity_ here as that's most reliable.
            if seg == midpoint:
                return [step] + lower_path
            # Is there a path to the target?
            res = seg.path_to(midpoint)
            if res:
                return [step] + res + lower_path

        # Not found.
        return []  # pragma: no cover

    @staticmethod
    def _is_code_or_meta(segment: "BaseSegment") -> bool:
        return segment.is_code or segment.is_meta

    def validate_non_code_ends(self) -> None:
        """Validates the start and end of the sequence based on its config.

        Most normal segments may *not* start or end with whitespace. Any
        surrounding whitespace should be within the outer segment containing
        this one.

        The exception is for segments which configure `can_start_end_non_code`,
        for which no check is conducted.

        TODO: Check whether `can_start_end_non_code` is only set for
        FileSegment, in which case - take away the config and just override
        this method for that segment.
        """
        if self.can_start_end_non_code:
            return None
        if not self.segments:  # pragma: no cover
            return None
        assert self._is_code_or_meta(self.segments[0]), (
            f"Segment {self} starts with whitespace segment: "
            f"{self.segments[0].raw!r}.\n{self.segments!r}"
        )
        assert self._is_code_or_meta(self.segments[-1]), (
            f"Segment {self} ends with whitespace segment: "
            f"{self.segments[-1].raw!r}.\n{self.segments!r}"
        )

    def validate_segment_with_reparse(
        self,
        dialect: "Dialect",
    ) -> bool:
        """Checks correctness of new segment by re-parsing it."""
        ctx = ParseContext(dialect=dialect)
        # We're going to check the rematch without any metas because the
        # matching routines will assume they haven't already been added.
        # We also strip any non-code from the ends which might have moved.
        raw_content = tuple(s for s in self.raw_segments if not s.is_meta)
        _, trimmed_content, _ = trim_non_code_segments(raw_content)
        if not trimmed_content and self.can_start_end_non_code:
            # Edge case for empty segments which are allowed to be empty.
            return True
        rematch = self.match(trimmed_content, 0, ctx)
        if not rematch.matched_slice == slice(0, len(trimmed_content)):
            linter_logger.debug(
                f"Validation Check Fail for {self}. Incomplete Match. "
                f"\nMatched: {rematch.apply(trimmed_content)}. "
                f"\nUnmatched: {trimmed_content[rematch.matched_slice.stop:]}."
            )
            return False

        opening_unparsables = set(self.recursive_crawl("unparsable"))
        closing_unparsables: Set[BaseSegment] = set()
        new_segments = rematch.apply(trimmed_content)
        for seg in new_segments:
            closing_unparsables.update(seg.recursive_crawl("unparsable"))
        # Check we don't introduce any _additional_ unparsables.
        # Pre-existing unparsables are ok, and for some rules that's as
        # designed. The idea is that we shouldn't make the situation _worse_.
        if opening_unparsables >= closing_unparsables:
            return True

        linter_logger.debug(
            f"Validation Check Fail for {self}.\nFound additional Unparsables: "
            f"{closing_unparsables - opening_unparsables}"
        )
        for unparsable in closing_unparsables - opening_unparsables:
            linter_logger.debug(f"Unparsable:\n{unparsable.stringify()}\n")
        return False

    @staticmethod
    def _log_apply_fixes_check_issue(
        message: str, *args: Any
    ) -> None:  # pragma: no cover
        linter_logger.critical(message, exc_info=True, *args)

    def edit(
        self, raw: Optional[str] = None, source_fixes: Optional[List[SourceFix]] = None
    ) -> BaseSegment:
        """Stub."""
        raise NotImplementedError()


class UnparsableSegment(BaseSegment):
    """This is a segment which can't be parsed.

    It indicates an error during parsing.
    """

    type = "unparsable"
    # From here down, comments are printed separately.
    comment_separate = True
    # Unparsable segments could contain anything.
    can_start_end_non_code = True
    _expected = ""

    def __init__(
        self,
        segments: Tuple[BaseSegment, ...],
        pos_marker: Optional[PositionMarker] = None,
        expected: str = "",
    ) -> None:
        self._expected = expected
        super().__init__(segments=segments, pos_marker=pos_marker)

    def _suffix(self) -> str:
        """Return any extra output required at the end when logging.

        NB Override this for specific subclasses if we want extra output.
        """
        return f"!! Expected: {self._expected!r}"

    def iter_unparsables(self) -> Iterator["UnparsableSegment"]:
        """Iterate through any unparsables.

        As this is an unparsable, it should yield itself.
        """
        yield self
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/bracketed.py000066400000000000000000000060101451700765000243250ustar00rootroot00000000000000"""The BracketedSegment."""

from typing import TYPE_CHECKING, Optional, Sequence, Set, Tuple

from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.segments.base import BaseSegment

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.parser.types import SimpleHintType


class BracketedSegment(BaseSegment):
    """A segment containing a bracketed expression."""

    type = "bracketed"
    additional_kwargs = ["start_bracket", "end_bracket"]

    def __init__(
        self,
        segments: Tuple["BaseSegment", ...],
        # These are tuples of segments but we're expecting them to
        # be tuples of length 1. This is because we'll almost always
        # be doing tuple arithmetic with the results and constructing
        # 1-tuples on the fly is very easy to misread.
        start_bracket: Tuple[BaseSegment],
        end_bracket: Tuple[BaseSegment],
        pos_marker: Optional[PositionMarker] = None,
        uuid: Optional[int] = None,
    ):
        """Stash the bracket segments for later."""
        if not start_bracket or not end_bracket:  # pragma: no cover
            raise ValueError(
                "Attempted to construct Bracketed segment without specifying brackets."
            )
        self.start_bracket = start_bracket
        self.end_bracket = end_bracket
        super().__init__(segments=segments, pos_marker=pos_marker, uuid=uuid)

    @classmethod
    def simple(
        cls, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None
    ) -> Optional["SimpleHintType"]:
        """Simple methods for bracketed and the persistent brackets."""
        start_brackets = [
            start_bracket
            for _, start_bracket, _, persistent in parse_context.dialect.bracket_sets(
                "bracket_pairs"
            )
            if persistent
        ]
        simple_raws: Set[str] = set()
        for ref in start_brackets:
            bracket_simple = parse_context.dialect.ref(ref).simple(
                parse_context, crumbs=crumbs
            )
            assert bracket_simple, "All bracket segments must support simple."
            assert bracket_simple[0], "All bracket segments must support raw simple."
            # NOTE: By making this assumption we don't have to handle the "typed"
            # simple here.
            simple_raws.update(bracket_simple[0])
        return frozenset(simple_raws), frozenset()

    @classmethod
    def match(
        cls,
        segments: Sequence["BaseSegment"],
        idx: int,
        parse_context: "ParseContext",
    ) -> MatchResult:
        """Only useful as a terminator.

        NOTE: Coverage of this method is poor, because in typical use
        as a terminator - the `.simple()` method covers everything we need.
        """
        if isinstance(segments[idx], cls):  # pragma: no cover
            return MatchResult(slice(idx, idx + 1))
        return MatchResult.empty_at(idx)
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/common.py000066400000000000000000000056621451700765000237050ustar00rootroot00000000000000"""Common segment types used as building blocks of dialects.

The expectation for these segments is that they have no
additional logic (or very minimal logic).
"""

from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.core.parser.segments.raw import RawSegment


class CodeSegment(RawSegment):
    """An alias for RawSegment.

    This has a more explicit name for segment creation.
    """

    pass


class UnlexableSegment(CodeSegment):
    """A placeholder for unlexable sections.

    This otherwise behaves exactly like a code section.
    """

    type = "unlexable"


class CommentSegment(RawSegment):
    """Segment containing a comment."""

    type = "comment"
    _is_code = False
    _is_comment = True


class WhitespaceSegment(RawSegment):
    """Segment containing whitespace."""

    type = "whitespace"
    _is_whitespace = True
    _is_code = False
    _is_comment = False
    _default_raw = " "


class NewlineSegment(RawSegment):
    """Segment containing a newline.

    NOTE: NewlineSegment does not inherit from WhitespaceSegment.
    Therefore NewlineSegment.is_type('whitespace') returns False.

    This is intentional and convenient for rules. If users want
    to match on both, call .is_type('whitespace', 'newline')
    """

    type = "newline"
    _is_whitespace = True
    _is_code = False
    _is_comment = False
    _default_raw = "\n"


class SymbolSegment(CodeSegment):
    """A segment used for matching single entities which aren't keywords.

    We rename the segment class here so that descendants of
    _ProtoKeywordSegment can use the same functionality
    but don't end up being labelled as a `keyword` later.
    """

    type = "symbol"


class IdentifierSegment(CodeSegment):
    """An identifier segment.

    Defined here for type inheritance.
    """

    type = "identifier"


class LiteralSegment(CodeSegment):
    """A literal segment.

    Defined here for type inheritance.
    """

    type = "literal"


class BinaryOperatorSegment(CodeSegment):
    """A binary operator segment.

    Defined here for type inheritance. Inherits from RawSegment.
    """

    type = "binary_operator"


class CompositeBinaryOperatorSegment(BaseSegment):
    """A composite binary operator segment.

    Defined here for type inheritance.
    Inherits from BaseSegment.
    """

    type = "binary_operator"


class ComparisonOperatorSegment(CodeSegment):
    """A comparison operator segment.

    Defined here for type inheritance. Inherits from RawSegment.
    """

    type = "comparison_operator"


class CompositeComparisonOperatorSegment(BaseSegment):
    """A composite comparison operator segment.

    Defined here for type inheritance. Inherits from BaseSegment.
    """

    type = "comparison_operator"


class WordSegment(CodeSegment):
    """A generic (likely letters only) segment.

    Defined here for type inheritance.

    This is the base segment for things like keywords and naked identifiers.
    """

    type = "word"
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/file.py000066400000000000000000000075501451700765000233310ustar00rootroot00000000000000"""Definition of the BaseFileSegment."""

from abc import abstractmethod
from typing import Optional, Set, Tuple

from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.segments.base import BaseSegment, UnparsableSegment


class BaseFileSegment(BaseSegment):
    """A segment representing a whole file or script.

    This is also the default "root" segment of the dialect,
    and so is usually instantiated directly. It therefore
    has no match_grammar.
    """

    type = "file"
    # The file segment is the only one which can start or end with non-code
    can_start_end_non_code = True
    # A file can be empty!
    allow_empty = True

    def __init__(
        self,
        segments: Tuple[BaseSegment, ...],
        pos_marker: Optional[PositionMarker] = None,
        fname: Optional[str] = None,
    ):
        self._file_path = fname
        super().__init__(segments, pos_marker=pos_marker)

    @property
    def file_path(self) -> Optional[str]:
        """File path of a parsed SQL file."""
        return self._file_path

    @abstractmethod
    def get_table_references(self) -> Set[str]:
        """Use parsed tree to extract table references."""

    @classmethod
    def root_parse(
        cls,
        segments: Tuple[BaseSegment, ...],
        parse_context: ParseContext,
        fname: Optional[str] = None,
    ) -> "BaseFileSegment":
        """This is the entry method into parsing the lexed segments of a file.

        For single pass matching, this trims any non-code off the start,
        matches the middle and then trims the end.

        Anything unexpected at the end is regarded as unparsable.
        """
        # Trim the start
        _start_idx = 0
        for _start_idx in range(len(segments)):
            if segments[_start_idx].is_code:
                break

        # Trim the end
        _end_idx = len(segments)
        for _end_idx in range(len(segments), _start_idx - 1, -1):
            if segments[_end_idx - 1].is_code:
                break

        if _start_idx == _end_idx:
            # Return just a file of non-code segments.
            return cls(segments, fname=fname)

        # Match the middle
        assert not hasattr(
            cls, "parse_grammar"
        ), "`parse_grammar` is deprecated on FileSegment."
        assert cls.match_grammar

        # Set up the progress bar for parsing.
        _final_seg = segments[-1]
        assert _final_seg.pos_marker
        _closing_position = _final_seg.pos_marker.templated_slice.stop
        with parse_context.progress_bar(_closing_position):
            # NOTE: Don't call .match() on the segment class itself, but go
            # straight to the match grammar inside.
            match = cls.match_grammar.match(
                segments[:_end_idx], _start_idx, parse_context
            )

        parse_context.logger.info("Root Match:\n%s", match.stringify())
        _matched = match.apply(segments)
        _unmatched = segments[match.matched_slice.stop : _end_idx]

        content: Tuple[BaseSegment, ...]
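        # There are three possible outcomes here:
        # 1. No match at all: wrap all of the (trimmed) content in a single
        #    UnparsableSegment.
        # 2. A partial match: keep the matched content and wrap the trailing
        #    unmatched code in an UnparsableSegment.
        # 3. A complete match: use the matched content as-is.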
        if not match:
            content = (
                UnparsableSegment(
                    segments[_start_idx:_end_idx], expected=str(cls.match_grammar)
                ),
            )
        elif _unmatched:
            _idx = 0
            for _idx in range(len(_unmatched)):
                if _unmatched[_idx].is_code:
                    break
            content = (
                _matched
                + _unmatched[:_idx]
                + (
                    UnparsableSegment(
                        _unmatched[_idx:], expected="Nothing else in FileSegment."
                    ),
                )
            )
        else:
            content = _matched + _unmatched

        return cls(
            segments[:_start_idx] + content + segments[_end_idx:],
            fname=fname,
        )
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/generator.py000066400000000000000000000020431451700765000243710ustar00rootroot00000000000000"""A Segment Generator.

Used to create segments when the `expand` function is called.
Helpful when using the sets attribute of the dialect.
"""

from typing import TYPE_CHECKING, Callable

if TYPE_CHECKING:  # pragma: no cover
    from sqlfluff.core.dialects.base import Dialect
    from sqlfluff.core.parser.matchable import Matchable


class SegmentGenerator:
    """Defines a late-bound dialect object.

    It returns a single dialect object on expansion. These are
    defined using a callable, which is only called once everything
    else is defined. Very useful for template inheritance.
    """

    def __init__(self, func: Callable[["Dialect"], "Matchable"]) -> None:
        # Store the function to call on expansion.
        self.func = func

    def expand(self, dialect: "Dialect") -> "Matchable":
        """Expand this object into its true dialect object.

        The inner function is passed an instance of the current dialect
        and so has access to the current sets of that dialect.
        """
        return self.func(dialect)
sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/keyword.py000066400000000000000000000036141451700765000240740ustar00rootroot00000000000000"""The KeywordSegment class."""

from typing import List, Optional, Tuple

from sqlfluff.core.parser.markers import PositionMarker
from sqlfluff.core.parser.segments.base import SourceFix
from sqlfluff.core.parser.segments.common import WordSegment


class KeywordSegment(WordSegment):
    """A segment used for matching single words.

    We rename the segment class here so that descendants of
    _ProtoKeywordSegment can use the same functionality
    but don't end up being labelled as a `keyword` later.
    """

    type = "keyword"

    def __init__(
        self,
        raw: Optional[str] = None,
        pos_marker: Optional[PositionMarker] = None,
        instance_types: Tuple[str, ...] = (),
        source_fixes: Optional[List[SourceFix]] = None,
        trim_chars: Optional[Tuple[str, ...]] = None,
    ):
        """If no other name is provided we extrapolate it from the raw."""
        super().__init__(
            raw=raw,
            pos_marker=pos_marker,
            instance_types=instance_types,
            source_fixes=source_fixes,
        )

    def edit(
        self, raw: Optional[str] = None, source_fixes: Optional[List[SourceFix]] = None
    ) -> "KeywordSegment":
        """Create a new segment, with exactly the same position but different content.

        Returns:
            A copy of this object with new contents.

        Used mostly by fixes.

        NOTE: This *doesn't* copy the uuid. The edited segment is a new
        segment.
        """
        return self.__class__(
            raw=raw or self.raw,
            pos_marker=self.pos_marker,
            instance_types=self.instance_types,
            source_fixes=source_fixes or self.source_fixes,
        )


class LiteralKeywordSegment(KeywordSegment):
    """A keyword style literal segment.

    This should be used for things like NULL, NAN, TRUE & FALSE.

    Defined here for type inheritance.
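
    As an illustrative sketch (not a real dialect entry), it would
    typically be referenced from a parser in a dialect definition:

    .. code-block:: python

        null_parser = StringParser("NULL", LiteralKeywordSegment)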
""" type = "literal" sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/meta.py000066400000000000000000000223401451700765000233330ustar00rootroot00000000000000"""Indent and Dedent classes.""" from typing import List, Optional, Sequence, Tuple from uuid import UUID from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.raw import RawSegment, SourceFix from sqlfluff.core.templaters.base import TemplatedFile class MetaSegment(RawSegment): """A segment which is empty but indicates where something should be.""" type = "meta" _is_code = False _template = "" indent_val = 0 # Implicit indents are to be considered _taken_ unless # closed on the same line. is_implicit = False is_meta = True _preface_modifier = "[META] " def __init__( self, pos_marker: Optional[PositionMarker] = None, is_template: bool = False, block_uuid: Optional[UUID] = None, source_fixes: Optional[List[SourceFix]] = None, ): """Constructor for MetaSegment. Args: pos_marker (:obj:`PositionMarker`, optional): The position of the segment. is_template (:obj:`bool`, optional): A flag to indicate whether this meta segment is related to a templated section. This allows proper handling. block_uuid (:obj:`UUID`, optional): A reference to link together markers which refer to the same structure in a template (e.g. the beginning and end of an if statement). source_fixes: (:obj:`list` of :obj:`SourceFix`, optional): A list of any source fixes to apply to this segment. """ super().__init__(pos_marker=pos_marker, source_fixes=source_fixes) self.is_template = is_template self.block_uuid = block_uuid def _suffix(self) -> str: """Return any extra output required at the end when logging. Meta classes have not much to say here so just stay blank. """ return "" @classmethod def match( cls, segments: Sequence["BaseSegment"], idx: int, parse_context: ParseContext ) -> MatchResult: # pragma: no cover """This will never be called. If it is then we're using it wrong.""" raise NotImplementedError( "{} has no match method, it should only be used in a Sequence!".format( cls.__name__ ) ) @classmethod def simple( cls, parse_context: ParseContext, crumbs: Optional[Tuple[str, ...]] = None ) -> None: """Does this matcher support an uppercase hash matching route? This should be true if the MATCH grammar is simple. Most more complicated segments will be assumed to overwrite this method if they wish to be considered simple. """ return None class EndOfFile(MetaSegment): """A meta segment to indicate the end of the file.""" type = "end_of_file" class TemplateLoop(MetaSegment): """A meta segment to indicate the presence of a backward template jump. More specifically these indicate the presence of where there is a placeholder in the source, but in the templated file we don't have one _yet_ because we're going back for another pass around a loop. These are particularly useful for any rules concernced with layout, because and indented TemplateLoop is allowable, but without the marker we would just see trailing whitespace. """ type = "template_loop" class Indent(MetaSegment): """A segment which is empty but indicates where an indent should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of a theoretical indent which will be used in linting and reconstruction. 
Even if there is an *actual indent* that occurs in the same place, this intentionally *won't* capture it; they will just be compared later. """ type = "indent" indent_val = 1 def _suffix(self) -> str: """If present, output the block uuid.""" return f"[Block: {self.block_uuid.hex[:6]!r}]" if self.block_uuid else "" class ImplicitIndent(Indent): """A variant on the indent, that is considered *taken* unless closed in line. This is primarily for facilitating constructions which behave a little like hanging indents, without the complicated indentation spacing. .. code-block:: sql SELECT * FROM foo WHERE a -- The theoretical indent between WHERE and "a" is implicit. AND b """ _preface_modifier = "[META] (implicit) " is_implicit = True class Dedent(Indent): """A segment which is empty but indicates where a dedent should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of a theoretical dedent which will be used in linting and reconstruction. Even if there is an *actual dedent* that occurs in the same place, this intentionally *won't* capture it; they will just be compared later. """ type = "dedent" indent_val = -1 class TemplateSegment(MetaSegment): """A segment which is empty but indicates where something should be. This segment is always empty, i.e. its raw format is '', but it indicates the position of an element on a line which has been removed. This is used to record the position of template blocks, so that their indents are not removed during linting. This is used to hold a reference point for code from the source file which is removed in the templated version, such as loop blocks or comments. On initialisation we optionally accept the source string as a kwarg in case rules want to lint this down the line. """ type = "placeholder" def __init__( self, pos_marker: Optional[PositionMarker] = None, source_str: str = "", block_type: str = "", source_fixes: Optional[List[SourceFix]] = None, block_uuid: Optional[UUID] = None, ): """Initialise a placeholder with the source code embedded.""" # NOTE: Empty string is ok, None is not. if source_str is None: # pragma: no cover raise ValueError("Cannot instantiate TemplateSegment without a source_str.") self.source_str = source_str self.block_type = block_type # Call the super of the pos_marker. super().__init__( pos_marker=pos_marker, source_fixes=source_fixes, block_uuid=block_uuid ) def _suffix(self) -> str: """Also output what it's a placeholder for.""" return ( f"[Type: {self.block_type!r}, Raw: {self.source_str!r}" + (f", Block: {self.block_uuid.hex[:6]!r}" if self.block_uuid else "") + "]" ) @classmethod def from_slice( cls, source_slice: slice, templated_slice: slice, block_type: str, templated_file: TemplatedFile, block_uuid: Optional[UUID] = None, ) -> "TemplateSegment": """Construct template segment from slice of a source file.""" pos_marker = PositionMarker( source_slice, templated_slice, templated_file, ) return cls( pos_marker=pos_marker, source_str=templated_file.source_str[source_slice], block_type=block_type, block_uuid=block_uuid, ) def to_tuple( self, code_only: bool = False, show_raw: bool = False, include_meta: bool = False, ) -> Tuple[str, str]: """Return a tuple structure from this segment. Unlike most segments, we return the _source_ content for placeholders if viewing metas is allowed. This allows verification of the content of those placeholders for inspection or debugging. NOTE: This method does not use the `include_meta` argument.
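        (As a hedged illustration: a placeholder for a Jinja ``for`` block
        might yield something like ``("placeholder", "{% for item in seq %}")``
        - values illustrative only.)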
This method relies on any parent segment to do filtering associated with whether or not to include meta segments. """ return (self.get_type(), self.source_str) def edit( self, raw: Optional[str] = None, source_fixes: Optional[List[SourceFix]] = None, source_str: Optional[str] = None, ) -> MetaSegment: """Create a new segment, with exactly the same position but different content. Returns: A copy of this object with new contents. Used mostly by fixes. NOTE: This *doesn't* copy the uuid. The edited segment is a new segment. """ if raw: raise ValueError( "Cannot set raw of a template placeholder!" ) # pragma: no cover if source_fixes or self.source_fixes: sf = (source_fixes or []) + self.source_fixes else: # pragma: no cover # There's _usually_ a source fix if we're editing a templated # segment - but not necessarily guaranteed. sf = None return self.__class__( pos_marker=self.pos_marker, source_str=source_str if source_str is not None else self.source_str, block_type=self.block_type, source_fixes=sf, block_uuid=self.block_uuid, ) sqlfluff-2.3.5/src/sqlfluff/core/parser/segments/raw.py000066400000000000000000000170661451700765000232070ustar00rootroot00000000000000"""Raw segment definitions. This is designed to be the root segment, without any children, and the output of the lexer. """ from typing import Any, FrozenSet, List, Optional, Tuple from uuid import uuid4 from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments.base import BaseSegment, SourceFix class RawSegment(BaseSegment): """This is a segment without any subsegments.""" type = "raw" _is_code = True _is_comment = False _is_whitespace = False # Classes inheriting from RawSegment may provide a _default_raw # to enable simple initialisation. _default_raw = "" def __init__( self, raw: Optional[str] = None, pos_marker: Optional[PositionMarker] = None, # For legacy and syntactic sugar we allow the simple # `type` argument here, but for more precise inheritance # we suggest using the `instance_types` option. type: Optional[str] = None, instance_types: Tuple[str, ...] = (), trim_start: Optional[Tuple[str, ...]] = None, trim_chars: Optional[Tuple[str, ...]] = None, source_fixes: Optional[List[SourceFix]] = None, uuid: Optional[int] = None, ): """Initialise raw segment. If raw is not provided, we default to _default_raw if present. If pos_marker is not provided, it is assumed that this will be inserted later as part of a reposition phase. """ if raw is not None: # NB, raw *can* be an empty string and be valid self._raw = raw else: self._raw = self._default_raw self._raw_upper = self._raw.upper() # pos marker is required here. We ignore the typing initially # because it might *initially* be unset, but it will be reset # later. self.pos_marker: PositionMarker = pos_marker # type: ignore # Set the segments attribute to be an empty tuple. self.segments = () self.instance_types: Tuple[str, ...] if type: assert not instance_types, "Cannot set `type` and `instance_types`."
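            # e.g. (illustrative) `RawSegment("foo", type="word")` is
            # equivalent to `RawSegment("foo", instance_types=("word",))`.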
self.instance_types = (type,) else: self.instance_types = instance_types # What should we trim off the ends to get to content self.trim_start = trim_start self.trim_chars = trim_chars # Keep track of any source fixes self._source_fixes = source_fixes # UUID for matching (the int attribute of it) self.uuid = uuid or uuid4().int self.representation = "<{}: ({}) {!r}>".format( self.__class__.__name__, self.pos_marker, self.raw ) def __repr__(self) -> str: # This is calculated at __init__, because all elements are immutable # and this was previously recalculating the pos marker, # and became very expensive return self.representation def __setattr__(self, key: str, value: Any) -> None: """Overwrite BaseSegment's __setattr__ with BaseSegment's superclass.""" super(BaseSegment, self).__setattr__(key, value) # ################ PUBLIC PROPERTIES @property def is_code(self) -> bool: """Return True if this segment is code.""" return self._is_code @property def is_comment(self) -> bool: """Return True if this segment is a comment.""" return self._is_comment @property def is_whitespace(self) -> bool: """Return True if this segment is whitespace.""" return self._is_whitespace @property def raw(self) -> str: """Returns the raw segment.""" return self._raw @property def raw_upper(self) -> str: """Returns the raw segment in uppercase.""" return self._raw_upper @property def raw_segments(self) -> List["RawSegment"]: """Returns self to be compatible with calls to its superclass.""" return [self] @property def class_types(self) -> FrozenSet[str]: """The set of full types for this segment, including inherited. Add the surrogate type for raw segments. """ return frozenset(self.instance_types) | super().class_types @property def source_fixes(self) -> List[SourceFix]: """Return any source fixes as list.""" return self._source_fixes or [] # ################ INSTANCE METHODS def invalidate_caches(self) -> None: """Overwrite superclass functionality.""" pass def get_type(self) -> str: """Returns the type of this segment as a string.""" if self.instance_types: return self.instance_types[0] return super().get_type() def is_type(self, *seg_type: str) -> bool: """Extend the parent class method with the surrogate types.""" if set(self.instance_types).intersection(seg_type): return True return self.class_is_type(*seg_type) def get_raw_segments(self) -> List["RawSegment"]: """Iterate raw segments, mostly for searching.""" return [self] def raw_trimmed(self) -> str: """Return a trimmed version of the raw content. Returns: str: The trimmed version of the raw content. """ raw_buff = self.raw if self.trim_start: for seq in self.trim_start: if raw_buff.startswith(seq): raw_buff = raw_buff[len(seq) :] if self.trim_chars: raw_buff = self.raw # for each thing to trim for seq in self.trim_chars: # trim start while raw_buff.startswith(seq): raw_buff = raw_buff[len(seq) :] # trim end while raw_buff.endswith(seq): raw_buff = raw_buff[: -len(seq)] return raw_buff return raw_buff def stringify( self, ident: int = 0, tabsize: int = 4, code_only: bool = False ) -> str: """Use indentation to render this segment and its children as a string. Args: ident (int, optional): The indentation level. Defaults to 0. tabsize (int, optional): The size of each tab. Defaults to 4. code_only (bool, optional): Whether to render only the code. Defaults to False. Returns: str: The rendered string. 
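        For a raw segment this is a single preface line only, since raw
        segments have no children to render.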
""" preface = self._preface(ident=ident, tabsize=tabsize) return preface + "\n" def _suffix(self) -> str: """Return any extra output required at the end when logging. NB Override this for specific subclasses if we want extra output. Returns: str: The extra output. """ return f"{self.raw!r}" def edit( self, raw: Optional[str] = None, source_fixes: Optional[List[SourceFix]] = None ) -> "RawSegment": """Create a new segment, with exactly the same position but different content. Args: raw (Optional[str]): The new content for the segment. source_fixes (Optional[List[SourceFix]]): A list of fixes to be applied to the segment. Returns: RawSegment: A copy of this object with new contents. Used mostly by fixes. NOTE: This *doesn't* copy the uuid. The edited segment is a new segment. """ return self.__class__( raw=raw or self.raw, pos_marker=self.pos_marker, instance_types=self.instance_types, trim_start=self.trim_start, trim_chars=self.trim_chars, source_fixes=source_fixes or self.source_fixes, ) sqlfluff-2.3.5/src/sqlfluff/core/parser/types.py000066400000000000000000000044231451700765000217260ustar00rootroot00000000000000"""Complex Type helpers.""" from enum import Enum from typing import TYPE_CHECKING, FrozenSet, Optional, Tuple, Union if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.matchable import Matchable from sqlfluff.core.parser.segments.generator import SegmentGenerator # When defining elements of a dialect they can be matchables or generators. DialectElementType = Union["Matchable", "SegmentGenerator"] # Simple hints has a set of strings first and a set of types second. SimpleHintType = Optional[Tuple[FrozenSet[str], FrozenSet[str]]] # The content type of the set of bracket pairs. # bracket_type, start_ref, end_ref, persists BracketPairTuple = Tuple[str, str, str, bool] # Define the potential parse modes. These are used in grammars # to define how greedy they are in claiming unmatched segments. # While the default is to only claim what they can match this # can make pinpointing unparsable sections very difficult. By # occasionally allowing more eager matching (for example in the # content of bracketed expressions), we can provide more helpful # feedback to the user. ParseMode = Enum( "ParseMode", [ # Strict only returns a match if the full content matches. # i.e. if it's not a successful match, then don't return _any_ # match and never raise unparsable sections. # NOTE: This is the default for all grammars. "STRICT", # Greedy will always return a match, providing there is at least # one code element before a terminators. Terminators are not included # in the match, but are searched for before matching any content. Segments # which are part of any terminator (or beyond) are not available for # matching by any content. # NOTE: This replicates the `GreedyUntil` semantics. "GREEDY", # Optionally, a variant on "GREEDY", will return behave like "STRICT" # if nothing matches, but behaves like "GREEDY" once something has # matched. # NOTE: This replicates the `StartsWith` semantics. "GREEDY_ONCE_STARTED", # TODO: All of the existing modes here match terminators _before_ # matching the majority of content. While that is safer, there should # be room for more efficient parsing modes in some cases. 
], ) sqlfluff-2.3.5/src/sqlfluff/core/plugin/000077500000000000000000000000001451700765000202075ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/plugin/__init__.py000066400000000000000000000006101451700765000223150ustar00rootroot00000000000000"""Marker to be imported and used in plugins (and for own implementations).""" from typing import Any, Callable, TypeVar, cast import pluggy # Improvement suggested by @oremanj on python/typing gitter F = TypeVar("F", bound=Callable[..., Any]) project_name = "sqlfluff" plugin_base_name = f"{project_name}-plugin" hookimpl = cast(Callable[[F], F], pluggy.HookimplMarker(plugin_base_name)) sqlfluff-2.3.5/src/sqlfluff/core/plugin/hookspecs.py000066400000000000000000000012241451700765000225560ustar00rootroot00000000000000"""Defines the specification to implement a plugin.""" from abc import abstractmethod import pluggy from sqlfluff.core.plugin import plugin_base_name hookspec = pluggy.HookspecMarker(plugin_base_name) class PluginSpec: """Defines the method signatures for plugin implementations.""" @hookspec @abstractmethod def get_rules(self): """Get plugin rules.""" @hookspec @abstractmethod def load_default_config(self) -> dict: """Loads the default configuration for the plugin.""" @hookspec @abstractmethod def get_configs_info(self) -> dict: """Get rule config validations and descriptions.""" sqlfluff-2.3.5/src/sqlfluff/core/plugin/host.py000066400000000000000000000043361451700765000215430ustar00rootroot00000000000000"""Defines the plugin manager getter. NOTE: The plugin manager will load all of the plugins on the first pass. Each plugin will also load the plugin manager on load to register itself. To ensure this is as performant as possible, we cache the plugin manager within the context of each thread. """ from contextvars import ContextVar from typing import Optional import pluggy from sqlfluff.core.plugin import plugin_base_name, project_name from sqlfluff.core.plugin.hookspecs import PluginSpec _plugin_manager: ContextVar[Optional[pluggy.PluginManager]] = ContextVar( "_plugin_manager", default=None ) plugins_loaded: ContextVar[bool] = ContextVar("plugins_loaded", default=False) # NOTE: The is_main_process context var is defined here, but # we rely on each parallel runner (found in `runner.py`) to # maintain the value of this variable. is_main_process: ContextVar[bool] = ContextVar("is_main_process", default=True) def get_plugin_manager() -> pluggy.PluginManager: """Initializes the PluginManager. NOTE: We cache the plugin manager as a global to avoid reloading all the plugins each time. """ plugin_manager = _plugin_manager.get() if plugin_manager: return plugin_manager plugin_manager = pluggy.PluginManager(plugin_base_name) plugin_manager.add_hookspecs(PluginSpec) # NOTE: We set the plugin manager before loading the # entrypoints. This is because when we load the entry # points, this function gets called again - and we only # want to load the entry points once! _plugin_manager.set(plugin_manager) plugin_manager.load_setuptools_entrypoints(project_name) # Once plugins are loaded we set a second context var # to indicate that loading is complete. Other parts of # the codebase can use this to detect whether it's safe # to rely on all plugins having been loaded. plugins_loaded.set(True) return plugin_manager def purge_plugin_manager() -> None: """Purge the currently loaded plugin manager. NOTE: This method should not be used in normal SQLFluff operation, but exists so that in the test suite we can reliably clear the cached plugin manager and force plugins to be reloaded.
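    As a hedged sketch of that intended test-suite usage (illustrative only):

    .. code-block:: python

        purge_plugin_manager()
        pm = get_plugin_manager()  # Plugins are re-loaded from entry points.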
""" # Reset back to defaults. _plugin_manager.set(None) plugins_loaded.set(False) sqlfluff-2.3.5/src/sqlfluff/core/plugin/lib.py000066400000000000000000000024061451700765000213310ustar00rootroot00000000000000"""Base implementation for the plugin.""" from typing import Any, Dict, List, Type from sqlfluff.core.config import ConfigLoader from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule from sqlfluff.core.rules.config_info import STANDARD_CONFIG_INFO_DICT from sqlfluff.core.rules.loader import get_rules_from_path from sqlfluff.core.templaters import RawTemplater, core_templaters @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: All standard rules will eventually be loaded as plugins and so before 2.0.0, once all legacy plugin definitions are migrated, this function will be amended to return no rules. """ return get_rules_from_path() @hookimpl def get_templaters() -> List[Type[RawTemplater]]: """Get templaters.""" templaters = list(t for t in core_templaters()) return templaters @hookimpl def load_default_config() -> Dict[str, Any]: """Loads the default configuration for the plugin.""" return ConfigLoader.get_global().load_config_resource( package="sqlfluff.core", file_name="default_config.cfg", ) @hookimpl def get_configs_info() -> Dict[str, Any]: """Get rule config validations and descriptions.""" return STANDARD_CONFIG_INFO_DICT sqlfluff-2.3.5/src/sqlfluff/core/rules/000077500000000000000000000000001451700765000200435ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/rules/__init__.py000066400000000000000000000024321451700765000221550ustar00rootroot00000000000000"""Configuration and examples for individual rules.""" from sqlfluff.core.plugin.host import get_plugin_manager from sqlfluff.core.rules.base import ( BaseRule, EvalResultType, LintFix, LintResult, RuleGhost, RulePack, RuleSet, ) from sqlfluff.core.rules.config_info import STANDARD_CONFIG_INFO_DICT from sqlfluff.core.rules.context import RuleContext def _load_standard_rules() -> RuleSet: """Initialise the standard ruleset. We do this on each call so that dynamic rules changes are possible. """ std_rule_set = RuleSet(name="standard", config_info=STANDARD_CONFIG_INFO_DICT) # Iterate through the rules list and register each rule with the standard set. for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: std_rule_set.register(rule) return std_rule_set def get_ruleset(name: str = "standard") -> RuleSet: """Get a ruleset by name.""" std_rules = _load_standard_rules() lookup = {std_rules.name: std_rules} # Return a copy in case someone modifies the register. return lookup[name].copy() __all__ = ( "get_ruleset", "RuleSet", "RulePack", "BaseRule", "LintResult", "LintFix", "RuleContext", "RuleGhost", "EvalResultType", ) sqlfluff-2.3.5/src/sqlfluff/core/rules/base.py000066400000000000000000001371451451700765000213420ustar00rootroot00000000000000"""Implements the base rule class. Rules crawl through the trees returned by the parser and evaluate particular rules. The intent is that it should be possible for the rules to be expressed as simply as possible, with as much of the complexity abstracted away. 
The evaluation function should take enough arguments that it can evaluate the position of the given segment in relation to its neighbors. The segment which finally "triggers" the error should be the one that would be corrected, OR, if the rule relates to something that is missing, the rule should flag the segment FOLLOWING the place where the desired element is missing. """ import bdb import copy import fnmatch import logging import pathlib import re from collections import defaultdict, namedtuple from dataclasses import dataclass from typing import ( TYPE_CHECKING, Any, DefaultDict, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Type, Union, ) import regex from sqlfluff.core.errors import SQLFluffUserError, SQLLintError from sqlfluff.core.helpers.string import split_comma_separated_string from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.plugin.host import is_main_process, plugins_loaded from sqlfluff.core.rules.config_info import get_config_info from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import BaseCrawler from sqlfluff.core.rules.fix import LintFix from sqlfluff.core.templaters.base import TemplatedFile # Best solution for generic types on older python versions # https://github.com/python/typeshed/issues/7855 if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.config import FluffConfig from sqlfluff.core.dialects import Dialect from sqlfluff.core.plugin.hookspecs import PluginSpec from sqlfluff.core.rules.noqa import IgnoreMask _LoggerAdapter = logging.LoggerAdapter[logging.Logger] else: _LoggerAdapter = logging.LoggerAdapter # The ghost of a rule (mostly used for testing) RuleGhost = namedtuple("RuleGhost", ["code", "name", "description"]) # Instantiate the rules logger rules_logger = logging.getLogger("sqlfluff.rules") linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") class RuleLoggingAdapter(_LoggerAdapter): """A LoggingAdapter for rules which adds the code of the rule to it.""" def process(self, msg: str, kwargs: Any) -> Tuple[str, Any]: """Add the code element to the logging message before emit.""" return "[{}] {}".format(self.extra["code"] if self.extra else "", msg), kwargs class LintResult: """A class to hold the results of a rule evaluation. Args: anchor (:obj:`BaseSegment`, optional): A segment which represents the *position* of the problem. NB: Each fix will also hold its own reference to position, so this position is mostly for alerting the user to where the *problem* is. fixes (:obj:`list` of :obj:`LintFix`, optional): An array of any fixes which would correct this issue. If not present then it's assumed that this issue will have to be fixed manually. memory (:obj:`dict`, optional): An object which stores any working memory for the rule. The `memory` returned in any `LintResult` will be passed as an input to the next segment to be crawled. description (:obj:`str`, optional): A description of the problem identified as part of this result. If provided, this will override the rule's own description when the problem is reported to the user. source (:obj:`str`, optional): A string identifier for what generated the result. Within larger libraries like reflow this can be useful for tracking where a result came from.
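    As a hedged, illustrative sketch of how a rule might construct one
    (the rule logic here is hypothetical):

    .. code-block:: python

        def _eval(self, context):
            if context.segment.raw != context.segment.raw.upper():
                return LintResult(
                    anchor=context.segment,
                    description="Expected uppercase keyword.",
                )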
""" def __init__( self, anchor: Optional[BaseSegment] = None, fixes: Optional[List["LintFix"]] = None, memory: Optional[Any] = None, description: Optional[str] = None, source: Optional[str] = None, ): # An anchor of none, means no issue self.anchor = anchor # Fixes might be blank self.fixes = fixes or [] # Memory is passed back in the linting result self.memory = memory # store a description_override for later self.description = description # Optional code for where the result came from self.source: str = source or "" def __repr__(self) -> str: if not self.anchor: return "LintResult()" # The "F" at the end is short for "fixes", to indicate how many there are. fix_coda = f"+{len(self.fixes)}F" if self.fixes else "" if self.description: if self.source: return ( f"LintResult({self.description} [{self.source}]" f": {self.anchor}{fix_coda})" ) return f"LintResult({self.description}: {self.anchor}{fix_coda})" return f"LintResult({self.anchor}{fix_coda})" def to_linting_error(self, rule: "BaseRule") -> Optional[SQLLintError]: """Convert a linting result to a :exc:`SQLLintError` if appropriate.""" if self.anchor: # Allow description override from the LintResult description = self.description or rule.description return SQLLintError( rule=rule, segment=self.anchor, fixes=self.fixes, description=description, ) return None EvalResultType = Union[LintResult, List[LintResult], None] class RuleMetaclass(type): """The metaclass for rules. This metaclass provides provides auto-enrichment of the rule docstring so that examples, groups, aliases and names are added. The reason we enrich the docstring is so that it can be picked up by autodoc and all be displayed in the sqlfluff docs. """ # Precompile the regular expressions _doc_search_regex = re.compile( "(\\s{4}\\*\\*Anti-pattern\\*\\*|\\s{4}\\.\\. note::|" "\\s\\s{4}\\*\\*Configuration\\*\\*)", flags=re.MULTILINE, ) _valid_classname_regex = regex.compile(r"Rule_?([A-Z]{1}[a-zA-Z]+)?_([A-Z0-9]{4})") _valid_rule_name_regex = regex.compile(r"[a-z][a-z\.\_]+") @staticmethod def _populate_code_and_description( name: str, class_dict: Dict[str, Any] ) -> Dict[str, Any]: """Extract and validate the rule code & description. We expect that rules are defined as classes with the name `Rule_XXXX` where `XXXX` is of the form `LLNN`, where L is a letter and N is a two digit number. For backward compatibility we also still support the legacy format of LNNN i.e. a single letter and three digit number. The two letters should be indicative of the grouping and focus of the rule. e.g. capitalisation rules have the code CP for CaPitalisation. If this receives classes by any other name, then it will raise a :exc:`ValueError`. """ rule_name_match = RuleMetaclass._valid_classname_regex.match(name) # Validate the name if not rule_name_match: # pragma: no cover raise SQLFluffUserError( f"Tried to define rule class with " f"unexpected format: {name}. Format should be: " "'Rule_PluginName_LL23' (for plugins) or " "`Rule_LL23` (for core rules)." ) plugin_name, code = rule_name_match.groups() # If the docstring is multiline, then we extract just summary. description = class_dict["__doc__"].replace("``", "'").split("\n")[0] if plugin_name: code = f"{plugin_name}_{code}" class_dict["code"] = code class_dict["description"] = description return class_dict @staticmethod def _populate_docstring(name: str, class_dict: Dict[str, Any]) -> Dict[str, Any]: """Enrich the docstring in the class_dict. 
This takes the various defined values in the BaseRule class and uses them to populate documentation in the final class docstring so that it can be displayed in the sphinx docs. """ # Ensure that there _is_ a docstring. assert ( "__doc__" in class_dict ), f"Tried to define rule {name!r} without docstring." # Build up a buffer of entries to add to the docstring. fix_docs = ( " This rule is ``sqlfluff fix`` compatible.\n\n" if class_dict.get("is_fix_compatible", False) else "" ) name_docs = ( f" **Name**: ``{class_dict['name']}``\n\n" if class_dict.get("name", "") else "" ) alias_docs = ( (" **Aliases**: ``" + "``, ``".join(class_dict["aliases"]) + "``\n\n") if class_dict.get("aliases", []) else "" ) groups_docs = ( (" **Groups**: ``" + "``, ``".join(class_dict["groups"]) + "``\n\n") if class_dict.get("groups", []) else "" ) config_docs = "" # NOTE: We should only validate and add config keywords # into the docstring if the plugin loading methods have # fully completed (i.e. plugins_loaded.get() is True). if name == "BaseRule" or not is_main_process.get(): # Except if it's the base rule, or we're not in the main process/thread, # in which case we shouldn't try to alter the docstrings anyway. # NOTE: The order of imports within child threads/processes is less # controllable, and so we should just avoid checking whether plugins # are already loaded. pass elif not plugins_loaded.get(): # Show a warning if a plugin has its imports set up in a suboptimal # way. The example plugin imports the rules in both ways, to test the # triggering of this warning. rules_logger.warning( f"Rule {name!r} has been imported before all plugins " "have been fully loaded. For best performance, plugins " "should import any rule definitions within their `get_rules()` " "method. Please update your plugin to remove this warning. See: " "https://docs.sqlfluff.com/en/stable/developingplugins.html" ) elif class_dict.get("config_keywords", []): config_docs = "\n **Configuration**\n" config_info = get_config_info() for keyword in sorted(class_dict["config_keywords"]): try: info_dict = config_info[keyword] except KeyError: # pragma: no cover raise KeyError( "Config value {!r} for rule {} is not configured in " "`config_info`.".format(keyword, name) ) config_docs += "\n * ``{}``: {}".format( keyword, info_dict["definition"] ) if ( config_docs[-1] != "." and config_docs[-1] != "?" and config_docs[-1] != "\n" ): config_docs += "." if "validation" in info_dict: config_docs += " Must be one of ``{}``.".format( info_dict["validation"] ) config_docs += "\n" all_docs = fix_docs + name_docs + alias_docs + groups_docs + config_docs # Modify the docstring using the search regex. class_dict["__doc__"] = RuleMetaclass._doc_search_regex.sub( f"\n\n{all_docs}\n\n\\1", class_dict["__doc__"], count=1 ) # If the inserted string is not now in the docstring - append it on # the end. This just means the regex didn't find a better place to # put it. if all_docs not in class_dict["__doc__"]: class_dict["__doc__"] += f"\n\n{all_docs}" # Return the modified class_dict return class_dict def __new__( mcs, name: str, bases: List["BaseRule"], class_dict: Dict[str, Any], ) -> "RuleMetaclass": """Generate a new class.""" # Optionally, groups may be inherited. At this stage of initialisation # they won't have been. Check parent classes if they exist. # names, aliases and description are less appropriate to inherit. # NOTE: This applies in particular to CP02, which inherits all groups # from CP01. If we don't do this, those groups don't show in the docs.
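        # e.g. (illustrative): if Rule_CP02 subclasses Rule_CP01 without
        # declaring `groups`, the loop below copies Rule_CP01's groups
        # (such as ("all", "core", "capitalisation")) onto Rule_CP02.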
for base in reversed(bases): if "groups" in class_dict: break elif base.groups: class_dict["groups"] = base.groups break class_dict = RuleMetaclass._populate_docstring(name, class_dict) # Don't try and infer code and description for the base class if bases: class_dict = RuleMetaclass._populate_code_and_description(name, class_dict) # Validate rule names rule_name = class_dict.get("name", "") if rule_name: if not RuleMetaclass._valid_rule_name_regex.match(rule_name): raise SQLFluffUserError( f"Tried to define rule with unexpected " f"name format: {rule_name}. Rule names should be lowercase " "and snake_case with optional `.` characters to indicate " "a namespace or grouping. e.g. `layout.spacing`." ) # Use the stock __new__ method now we've adjusted the docstring. # There are no overload variants of type.__new__ that are compatible, so # we ignore type checking in this case. return super().__new__(mcs, name, bases, class_dict) # type: ignore class BaseRule(metaclass=RuleMetaclass): """The base class for a rule. Args: code (:obj:`str`): The identifier for this rule, used in inclusion or exclusion. description (:obj:`str`): A human readable description of what this rule does. It will be displayed when any violations are found. """ _check_docstring = True _works_on_unparsable = True _adjust_anchors = False targets_templated = False # Some fix routines do their own checking for whether their fixes # are safe around templated elements. For those - the default # safety checks might be inappropriate. In those cases, set # template_safe_fixes to True. template_safe_fixes = False # Config settings supported for this rule. # See config_info.py for supported values. config_keywords: List[str] = [] # Lint loop / crawl behavior. When appropriate, rules can (and should) # override these values to make linting faster. crawl_behaviour: BaseCrawler # Rules can override this to specify "post". "Post" rules are those that are # not expected to trigger any downstream rules, e.g. capitalization fixes. # They run on two occasions: # - On the first pass of the main phase # - In a second linter pass after the main phase lint_phase = "main" # Groups attribute to be overwritten. groups: Tuple[str, ...] = () # Name attribute to be overwritten. # NOTE: for backward compatibility we should handle the case # where no name is set gracefully. name: str = "" # Optional set of aliases for the rule. Most often used for old codes which # referred to this rule. aliases: Tuple[str, ...] = () # NOTE: code and description are provided here as hints, but should not # be set directly. They are set automatically by the metaclass based on # the class _name_ when defined. code: str description: str # Should we document this rule as fixable? Used by the metaclass to add # a line to the docstring. is_fix_compatible = False # Add comma separated string to Base Rule to ensure that it uses the same # Configuration that is defined in the Config.py file split_comma_separated_string = staticmethod(split_comma_separated_string) def __init__(self, code: str, description: str, **kwargs: Any) -> None: self.description = description self.code = code # kwargs represents the config passed to the rule. Add all kwargs as class # attributes so they can be accessed in rules which inherit from this class for key, value in kwargs.items(): self.__dict__[key] = value # We also define a custom logger here, which also includes the code # of the rule in the logging. 
self.logger = RuleLoggingAdapter(rules_logger, {"code": code}) # Validate that declared configuration options exist for keyword in self.config_keywords: if keyword not in kwargs.keys(): raise ValueError( ( "Unrecognized config '{}' for Rule {}. If this " "is a new option, please add it to " "`default_config.cfg`" ).format(keyword, code) ) @classmethod def get_config_ref(cls) -> str: """Return the config lookup ref for this rule. If a `name` is defined, it's the name - otherwise the code. The name is a much more understandable reference and so makes config files more readable. For backward compatibility however we also support the rule code for those without names. """ return cls.name if cls.name else cls.code def _eval(self, context: RuleContext) -> EvalResultType: """Evaluate this rule against the current context. This should indicate whether a linting violation has occurred and/or whether there is something to remember from this evaluation. Note that an evaluate function should always accept `**kwargs`, but if it relies on any available kwargs, it should explicitly call them out at definition. Returns: :obj:`LintResult`, list of :obj:`LintResult` or :obj:`None`. The reason that this method is called :meth:`_eval` and not `eval` is a bit of a hack with sphinx autodoc, to make it so that the rule documentation auto-generates nicely. """ raise NotImplementedError( ( "{} has not had its `eval` function defined. This is a problem " "with the rule setup." ).format(self.__class__.__name__) ) # pragma: no cover def crawl( self, tree: BaseSegment, dialect: "Dialect", fix: bool, templated_file: Optional["TemplatedFile"], ignore_mask: Optional["IgnoreMask"], fname: Optional[str], config: "FluffConfig", ) -> Tuple[ List[SQLLintError], Tuple[RawSegment, ...], List[LintFix], Optional[Dict[str, Any]], ]: """Run the rule on a given tree. Returns: A tuple of (vs, raw_stack, fixes, memory) """ root_context = RuleContext( dialect=dialect, fix=fix, templated_file=templated_file, path=pathlib.Path(fname) if fname else None, segment=tree, config=config, ) vs: List[SQLLintError] = [] fixes: List[LintFix] = [] # Propagates memory from one rule _eval() to the next. memory = root_context.memory context = root_context for context in self.crawl_behaviour.crawl(root_context): try: context.memory = memory res = self._eval(context=context) except (bdb.BdbQuit, KeyboardInterrupt): # pragma: no cover raise # Any exception at this point would halt the linter and # cause the user to get no results except Exception as e: # If a filename is present, include it in the critical exception. 
self.logger.critical( f"Applying rule {self.code} to {fname!r} threw an Exception: {e}" if fname else f"Applying rule {self.code} threw an Exception: {e}", exc_info=True, ) assert context.segment.pos_marker exception_line, _ = context.segment.pos_marker.source_position() self._log_critical_errors(e) vs.append( SQLLintError( rule=self, segment=context.segment, fixes=[], description=( f"Unexpected exception: {str(e)};\n" "Could you open an issue at " "https://github.com/sqlfluff/sqlfluff/issues ?\n" "You can ignore this exception for now, by adding " f"'-- noqa: {self.code}' at the end\n" f"of line {exception_line}\n" ), ) ) return vs, context.raw_stack, fixes, context.memory new_lerrs: List[SQLLintError] = [] new_fixes: List[LintFix] = [] if res is None or res == []: # Assume this means no problems (also means no memory) pass elif isinstance(res, LintResult): # Extract any memory memory = res.memory self._adjust_anchors_for_fixes(context, res) self._process_lint_result( res, templated_file, ignore_mask, new_lerrs, new_fixes, tree ) elif isinstance(res, list) and all( isinstance(elem, LintResult) for elem in res ): # Extract any memory from the *last* one, assuming # it was the last to be added memory = res[-1].memory for elem in res: self._adjust_anchors_for_fixes(context, elem) self._process_lint_result( elem, templated_file, ignore_mask, new_lerrs, new_fixes, tree ) else: # pragma: no cover raise TypeError( "Got unexpected result [{!r}] back from linting rule: {!r}".format( res, self.code ) ) for lerr in new_lerrs: self.logger.info("!! Violation Found: %r", lerr.description) if new_fixes: if not self.is_fix_compatible: # pragma: no cover rules_logger.error( f"Rule {self.code} returned a fix but is not documented as " "`is_fix_compatible`, you may encounter unusual fixing " "behaviour. Report this as a bug to the developer of this rule." ) for lfix in new_fixes: self.logger.info("!! Fix Proposed: %r", lfix) # Consume the new results vs += new_lerrs fixes += new_fixes return vs, context.raw_stack if context else tuple(), fixes, context.memory # HELPER METHODS -------- @staticmethod def _log_critical_errors(error: Exception) -> None: # pragma: no cover """This method is monkey-patched into a "raise" for certain tests.""" pass def _process_lint_result( self, res: LintResult, templated_file: Optional[TemplatedFile], ignore_mask: Optional["IgnoreMask"], new_lerrs: List[SQLLintError], new_fixes: List[LintFix], root: BaseSegment, ) -> None: # Unless the rule declares that it's already template safe, do safety # checks. if not self.template_safe_fixes: self.discard_unsafe_fixes(res, templated_file) lerr = res.to_linting_error(rule=self) if not lerr: return None if ignore_mask: if not ignore_mask.ignore_masked_violations([lerr]): return None # Check whether this should be filtered out for being unparsable. # To do that we check the parents of the anchors (of the violation # and fixes) against the filter in the crawler. # NOTE: We use `.passes_filter` here to do the test for unparsable # to avoid duplicating code because that test is already implemented # there. anchors = [lerr.segment] + [fix.anchor for fix in lerr.fixes] for anchor in anchors: if not self.crawl_behaviour.passes_filter(anchor): # pragma: no cover # NOTE: This clause is untested, because it's a hard-to-produce # edge case. The latter clause is much more likely.
linter_logger.info( "Fix skipped due to anchor not passing filter: %s", anchor ) return None parent_stack = root.path_to(anchor) if not all( self.crawl_behaviour.passes_filter(ps.segment) for ps in parent_stack ): linter_logger.info( "Fix skipped due to parent of anchor not passing filter: %s", [ps.segment for ps in parent_stack], ) return None new_lerrs.append(lerr) new_fixes.extend(res.fixes) @staticmethod def filter_meta( segments: Sequence[BaseSegment], keep_meta: bool = False ) -> Tuple[BaseSegment, ...]: """Filter the segments to non-meta. Or optionally the opposite if keep_meta is True. """ buff = [] for elem in segments: if elem.is_meta is keep_meta: buff.append(elem) return tuple(buff) @classmethod def get_parent_of( cls, segment: BaseSegment, root_segment: BaseSegment ) -> Optional[BaseSegment]: # pragma: no cover TODO? """Return the segment immediately containing segment. NB: This is recursive. Args: segment: The segment to look for. root_segment: Some known parent of the segment we're looking for (although likely not the direct parent in question). """ if segment in root_segment.segments: return root_segment elif root_segment.segments: # try each of the subsegments for sub in root_segment.segments: p = cls.get_parent_of(segment, sub) if p: return p # Not directly in the segment and # no subsegments to check. Return None. return None @staticmethod def discard_unsafe_fixes( lint_result: LintResult, templated_file: Optional[TemplatedFile] ) -> None: """Remove (discard) LintResult fixes if they are "unsafe". By removing its fixes, a LintResult will still be reported, but it will be treated as _unfixable_. """ if not lint_result.fixes or not templated_file: return # Check for fixes that touch templated code. for fix in lint_result.fixes: if fix.has_template_conflicts(templated_file): linter_logger.info( " * Discarding fixes that touch templated code: %s", lint_result.fixes, ) lint_result.fixes = [] return # Issue 3079: Fixes that span multiple template blocks are bad. Don't # permit them. block_indices: Set[int] = set() for fix in lint_result.fixes: fix_slices = fix.get_fix_slices(templated_file, within_only=True) for fix_slice in fix_slices: # Ignore fix slices that exist only in the source. For purposes # of this check, it's not meaningful to say that a fix "touched" # one of these. if not fix_slice.is_source_only_slice(): block_indices.add(fix_slice.block_idx) if len(block_indices) > 1: linter_logger.info( " * Discarding fixes that span multiple template blocks: %s", lint_result.fixes, ) lint_result.fixes = [] return @classmethod def _adjust_anchors_for_fixes( cls, context: RuleContext, lint_result: LintResult ) -> None: """Makes simple fixes to the anchor position for fixes. Some rules return fixes where the anchor is too low in the tree. These are most often rules like LT02 and LT05 that make whitespace changes without a "deep" understanding of the parse structure. This function attempts to correct those issues automatically. It may not be perfect, but it should be an improvement over the old behaviour, where rules like LT02 often corrupted the parse tree, placing spaces in weird places that caused issues with other rules. For more context, see issue #1304. 
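        As a hedged illustration: a ``create_before`` fix anchored on a leaf
        whitespace segment may be re-anchored (via ``_choose_anchor_segment``
        below) onto an ancestor which shares the same start point, where an
        insertion is more likely to be valid.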
""" if not cls._adjust_anchors: return for fix in lint_result.fixes: if fix.anchor: fix.anchor = cls._choose_anchor_segment( # If no parent stack, that means the segment itself is the root context.parent_stack[0] if context.parent_stack else context.segment, fix.edit_type, fix.anchor, ) @staticmethod def _choose_anchor_segment( root_segment: BaseSegment, edit_type: str, segment: BaseSegment, filter_meta: bool = False, ) -> BaseSegment: """Choose the anchor point for a lint fix, i.e. where to apply the fix. From a grammar perspective, segments near the leaf of the tree are generally less likely to allow general edits such as whitespace insertion. This function avoids such issues by taking a proposed anchor point (assumed to be near the leaf of the tree) and walking "up" the parse tree as long as the ancestor segments have the same start or end point (depending on the edit type) as "segment". This newly chosen anchor is more likely to be a valid anchor point for the fix. """ if edit_type not in ("create_before", "create_after"): return segment anchor: BaseSegment = segment child: BaseSegment = segment path: Optional[List[BaseSegment]] = ( [ps.segment for ps in root_segment.path_to(segment)] if root_segment else None ) assert path, f"No path found from {root_segment} to {segment}!" for seg in path[::-1]: # If the segment allows non code ends, then no problem. # We're done. This is usually the outer file segment. if seg.can_start_end_non_code: linter_logger.debug( "Stopping hoist at %s, as allows non code ends.", seg ) break # Which lists of children to check against. children_lists: List[List[BaseSegment]] = [] if filter_meta: # Optionally check against filtered (non-meta only) children. children_lists.append( [child for child in seg.segments if not child.is_meta] ) # Always check against the full set of children. children_lists.append(list(seg.segments)) children: List[BaseSegment] for children in children_lists: if edit_type == "create_before" and children[0] is child: linter_logger.debug( "Hoisting anchor from before %s to %s", anchor, seg ) anchor = seg assert anchor.raw.startswith(segment.raw) child = seg break elif edit_type == "create_after" and children[-1] is child: linter_logger.debug( "Hoisting anchor from after %s to %s", anchor, seg ) anchor = seg assert anchor.raw.endswith(segment.raw) child = seg break return anchor @dataclass(frozen=True) class RuleManifest: """Element in the rule register.""" code: str name: str description: str groups: Tuple[str, ...] aliases: Tuple[str, ...] rule_class: Type[BaseRule] @dataclass class RulePack: """A bundle of rules to be applied. This contains a set of rules, post filtering but also contains the mapping required to interpret any noqa messages found in files. The reason for this object is that rules are filtered and instantiated into this pack in the main process when running in multi-processing mode so that user defined rules can be used without reference issues. Attributes: rules (:obj:`list` of :obj:`BaseRule`): A filtered list of instantiated rules to be applied to a given file. reference_map (:obj:`dict`): A mapping of rule references to the codes they refer to, e.g. `{"my_ref": {"LT01", "LT02"}}`. The references (i.e. the keys) may be codes, groups, aliases or names. The values of the mapping are sets of rule codes *only*. This object acts as a lookup to be able to translate selectors (which may contain diverse references) into a consolidated list of rule codes. 
This mapping contains the full set of rules, rather than just the filtered set present in the `rules` attribute. """ rules: List[BaseRule] reference_map: Dict[str, Set[str]] def codes(self) -> Iterator[str]: """Returns an iterator through the codes contained in the pack.""" return (r.code for r in self.rules) class RuleSet: """Class to define a ruleset. A rule set is instantiated on module load, but the references to each of its classes are instantiated at runtime. This means that configuration values can be passed to those rules live and be responsive to any changes in configuration from the path that the file is in. Rules should be fetched using the :meth:`get_rulelist` command which also handles any filtering (i.e. allowlisting and denylisting). New rules should be added to the instance of this class using the :meth:`register` decorator. That decorator registers the class, but also performs basic type and name-convention checks. The code for the rule will be parsed from the name, the description from the docstring. The eval function is assumed to be overridden by the subclass; the parent class raises an error from this function if it is not overridden. """ def __init__(self, name: str, config_info: Dict[str, Dict[str, Any]]) -> None: self.name = name self.config_info = config_info self._register: Dict[str, RuleManifest] = {} def _validate_config_options( self, config: "FluffConfig", rule_ref: Optional[str] = None ) -> None: """Ensure that all config options are valid. Config options can also be checked for a specific rule e.g. CP01. """ rule_config = config.get_section("rules") for config_name, info_dict in self.config_info.items(): config_option = ( rule_config.get(config_name) if not rule_ref else rule_config.get(rule_ref).get(config_name) ) valid_options = info_dict.get("validation") if ( valid_options and config_option not in valid_options and config_option is not None ): raise ValueError( ( "Invalid option '{}' for {} configuration. Must be one of {}" ).format( config_option, config_name, valid_options, ) ) def register( self, cls: Type[BaseRule], plugin: Optional["PluginSpec"] = None ) -> Type[BaseRule]: """Decorate a class with this to add it to the ruleset. .. code-block:: python @myruleset.register class Rule_LT01(BaseRule): "Description of rule." def eval(self, **kwargs): return LintResult() We expect that rules are defined as classes with the name `Rule_XXXX` where `XXXX` is of the form `LLNN` (or the legacy `LNNN`), where L is a letter (literally L for *linting* by default) and N is a digit. If this receives classes by any other name, then it will raise a :exc:`ValueError`. """ code = cls.code # Check for code collisions. if code in self._register: # pragma: no cover raise ValueError( "Rule {!r} has already been registered on RuleSet {!r}!".format( code, self.name ) ) assert "all" in cls.groups, "Rule {!r} must belong to the 'all' group".format( code ) self._register[code] = RuleManifest( code=code, name=cls.name, description=cls.description, groups=cls.groups, aliases=cls.aliases, rule_class=cls, ) # Make sure we actually return the original class return cls def _expand_rule_refs( self, glob_list: List[str], reference_map: Dict[str, Set[str]] ) -> Set[str]: """Expand a list of rule references into a list of rule codes. Returns: :obj:`set` of :obj:`str` rule codes. """ expanded_rule_set: Set[str] = set() for r in glob_list: # Is it a direct reference? if r in reference_map: expanded_rule_set.update(reference_map[r]) # Otherwise treat as a glob expression on all references.
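            # e.g. (illustrative) a glob like "L*" would match every
            # reference key beginning with "L", pulling in the codes for
            # each matched reference.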
# NOTE: We expand _all_ references (i.e. groups, aliases, names # AND codes) so that we preserve the most backward compatibility # with existing references to legacy codes in config files. else: matched_refs = fnmatch.filter(reference_map.keys(), r) for matched in matched_refs: expanded_rule_set.update(reference_map[matched]) return expanded_rule_set def rule_reference_map(self) -> Dict[str, Set[str]]: """Generate a rule reference map for looking up rules. Generate the master reference map. The priority order is: codes > names > groups > aliases (i.e. if there's a collision between a name and an alias - we assume the alias is wrong) """ valid_codes: Set[str] = set(self._register.keys()) reference_map: Dict[str, Set[str]] = {code: {code} for code in valid_codes} # Generate name map. name_map: Dict[str, Set[str]] = { manifest.name: {manifest.code} for manifest in self._register.values() if manifest.name } # Check collisions. name_collisions = set(name_map.keys()) & valid_codes if name_collisions: # pragma: no cover # NOTE: This clause is untested, because it's quite hard to actually # have a valid name which replicates a valid code. The name validation # will probably catch it first. rules_logger.warning( "The following defined rule names were found which collide " "with codes. Those names will not be available for selection: %s", name_collisions, ) # Incorporate (with existing references taking precedence). reference_map = {**name_map, **reference_map} # Generate the group map. group_map: DefaultDict[str, Set[str]] = defaultdict(set) for manifest in self._register.values(): for group in manifest.groups: if group in reference_map: rules_logger.warning( "Rule %s defines group %r which is already defined as a " "name or code of %s. This group will not be available " "for use as a result of this collision.", manifest.code, group, reference_map[group], ) else: group_map[group].add(manifest.code) # Incorporate after all checks are done. reference_map = {**group_map, **reference_map} # Generate the alias map. alias_map: DefaultDict[str, Set[str]] = defaultdict(set) for manifest in self._register.values(): for alias in manifest.aliases: if alias in reference_map: rules_logger.warning( "Rule %s defines alias %r which is already defined as a " "name, code or group of %s. This alias will " "not be available for use as a result of this collision.", manifest.code, alias, reference_map[alias], ) else: alias_map[alias].add(manifest.code) # Incorporate after all checks are done. return {**alias_map, **reference_map} def get_rulepack(self, config: "FluffConfig") -> RulePack: """Use the config to return the appropriate rules. We use the config both for allowlisting and denylisting, but also for configuring the rules given the given config. """ # Validate all generic rule configs self._validate_config_options(config) # Fetch config section: rules_config = config.get_section("rules") # Generate the master reference map. The priority order is: # codes > names > groups > aliases # (i.e. if there's a collision between a name and an # alias - we assume the alias is wrong.) valid_codes: Set[str] = set(self._register.keys()) reference_map = self.rule_reference_map() valid_config_lookups = set( manifest.rule_class.get_config_ref() for manifest in self._register.values() ) # Validate config doesn't try to specify values for unknown rules. # NOTE: We _warn_ here rather than error. for unexpected_ref in [ # Filtering to dicts gives us the sections. 
k for k, v in rules_config.items() if isinstance(v, dict) # Only keeping ones we don't expect if k not in valid_config_lookups ]: rules_logger.warning( "Rule configuration contains a section for unexpected " f"rule {unexpected_ref!r}. These values will be ignored." ) # For convenience (and migration), if we do find a potential match # for the reference - add that as a warning. # NOTE: We don't actually accept config in these cases, even though # we could potentially match - because how to resolve _multiple_ # matching config sections is ambiguous. if unexpected_ref in reference_map: referenced_codes = reference_map[unexpected_ref] if len(referenced_codes) == 1: referenced_code = list(referenced_codes)[0] referenced_name = self._register[referenced_code].name config_ref = self._register[ referenced_code ].rule_class.get_config_ref() rules_logger.warning( "The reference was however found as a match for rule " f"{referenced_code} with name {referenced_name!r}. " "SQLFluff assumes configuration for this rule will " f"be specified in 'sqlfluff:rules:{config_ref}'." ) elif referenced_codes: rules_logger.warning( "The reference was found as a match for multiple rules: " f"{referenced_codes}. Config should be specified by the " "name of the relevant rule, e.g. " "'sqlfluff:rules:capitalisation.keywords'." ) # The lists here are lists of references, which might be codes, # names, aliases or groups. # We default the allowlist to all the rules if not set (i.e. not specifying # any rules, just means "all the rules"). allowlist = config.get("rule_allowlist") or list(valid_codes) denylist = config.get("rule_denylist") or [] allowlisted_unknown_rule_codes = [ r for r in allowlist # Add valid groups to the register when searching for invalid rules _only_ if not fnmatch.filter(reference_map.keys(), r) ] if any(allowlisted_unknown_rule_codes): rules_logger.warning( "Tried to allowlist unknown rule references: {!r}".format( allowlisted_unknown_rule_codes ) ) denylisted_unknown_rule_codes = [ r for r in denylist if not fnmatch.filter(reference_map.keys(), r) ] if any(denylisted_unknown_rule_codes): # pragma: no cover rules_logger.warning( "Tried to denylist unknown rule references: {!r}".format( denylisted_unknown_rule_codes ) ) keylist = sorted(self._register.keys()) # First we expand the allowlist and denylist globs expanded_allowlist = self._expand_rule_refs(allowlist, reference_map) expanded_denylist = self._expand_rule_refs(denylist, reference_map) # Then we filter the rules keylist = [ r for r in keylist if r in expanded_allowlist and r not in expanded_denylist ] # Construct the kwargs for each rule and instantiate in turn. instantiated_rules = [] # Keep only config which isn't a section (for specific rule) (i.e. isn't a dict) # We'll handle those directly in the specific rule config section below. generic_rule_config = { k: v for k, v in rules_config.items() if not isinstance(v, dict) } for code in keylist: kwargs = {} rule_class = self._register[code].rule_class # Fetch the lookup code for the rule.
rule_config_ref = rule_class.get_config_ref() specific_rule_config = config.get_section(("rules", rule_config_ref)) if generic_rule_config: kwargs.update(generic_rule_config) if specific_rule_config: # Validate specific rule config before adding self._validate_config_options(config, rule_config_ref) kwargs.update(specific_rule_config) kwargs["code"] = code # Allow variable substitution in making the description kwargs["description"] = self._register[code].description.format(**kwargs) # Instantiate when ready instantiated_rules.append(rule_class(**kwargs)) return RulePack(instantiated_rules, reference_map) def copy(self) -> "RuleSet": """Return a copy of self with a separate register.""" new_ruleset = copy.copy(self) new_ruleset._register = self._register.copy() return new_ruleset sqlfluff-2.3.5/src/sqlfluff/core/rules/config_info.py000066400000000000000000000202731451700765000227010ustar00rootroot00000000000000"""Documenting and validating rule configuration. Provide a mapping with all configuration options, with information on valid inputs and definitions. This mapping is used to validate rule config inputs, as well as document rule configuration. """ from typing import Any, Dict from sqlfluff.core.plugin.host import get_plugin_manager STANDARD_CONFIG_INFO_DICT: Dict[str, Dict[str, Any]] = { "tab_space_size": { "validation": range(100), "definition": ( "The number of spaces to consider equal to one tab. " "Used in the fixing step of this rule." ), }, "indent_unit": { "validation": ["space", "tab"], "definition": "Whether to use tabs or spaces to add new indents.", }, "hanging_indents": { "validation": [True, False], "definition": ( "Whether hanging indents will be considered when evaluating the " "indentation of a file." ), }, "allow_scalar": { "validation": [True, False], "definition": ( "Whether or not to allow a single element in the " " select clause to be without an alias." ), }, "single_table_references": { "validation": ["consistent", "qualified", "unqualified"], "definition": "The expectation for references in single-table select.", }, "force_enable": { "validation": [True, False], "definition": ( "Run this rule even for dialects where this rule is disabled by default." ), }, "unquoted_identifiers_policy": { "validation": ["all", "aliases", "column_aliases"], "definition": "Types of unquoted identifiers to flag violations for.", }, "quoted_identifiers_policy": { "validation": ["all", "aliases", "column_aliases", "none"], "definition": "Types of quoted identifiers to flag violations for.", }, "capitalisation_policy": { "validation": ["consistent", "upper", "lower", "capitalise"], "definition": "The capitalisation policy to enforce.", }, "extended_capitalisation_policy": { "validation": ["consistent", "upper", "lower", "pascal", "capitalise"], "definition": ( "The capitalisation policy to enforce, extended with PascalCase. " "This is separate from ``capitalisation_policy`` as it should not be " "applied to keywords." ), }, "select_clause_trailing_comma": { "validation": ["forbid", "require"], "definition": ( "Should trailing commas within select clauses be required or forbidden?" ), }, "ignore_comment_lines": { "validation": [True, False], "definition": ( "Should lines that contain only whitespace and comments" " be ignored when linting line lengths?" ), }, "ignore_comment_clauses": { "validation": [True, False], "definition": ( "Should comment clauses (e.g. column comments) be ignored" " when linting line lengths?" 
), }, "ignore_words": { "definition": ("Comma-separated list of words to ignore from rule"), }, "ignore_words_regex": { "definition": ( "Words to ignore from rule if they are a partial match for the regular " "expression. To ignore only full matches you can use ``^`` (beginning " "of text) and ``$`` (end of text). Due to regular expression operator " "precedence, it is good practice to use parentheses around everything " "between ``^`` and ``$``." ), }, "forbid_subquery_in": { "validation": ["join", "from", "both"], "definition": "Which clauses should be linted for subqueries?", }, "prefer_count_1": { "validation": [True, False], "definition": ("Should count(1) be preferred over count(*) and count(0)?"), }, "prefer_count_0": { "validation": [True, False], "definition": ("Should count(0) be preferred over count(*) and count(1)?"), }, "operator_new_lines": { "validation": ["before", "after"], "definition": ("Should operator be placed before or after newlines?"), }, "aliasing": { "validation": ["implicit", "explicit"], "definition": ( "Should alias have an explicit AS or is implicit aliasing required?" ), }, "fully_qualify_join_types": { "validation": ["inner", "outer", "both"], "definition": ("Which types of JOIN clauses should be fully qualified?"), }, "multiline_newline": { "validation": [True, False], "definition": ( "Should semi-colons be placed on a new line after multi-line statements?" ), }, "require_final_semicolon": { "validation": [True, False], "definition": ( "Should final semi-colons be required? " "(N.B. forcing trailing semi-colons is not recommended for dbt users " "as it can cause issues when wrapping the query within other SQL queries)." ), }, "group_by_and_order_by_style": { "validation": ["consistent", "implicit", "explicit"], "definition": ( "The expectation for using explicit column name references " "or implicit positional references." ), }, "allow_space_in_identifier": { "validation": [True, False], "definition": ("Should spaces in identifiers be allowed?"), }, "additional_allowed_characters": { "definition": ( "Optional list of extra allowed characters, " "in addition to alphanumerics (A-Z, a-z, 0-9) and underscores." ), }, "prefer_quoted_identifiers": { "validation": [True, False], "definition": ( "If ``True``, requires every identifier to be quoted. " "Defaults to ``False``." ), }, "prefer_quoted_keywords": { "validation": [True, False], "definition": ( "If ``True``, requires every keyword used as an identifier to be quoted. " "Defaults to ``False``." ), }, "blocked_words": { "definition": ( "Optional, comma-separated list of blocked words which should not be used " "in statements." ), }, "blocked_regex": { "definition": ( "Optional, regex of blocked pattern which should not be used in statements." ), }, "match_source": { "definition": ( "Optional, also match regex of blocked pattern before applying templating." ), }, "preferred_quoted_literal_style": { "validation": ["consistent", "single_quotes", "double_quotes"], "definition": ( "Preferred quoting style to use for the quoted literals. If set to " "``consistent`` quoting style is derived from the first quoted literal " "in the file." ), }, "min_alias_length": { "validation": range(1000), "definition": ( "The minimum length of an alias to allow without raising a violation." ), }, "max_alias_length": { "validation": range(1000), "definition": ( "The maximum length of an alias to allow without raising a violation." ), }, "wildcard_policy": { "validation": ["single", "multiple"], "definition": "Treatment of wildcards.
Defaults to ``single``.", }, "preferred_type_casting_style": { "validation": ["consistent", "shorthand", "convert", "cast"], "definition": ("The expectation for using SQL type casting"), }, "preferred_first_table_in_join_clause": { "validation": ["earlier", "later"], "definition": ( "Which table to list first when joining two tables. " "Defaults to ``earlier``." ), }, } def get_config_info() -> Dict[str, Any]: """Gets the config from core sqlfluff and sqlfluff plugins and merges them.""" plugin_manager = get_plugin_manager() configs_info = plugin_manager.hook.get_configs_info() return { k: v for config_info_dict in configs_info for k, v in config_info_dict.items() } sqlfluff-2.3.5/src/sqlfluff/core/rules/context.py000066400000000000000000000033241451700765000221030ustar00rootroot00000000000000"""Define RuleContext class.""" import pathlib from dataclasses import dataclass, field from typing import Any, Optional, Tuple from sqlfluff.core.config import FluffConfig from sqlfluff.core.dialects import Dialect from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.templaters.base import TemplatedFile @dataclass class RuleContext: """Class for holding the context passed to rule eval functions.""" # These don't change within a file. dialect: Dialect fix: bool templated_file: Optional[TemplatedFile] path: Optional[pathlib.Path] config: FluffConfig # These change within a file. # segment: The segment in question segment: BaseSegment # parent_stack: A tuple of the path from the root to this segment. parent_stack: Tuple[BaseSegment, ...] = field(default=tuple()) # raw_stack: All of the raw segments so far in the file raw_stack: Tuple[RawSegment, ...] = field(default=tuple()) # memory: Arbitrary storage for the rule memory: Any = field(default_factory=dict) # segment_idx: The index of this segment in the parent segment_idx: int = field(default=0) @property def siblings_pre(self) -> Tuple[BaseSegment, ...]: # pragma: no cover """Return sibling segments prior to self.segment.""" if self.parent_stack: return self.parent_stack[-1].segments[: self.segment_idx] else: return tuple() @property def siblings_post(self) -> Tuple[BaseSegment, ...]: """Return sibling segments after self.segment.""" if self.parent_stack: return self.parent_stack[-1].segments[self.segment_idx + 1 :] else: return tuple() # pragma: no cover sqlfluff-2.3.5/src/sqlfluff/core/rules/crawlers.py000066400000000000000000000133601451700765000222420ustar00rootroot00000000000000"""Definitions of crawlers.""" from abc import ABC, abstractmethod from typing import Any, Iterator, Set, cast from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.parser.segments.raw import RawSegment from sqlfluff.core.rules.context import RuleContext class BaseCrawler(ABC): """The base interface for crawler classes.""" def __init__(self, works_on_unparsable: bool = False, **kwargs: Any) -> None: self.works_on_unparsable = works_on_unparsable def passes_filter(self, segment: BaseSegment) -> bool: """Returns True if this segment is considered at all. This method is called during crawling but also in evaluating the anchors for linting violations and their fixes to make sure we don't get issues with linting sections of queries that we can't parse. See `BaseRule._process_lint_result()`.
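For example (illustrative): with the default ``works_on_unparsable=False``, any segment of type ``unparsable`` fails this filter, so rules skip regions of the file which failed to parse.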
""" return self.works_on_unparsable or not segment.is_type("unparsable") @abstractmethod def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process.""" class RootOnlyCrawler(BaseCrawler): """A crawler that doesn't crawl. This just yields one context on the root-level (topmost) segment of the file. """ def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process.""" if self.passes_filter(context.segment): yield context class SegmentSeekerCrawler(BaseCrawler): """A crawler that efficiently searches for specific segment types. The segment type(s) are specified on creation. """ def __init__( self, types: Set[str], provide_raw_stack: bool = False, allow_recurse: bool = True, **kwargs: Any ) -> None: self.types = types # Tracking a raw stack involves a lot of tuple manipulation, so we # only do it when required - otherwise we skip it. Rules can explicitly # request it when defining their crawler. self.provide_raw_stack = provide_raw_stack # If allow_recurse is false, then once a segment matches, none of it's # children will be returned. This is useful in cases where we might have # many start points, but one root segment will check any matching sub- # segments in the same evaluation. self.allow_recurse = allow_recurse super().__init__(**kwargs) def is_self_match(self, segment: BaseSegment) -> bool: """Does this segment match the relevant criteria.""" return segment.is_type(*self.types) def crawl(self, context: RuleContext) -> Iterator[RuleContext]: """Yields a RuleContext for each segment the rule should process. We assume that segments are yielded by their parent. """ # Check whether we should consider this segment _or it's children_ # at all. self_match = False if not self.passes_filter(context.segment): if self.provide_raw_stack: # pragma: no cover context.raw_stack += tuple(context.segment.raw_segments) return # Then check the segment itself, yield if it's a match. if self.is_self_match(context.segment): self_match = True yield context # Check whether any children? # Abort if not - we've already yielded self. # NOTE: This same clause also works if we did match but aren't # allowed to recurse. if not context.segment.segments or (self_match and not self.allow_recurse): # Add self to raw stack first if so. if self.provide_raw_stack: context.raw_stack += (cast(RawSegment, context.segment),) return # Check whether one of the targets is present (set intersection) if not self.types & context.segment.descendant_type_set: # None present. Don't look further. # This aggressive pruning helps performance. # Track raw stack if required. if self.provide_raw_stack: context.raw_stack += tuple(context.segment.raw_segments) return # NOTE: Full context is not implemented yet. More dev work required # before everything will be available here. # Given we know that one is present in here somewhere, search for it. new_parent_stack = context.parent_stack + (context.segment,) for idx, child in enumerate(context.segment.segments): # For performance reasons, don't create a new RuleContext for # each segment; just modify the existing one in place. This # requires some careful bookkeeping, but it avoids creating a # HUGE number of short-lived RuleContext objects # (#linter loops x #rules x #segments). # Importantly, we're resetting values here, because they # may have been modified deeper in the recursion. 
context.segment = child context.parent_stack = new_parent_stack context.segment_idx = idx yield from self.crawl(context) class ParentOfSegmentCrawler(SegmentSeekerCrawler): """A crawler that efficiently searches for parents of specific segment types. The segment type(s) are specified on creation. """ def is_self_match(self, segment: BaseSegment) -> bool: """Does this segment match the relevant criteria? We use the _direct_ child set here so that if any of the direct child segments match any of the types we're looking for, then we know that this segment is a parent of that kind of segment. """ return bool(self.types & segment.direct_descendant_type_set) sqlfluff-2.3.5/src/sqlfluff/core/rules/doc_decorators.py000066400000000000000000000030071451700765000234070ustar00rootroot00000000000000"""A collection of decorators to modify rule docstrings for Sphinx. NOTE: All of these decorators are deprecated from SQLFluff 2.0.0 onwards. They are still included to allow a transition period, but the functionality is now packaged in the BaseRule class via the RuleMetaclass. """ from typing import TYPE_CHECKING, Any, Type from sqlfluff.core.rules.base import rules_logger # noqa if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.rules.base import BaseRule def document_fix_compatible(cls: Type["BaseRule"]) -> Type["BaseRule"]: """Mark the rule as fixable in the documentation.""" rules_logger.warning( f"{cls.__name__} uses the @document_fix_compatible decorator " "which is deprecated in SQLFluff 2.0.0. Remove the decorator " "to resolve this warning." ) return cls def document_groups(cls: Type["BaseRule"]) -> Type["BaseRule"]: """Mark the rule's groups in the documentation.""" rules_logger.warning( f"{cls.__name__} uses the @document_groups decorator " "which is deprecated in SQLFluff 2.0.0. Remove the decorator " "to resolve this warning." ) return cls def document_configuration(cls: Type["BaseRule"], **kwargs: Any) -> Type["BaseRule"]: """Add a 'Configuration' section to a Rule docstring.""" rules_logger.warning( f"{cls.__name__} uses the @document_configuration decorator " "which is deprecated in SQLFluff 2.0.0. Remove the decorator " "to resolve this warning." ) return cls sqlfluff-2.3.5/src/sqlfluff/core/rules/fix.py000066400000000000000000001060141451700765000212050ustar00rootroot00000000000000"""Helper classes & methods for applying fixes to segments.""" import logging from collections import defaultdict from dataclasses import dataclass, field from itertools import chain from typing import ( TYPE_CHECKING, Dict, Iterable, Iterator, List, Optional, Set, Sized, Tuple, cast, ) from sqlfluff.core.parser import ( BaseSegment, PositionMarker, RawSegment, SourceFix, ) from sqlfluff.core.templaters import RawFileSlice, TemplatedFile if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.dialects import Dialect linter_logger = logging.getLogger("sqlfluff.linter") rules_logger = logging.getLogger("sqlfluff.rules") @dataclass class FixPatch: """An edit patch for a source file.""" templated_slice: slice fixed_raw: str # The patch category exists mostly for debugging and explanation # rather than for function. It allows traceability of *why* this patch was # generated. It has no significance for processing. patch_category: str source_slice: slice templated_str: str source_str: str def dedupe_tuple(self) -> Tuple[slice, str]: """Generate a tuple of this fix for deduping.""" return (self.source_slice, self.fixed_raw) class LintFix: """A class to hold a potential fix to a linting violation.
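For example (illustrative, names assumed): ``LintFix.replace(anchor_segment, [KeywordSegment("SELECT")])`` creates a fix which swaps ``anchor_segment`` for a single new keyword segment when the fix is later applied.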
Args: edit_type (:obj:`str`): One of `create_before`, `create_after`, `replace`, `delete` to indicate the kind of fix this represents. anchor (:obj:`BaseSegment`): A segment which represents the *position* that this fix should be applied at. For deletions it represents the segment to delete, for creations it implies the position to create at (with the existing element at this position to be moved *after* the edit), for a `replace` it implies the segment to be replaced. edit (iterable of :obj:`BaseSegment`, optional): For `replace` and `create` fixes, this holds the iterable of segments to create or replace at the given `anchor` point. source (iterable of :obj:`BaseSegment`, optional): For `replace` and `create` fixes, this holds iterable of segments that provided code. IMPORTANT: The linter uses this to prevent copying material from templated areas. """ def __init__( self, edit_type: str, anchor: BaseSegment, edit: Optional[Iterable[BaseSegment]] = None, source: Optional[Iterable[BaseSegment]] = None, ) -> None: if edit_type not in ( "create_before", "create_after", "replace", "delete", ): # pragma: no cover raise ValueError(f"Unexpected edit_type: {edit_type}") self.edit_type = edit_type if not anchor: # pragma: no cover raise ValueError("Fixes must provide an anchor.") self.anchor = anchor self.edit: Optional[List[BaseSegment]] = None if edit is not None: # Copy all the elements of edit to stop contamination. # We're about to start stripping the position markers # off some of the elements and we don't want to end up # stripping the positions of the original elements of # the parsed structure. self.edit = [s.copy() for s in edit] # Check that any edits don't have a position marker set. # We should rely on realignment to make position markers. # Strip position markers of anything enriched, otherwise things can get # blurry for seg in self.edit: if seg.pos_marker: # Developer warning. rules_logger.debug( "Developer Note: Edit segment found with preset position " "marker. These should be unset and calculated later." ) seg.pos_marker = None # Once stripped, we shouldn't replace any markers because # later code may rely on them being accurate, which we # can't guarantee with edits. self.source = [seg for seg in source if seg.pos_marker] if source else [] # On creation of the fix we'll also validate the edits are non-trivial. if self.edit_type in ("create_before", "create_after"): assert self.edit, "A create fix must have an edit." # They should all have a non-zero raw. assert all( seg.raw for seg in self.edit ), f"Invalid edit found: {self.edit}." elif self.edit_type == "replace": assert ( self.edit != self.anchor ), "Fix created which replaces segment with itself." def is_just_source_edit(self) -> bool: """Return whether this a valid source only edit.""" return ( self.edit_type == "replace" and self.edit is not None and len(self.edit) == 1 and self.edit[0].raw == self.anchor.raw ) def __repr__(self) -> str: if self.edit_type == "delete": detail = f"delete:{self.anchor.raw!r}" elif self.edit_type in ("replace", "create_before", "create_after"): seg_list = cast(List[BaseSegment], self.edit) new_detail = "".join(s.raw for s in seg_list) if self.edit_type == "replace": if self.is_just_source_edit(): seg_list = cast(List[BaseSegment], self.edit) detail = f"src-edt:{seg_list[0].source_fixes!r}" else: detail = f"edt:{self.anchor.raw!r}->{new_detail!r}" else: detail = f"create:{new_detail!r}" else: detail = "" # pragma: no cover TODO? 
return ( f"<LintFix: {self.edit_type} {self.anchor.get_type()}" f"@{self.anchor.pos_marker} {detail}>" ) def __eq__(self, other: object) -> bool: """Compare equality with another fix. A fix is equal to another if it is in the same place (position), with the same type and (if appropriate) the same edit values. """ # We have to assert this here rather than in the type annotation so we don't # violate the Liskov substitution principle. # More context here: https://stackoverflow.com/a/37557540/11381493 if not isinstance(other, LintFix): # pragma: no cover return NotImplemented if not self.edit_type == other.edit_type: return False # For checking anchor equality, first check types. if not self.anchor.class_types == other.anchor.class_types: return False # If types match, check uuids to see if they're the same original segment. if self.anchor.uuid != other.anchor.uuid: return False # Then compare edits, here we only need to check the raw and source # fixes (positions are meaningless). # Only do this if we have edits. if self.edit: # We have to get weird here to appease mypy --strict # mypy seems to have a bug where even though we check above to make sure # self.edit is not None it still thinks it could be None when doing the # type check below. But if we use cast(List[BaseSegment], self.edit) then # it throws a redundant-cast error, because magically now it _does_ know # that self.edit is not None. So we have to cast to Sized for the len() # check and to Iterable[BaseSegment] for the looped check to make mypy # happy. # 1. Check lengths edit_list = cast(Sized, self.edit) other_list = cast(Sized, other.edit) if len(edit_list) != len(other_list): return False # pragma: no cover # 2. Zip and compare edit_list2 = cast(Iterable[BaseSegment], self.edit) other_list2 = cast(Iterable[BaseSegment], other.edit) for a, b in zip(edit_list2, other_list2): # Check raws if a.raw != b.raw: return False # Check source fixes if a.source_fixes != b.source_fixes: return False return True @classmethod def delete(cls, anchor_segment: BaseSegment) -> "LintFix": """Delete supplied anchor segment.""" return cls("delete", anchor_segment) @classmethod def replace( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Replace supplied anchor segment with the edit segments.""" return cls("replace", anchor_segment, edit_segments, source) @classmethod def create_before( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Create edit segments before the supplied anchor segment.""" return cls( "create_before", anchor_segment, edit_segments, source, ) @classmethod def create_after( cls, anchor_segment: BaseSegment, edit_segments: Iterable[BaseSegment], source: Optional[Iterable[BaseSegment]] = None, ) -> "LintFix": """Create edit segments after the supplied anchor segment.""" return cls( "create_after", anchor_segment, edit_segments, source, ) def get_fix_slices( self, templated_file: TemplatedFile, within_only: bool ) -> Set[RawFileSlice]: """Returns slices touched by the fix.""" # Goal: Find the raw slices touched by the fix. Two cases, based on # edit type: # 1. "delete", "replace": Raw slices touching the anchor segment. # 2. "create_before", "create_after": Raw slices encompassing the two # character positions surrounding the insertion point (**NOT** the # whole anchor segment, because we're not *touching* the anchor # segment, we're inserting **RELATIVE** to it).
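# Illustrative example (positions assumed): for a "create_before" fix anchored at templated positions 10..20, the slice checked below is slice(9, 11), or slice(9, 10) if within_only is set.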
assert self.anchor.pos_marker, f"Anchor missing position marker: {self.anchor}" anchor_slice = self.anchor.pos_marker.templated_slice templated_slices = [anchor_slice] # If "within_only" is set for a "create_*" fix, the slice should only # include the area of code "within" the area of insertion, not the other # side. adjust_boundary = 1 if not within_only else 0 if self.edit_type == "create_before": # Consider the first position of the anchor segment and the # position just before it. templated_slices = [ slice(anchor_slice.start - 1, anchor_slice.start + adjust_boundary), ] elif self.edit_type == "create_after": # Consider the last position of the anchor segment and the # character just after it. templated_slices = [ slice(anchor_slice.stop - adjust_boundary, anchor_slice.stop + 1), ] elif ( self.edit_type == "replace" and self.anchor.pos_marker.source_slice.stop == self.anchor.pos_marker.source_slice.start ): # We're editing something with zero size in the source. This means # it likely _didn't exist_ in the source and so can be edited safely. # We return an empty set because this edit doesn't touch anything # in the source. return set() elif ( self.edit_type == "replace" and all(edit.is_type("raw") for edit in cast(List[RawSegment], self.edit)) and all(edit._source_fixes for edit in cast(List[RawSegment], self.edit)) ): # As an exception to the general rule about "replace" fixes (where # they're only safe if they don't touch a templated section at all), # source-only fixes are different. This clause handles that exception. # So long as the fix is *purely* source-only we can assume that the # rule has done the relevant due diligence on what it's editing in # the source and just yield the source slices directly. # More complicated fixes that are a blend of source and templated # fixes are currently not supported (mostly because they've # not arisen yet!), so further work would be required to support them # elegantly. rules_logger.debug("Source only fix.") source_edit_slices = [ fix.source_slice # We can assume they're all raw and all have source fixes, because we # check that above. for fix in chain.from_iterable( cast(List[SourceFix], edit._source_fixes) for edit in cast(List[RawSegment], self.edit) ) ] if len(source_edit_slices) > 1: # pragma: no cover raise NotImplementedError( "Unable to handle multiple source only slices." ) return set( templated_file.raw_slices_spanning_source_slice(source_edit_slices[0]) ) # TRICKY: For creations at the end of the file, there won't be an # existing slice. In this case, the function adds file_end_slice to the # result, as a sort of placeholder or sentinel value. We pass a literal # slice for "file_end_slice" so that later in this function, the LintFix # is interpreted as literal code. Otherwise, it could be interpreted as # a fix to *templated* code and incorrectly discarded. return self._raw_slices_from_templated_slices( templated_file, templated_slices, file_end_slice=RawFileSlice("", "literal", -1), ) def has_template_conflicts(self, templated_file: TemplatedFile) -> bool: """Based on the fix slices, should we discard the fix?""" # Check for explicit source fixes. # TODO: This doesn't account for potentially more complicated source fixes. # If we're replacing a single segment with many *and* doing source fixes # then they will be discarded here as unsafe.
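# Sketch of the intent (illustrative): replacing a segment with a copy of itself which only carries source_fixes is a pure source-only edit, and is deliberately not treated as a conflict below because the raw strings match.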
if self.edit_type == "replace" and self.edit and len(self.edit) == 1: edit: BaseSegment = self.edit[0] if edit.raw == self.anchor.raw and edit.source_fixes: return False # Given fix slices, check for conflicts. check_fn = all if self.edit_type in ("create_before", "create_after") else any fix_slices = self.get_fix_slices(templated_file, within_only=False) result = check_fn(fs.slice_type == "templated" for fs in fix_slices) if result or not self.source: return result # Fix slices were okay. Now check template safety of the "source" field. templated_slices = [ cast(PositionMarker, source.pos_marker).templated_slice for source in self.source ] raw_slices = self._raw_slices_from_templated_slices( templated_file, templated_slices ) return any(fs.slice_type == "templated" for fs in raw_slices) @staticmethod def _raw_slices_from_templated_slices( templated_file: TemplatedFile, templated_slices: List[slice], file_end_slice: Optional[RawFileSlice] = None, ) -> Set[RawFileSlice]: raw_slices: Set[RawFileSlice] = set() for templated_slice in templated_slices: try: raw_slices.update( templated_file.raw_slices_spanning_source_slice( templated_file.templated_slice_to_source_slice(templated_slice) ) ) except (IndexError, ValueError): # These errors will happen with "create_before" at the beginning # of the file or "create_after" at the end of the file. By # default, we ignore this situation. If the caller passed # "file_end_slice", add that to the result. In effect, # file_end_slice serves as a placeholder or sentinel value. if file_end_slice is not None: raw_slices.add(file_end_slice) return raw_slices @dataclass class AnchorEditInfo: """For a given fix anchor, count of the fix edit types and fixes for it.""" delete: int = field(default=0) replace: int = field(default=0) create_before: int = field(default=0) create_after: int = field(default=0) fixes: List["LintFix"] = field(default_factory=list) source_fixes: List[SourceFix] = field(default_factory=list) # First fix of edit_type "replace" in "fixes" _first_replace: Optional["LintFix"] = field(default=None) def add(self, fix: "LintFix") -> None: """Adds the fix and updates stats. We also allow potentially multiple source fixes on the same anchor by condensing them together here. """ if fix in self.fixes: # Deduplicate fixes in case it's already in there. return if fix.is_just_source_edit(): assert fix.edit # is_just_source_edit confirms there will be a list # so we can hint that to mypy. self.source_fixes += fix.edit[0].source_fixes # is there already a replace? if self._first_replace: assert self._first_replace.edit # is_just_source_edit confirms there will be a list # and that's the only way to get into _first_replace # if it's populated so we can hint that to mypy. linter_logger.info( "Multiple edits detected, condensing %s onto %s", fix, self._first_replace, ) self._first_replace.edit[0] = self._first_replace.edit[0].edit( source_fixes=self.source_fixes ) linter_logger.info("Condensed fix: %s", self._first_replace) # Return without otherwise adding in this fix. return self.fixes.append(fix) if fix.edit_type == "replace" and not self._first_replace: self._first_replace = fix setattr(self, fix.edit_type, getattr(self, fix.edit_type) + 1) @property def total(self) -> int: """Returns total count of fixes.""" return len(self.fixes) @property def is_valid(self) -> bool: """Returns True if valid combination of fixes for anchor. 
Cases: * 0-1 fixes of any type: Valid * 2 fixes: Valid if and only if types are create_before and create_after """ if self.total <= 1: # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this # function probably won't be called if there are 0 fixes, but 0 is # valid; it simply means "no fixes to apply". return True if self.total == 2: # This is only OK for this special case. We allow this because # the intent is clear (i.e. no conflict): Insert something *before* # the segment and something else *after* the segment. return self.create_before == 1 and self.create_after == 1 # Definitely bad if > 2. return False # pragma: no cover def compute_anchor_edit_info(fixes: List["LintFix"]) -> Dict[int, AnchorEditInfo]: """Group and count fixes by anchor, return dictionary.""" anchor_info = defaultdict(AnchorEditInfo) # type: ignore for fix in fixes: # :TRICKY: Use segment uuid as the dictionary key since # different segments may compare as equal. anchor_id = fix.anchor.uuid anchor_info[anchor_id].add(fix) return dict(anchor_info) def apply_fixes( segment: BaseSegment, dialect: "Dialect", rule_code: str, fixes: Dict[int, AnchorEditInfo], ) -> Tuple["BaseSegment", List["BaseSegment"], List["BaseSegment"], bool]: """Apply a dictionary of fixes to this segment. Used in applying fixes if we're fixing linting errors. If anything changes, this should return a new version of the segment rather than mutating the original. Note: We need to have fixes to apply AND this must have children. In the case of raw segments, they will be replaced or removed by their parent and so this function should just return self. """ if not fixes or segment.is_raw(): return segment, [], [], True seg_buffer = [] before = [] after = [] fixes_applied: List[LintFix] = [] requires_validate = False for seg in segment.segments: # Look for uuid match. # This handles potential positioning ambiguity. anchor_info: Optional[AnchorEditInfo] = fixes.pop(seg.uuid, None) if anchor_info is None: # No fix matches here, just add the segment and move on. seg_buffer.append(seg) continue # Otherwise there is a fix match. seg_fixes = anchor_info.fixes if ( len(seg_fixes) == 2 and seg_fixes[0].edit_type == "create_after" ): # pragma: no cover # Must be create_before & create_after. Swap so the # "before" comes first. seg_fixes.reverse() for f in anchor_info.fixes: assert f.anchor.uuid == seg.uuid fixes_applied.append(f) linter_logger.debug( "Matched fix for %s against segment: %s -> %s", rule_code, f, seg, ) # Deletes are easy. if f.edit_type == "delete": # We're just getting rid of this segment. requires_validate = True # NOTE: We don't add the segment in this case. continue # Otherwise it must be a replace or a create. assert f.edit_type in ( "replace", "create_before", "create_after", ), f"Unexpected edit_type: {f.edit_type!r} in {f!r}" if f.edit_type == "create_after" and len(anchor_info.fixes) == 1: # in the case of a creation after that is not part # of a create_before/create_after pair, also add # this segment before the edit. seg_buffer.append(seg) # We're doing a replacement (it could be a single # segment or an iterable) assert f.edit, f"Edit {f.edit_type!r} requires `edit`." consumed_pos = False for s in f.edit: seg_buffer.append(s) # If one of them has the same raw representation # then the first that matches gets to take the # original position marker. 
if f.edit_type == "replace" and s.raw == seg.raw and not consumed_pos: seg_buffer[-1].pos_marker = seg.pos_marker consumed_pos = True # If we're just editing a segment AND keeping the type the # same then no need to validate. Otherwise we should # trigger a validation (e.g. for creations or # multi-replace). if not ( f.edit_type == "replace" and len(f.edit) == 1 and f.edit[0].class_types == seg.class_types ): requires_validate = True if f.edit_type == "create_before": # in the case of a creation before, also add this # segment on the end seg_buffer.append(seg) # Invalidate any caches segment.invalidate_caches() # If any fixes applied, do an intermediate reposition. When applying # fixes to children and then trying to reposition them, that recursion # may rely on the parent having already populated positions for any # of the fixes applied there first. This ensures those segments have # working positions to work with. if fixes_applied: assert segment.pos_marker seg_buffer = list( segment._position_segments(tuple(seg_buffer), parent_pos=segment.pos_marker) ) # Then recurse (i.e. deal with the children) (Requeueing) seg_queue = seg_buffer seg_buffer = [] for seg in seg_queue: s, pre, post, validated = apply_fixes(seg, dialect, rule_code, fixes) # 'before' and 'after' will usually be empty. Only used when # lower-level fixes left 'seg' with non-code (usually # whitespace) segments as the first or last children. This is # generally not allowed (see the can_start_end_non_code field), # and these segments need to be "bubbled up" the tree. seg_buffer.extend(pre) seg_buffer.append(s) seg_buffer.extend(post) # If we fail to validate a child segment, make sure to validate this # segment. if not validated: requires_validate = True # Most correct whitespace positioning will have already been handled # _however_, the exception is `replace` edits which match start or # end with whitespace. We also need to handle any leading or trailing # whitespace ejected from the any fixes applied to child segments. # Here we handle those by checking the start and end of the resulting # segment sequence for whitespace. # If we're left with any non-code at the end, trim them off and pass them # up to the parent segment for handling. if not segment.can_start_end_non_code: _idx = 0 for _idx in range(0, len(seg_buffer)): if segment._is_code_or_meta(seg_buffer[_idx]): break before = seg_buffer[:_idx] seg_buffer = seg_buffer[_idx:] _idx = len(seg_buffer) for _idx in range(len(seg_buffer), 0, -1): if segment._is_code_or_meta(seg_buffer[_idx - 1]): break after = seg_buffer[_idx:] seg_buffer = seg_buffer[:_idx] # Reform into a new segment assert segment.pos_marker try: new_seg = segment.__class__( # Realign the segments within segments=segment._position_segments( tuple(seg_buffer), parent_pos=segment.pos_marker ), pos_marker=segment.pos_marker, # Pass through any additional kwargs **{k: getattr(segment, k) for k in segment.additional_kwargs}, ) except AssertionError as err: # pragma: no cover # An AssertionError on creating a new segment is likely a whitespace # check fail. If possible add information about the fixes we tried to # apply, before re-raising. # NOTE: only available in python 3.11+. if hasattr(err, "add_note"): err.add_note(f" After applying fixes: {fixes_applied}.") raise err # Only validate if there's a match_grammar. Otherwise we may get # strange results (for example with the BracketedSegment). 
if requires_validate and hasattr(new_seg, "match_grammar"): validated = new_seg.validate_segment_with_reparse(dialect) else: validated = not requires_validate # Return the new segment and any non-code that needs to bubble up # the tree. # NOTE: We pass on whether this segment has been validated. It's # very possible that our parsing here may fail depending on the # type of segment that has been replaced, but if not we rely on # a parent segment still being valid. If we get all the way up # to the root and it's still not valid - that's a problem. return new_seg, before, after, validated def _iter_source_fix_patches( segment: BaseSegment, templated_file: TemplatedFile ) -> Iterator[FixPatch]: """Yield any source patches as fixes now. NOTE: This yields source fixes for the segment and any of its children, so it's important to call it at the right point in the recursion to avoid yielding duplicates. """ for source_fix in segment.source_fixes: yield FixPatch( source_fix.templated_slice, source_fix.edit, patch_category="source", source_slice=source_fix.source_slice, templated_str=templated_file.templated_str[source_fix.templated_slice], source_str=templated_file.source_str[source_fix.source_slice], ) def iter_patches( segment: BaseSegment, templated_file: TemplatedFile ) -> Iterator[FixPatch]: """Iterate through the segments generating fix patches. The patches are generated in TEMPLATED space. This is important so that we defer dealing with any loops until later. At this stage everything *should* happen in templated order. Occasionally we have an insertion around a placeholder, so we also return a hint to deal with that. """ # Does it match? If so we can ignore it. assert segment.pos_marker templated_raw = templated_file.templated_str[segment.pos_marker.templated_slice] matches = segment.raw == templated_raw if matches: # First yield any source fixes yield from _iter_source_fix_patches(segment, templated_file) # Then return. return # If we're here, the segment doesn't match the original. linter_logger.debug( "# Changed Segment Found: %s at %s: Original: [%r] Fixed: [%r]", type(segment).__name__, segment.pos_marker.templated_slice, templated_raw, segment.raw, ) # If it's all literal, then we don't need to recurse. if segment.pos_marker.is_literal(): # First yield any source fixes yield from _iter_source_fix_patches(segment, templated_file) # Then yield the position in the source file and the patch yield FixPatch( source_slice=segment.pos_marker.source_slice, templated_slice=segment.pos_marker.templated_slice, patch_category="literal", fixed_raw=segment.raw, templated_str=templated_file.templated_str[ segment.pos_marker.templated_slice ], source_str=templated_file.source_str[segment.pos_marker.source_slice], ) # Can we go deeper? elif not segment.segments: # It's not literal, but it's also a raw segment. If we're going # to yield a change, we would have done it from the parent, so # we just abort from here. return # pragma: no cover TODO? else: # This segment isn't a literal, but has changed, we need to go deeper. # If there's an end of file segment or indent, ignore them just for the # purposes of patch iteration. # NOTE: This doesn't mutate the underlying `self.segments`. 
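# For example (illustrative): a trailing end_of_file marker or closing indent has no raw content to patch, so it is trimmed from the local working list below.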
segments = segment.segments while segments and segments[-1].is_type("end_of_file", "indent"): segments = segments[:-1] # Iterate through the child segments source_idx = segment.pos_marker.source_slice.start templated_idx = segment.pos_marker.templated_slice.start insert_buff = "" for seg in segments: # First check for insertions. # At this stage, everything should have a position. assert seg.pos_marker # We know it's an insertion if it has length but not in the templated # file. if seg.raw and seg.pos_marker.is_point(): # Add it to the insertion buffer if it has length: if seg.raw: insert_buff += seg.raw linter_logger.debug( "Appending insertion buffer. %r @idx: %s", insert_buff, templated_idx, ) continue # If we get here, then we know it's an original. Check for deletions at # the point before this segment (vs the TEMPLATED). # Deletions in this sense could also mean source consumption. start_diff = seg.pos_marker.templated_slice.start - templated_idx # Check to see whether there's a discontinuity before the current # segment if start_diff > 0 or insert_buff: # If we have an insert buffer, then it's an edit, otherwise a # deletion. # For the start of the next segment, we need the position of the # first raw, not the pos marker of the whole thing. That accounts # better for loops. first_segment_pos = seg.raw_segments[0].pos_marker yield FixPatch( # Whether the source slice is zero depends on the start_diff. # A non-zero start diff implies a deletion, or more likely # a consumed element of the source. We can use the tracking # markers from the last segment to recreate where this element # should be inserted in both source and template. source_slice=slice( source_idx, first_segment_pos.source_slice.start, ), templated_slice=slice( templated_idx, first_segment_pos.templated_slice.start, ), patch_category="mid_point", fixed_raw=insert_buff, templated_str="", source_str="", ) insert_buff = "" # Now we deal with any changes *within* the segment itself. yield from iter_patches(seg, templated_file=templated_file) # Once we've dealt with any patches from the segment, update # our position markers. source_idx = seg.pos_marker.source_slice.stop templated_idx = seg.pos_marker.templated_slice.stop # After the loop, we check whether there's a trailing deletion # or insert. Also valid if we still have an insertion buffer here. end_diff = segment.pos_marker.templated_slice.stop - templated_idx if end_diff or insert_buff: source_slice = slice( source_idx, segment.pos_marker.source_slice.stop, ) templated_slice = slice( templated_idx, segment.pos_marker.templated_slice.stop, ) # We determine the source_slice directly rather than # inferring it so that we can be very specific that # we ensure that fixes adjacent to source-only slices # (e.g. {% endif %}) are placed appropriately relative # to source-only slices. 
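# Illustrative example (values assumed): if the templated file ends with "...FROM foo\n" but the fixed tree ends with "...FROM foo;\n", the inserted ";" is still held in insert_buff at this point and is emitted as an "end_point" patch below.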
yield FixPatch( source_slice=source_slice, templated_slice=templated_slice, patch_category="end_point", fixed_raw=insert_buff, templated_str=templated_file.templated_str[templated_slice], source_str=templated_file.source_str[source_slice], ) sqlfluff-2.3.5/src/sqlfluff/core/rules/loader.py000066400000000000000000000030641451700765000216660ustar00rootroot00000000000000"""Methods to load rules.""" import os from glob import glob from importlib import import_module from typing import TYPE_CHECKING, List, Type if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.rules.base import BaseRule def get_rules_from_path( # All rule files are expected in the format of L*.py rules_path: str = os.path.abspath( os.path.join(os.path.dirname(__file__), "../../rules", "L*.py") ), base_module: str = "sqlfluff.rules", ) -> List[Type["BaseRule"]]: """Reads all of the Rule classes from a path into a list.""" # Create a rules dictionary for importing in # sqlfluff/src/sqlfluff/core/rules/__init__.py rules = [] for module in sorted(glob(rules_path)): # Manipulate the module path to extract the filename without the .py rule_id = os.path.splitext(os.path.basename(module))[0] # All rule classes are expected in the format of Rule_L* rule_class_name = f"Rule_{rule_id}" # NOTE: We import the module outside of the try clause to # properly catch any import errors. rule_module = import_module(f"{base_module}.{rule_id}") try: rule_class = getattr(rule_module, rule_class_name) except AttributeError as e: raise AttributeError( "Rule classes must be named in the format of Rule_*. " f"[{rule_class_name}]" ) from e # Add the rules to the rules dictionary for # sqlfluff/src/sqlfluff/core/rules/__init__.py rules.append(rule_class) return rules sqlfluff-2.3.5/src/sqlfluff/core/rules/noqa.py000066400000000000000000000323471451700765000213640ustar00rootroot00000000000000"""Defines container classes for handling noqa comments.""" import fnmatch import logging from dataclasses import dataclass from typing import Dict, List, Optional, Set, Tuple, cast from sqlfluff.core.errors import SQLBaseError, SQLParseError, SQLUnusedNoQaWarning from sqlfluff.core.parser import BaseSegment, RawSegment, RegexLexer # Instantiate the linter logger linter_logger: logging.Logger = logging.getLogger("sqlfluff.linter") @dataclass class NoQaDirective: """Parsed version of a 'noqa' comment.""" line_no: int # Source line number line_pos: int # Source line position rules: Optional[Tuple[str, ...]] # Affected rule names action: Optional[str] # "enable", "disable", or None raw_str: str = "" # The raw representation of the directive for warnings. used: bool = False # Has it been used. def _filter_violations_single_line( self, violations: List[SQLBaseError] ) -> List[SQLBaseError]: """Filter a list of violations based on this single line noqa. Also record whether this class was _used_ in any of that filtering. The "ignore" list is assumed to ONLY contain NoQaDirectives with action=None. """ assert not self.action matched_violations = [ v for v in violations if ( v.line_no == self.line_no and (self.rules is None or v.rule_code() in self.rules) ) ] if matched_violations: # Successful match, mark ignore as used. self.used = True return [v for v in violations if v not in matched_violations] else: return violations class IgnoreMask: """Structure to hold a set of 'noqa' directives.""" def __init__(self, ignores: List[NoQaDirective]): self._ignore_list = ignores def __repr__(self): # pragma: no cover return "<IgnoreMask>" # ### Construction class methods.
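# Illustrative examples of the comment forms parsed below (fragments assumed): "-- noqa" ignores all rules on its line; "-- noqa: LT05" ignores only LT05; "-- noqa: disable=CP01" ignores CP01 from this line onwards; "-- noqa: enable=all" stops ignoring everything.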
@staticmethod def _parse_noqa( comment: str, line_no: int, line_pos: int, reference_map: Dict[str, Set[str]], ): """Extract ignore mask entries from a comment string.""" # Also trim any whitespace afterward # Comment lines can also have noqa e.g. # --dafhsdkfwdiruweksdkjdaffldfsdlfjksd -- noqa: LT05 # Therefore extract last possible inline ignore. comment = [c.strip() for c in comment.split("--")][-1] if comment.startswith("noqa"): # This is an ignore identifier comment_remainder = comment[4:] if comment_remainder: if not comment_remainder.startswith(":"): return SQLParseError( "Malformed 'noqa' section. Expected 'noqa: <rule>[,...]'", line_no=line_no, ) comment_remainder = comment_remainder[1:].strip() if comment_remainder: action: Optional[str] if "=" in comment_remainder: action, rule_part = comment_remainder.split("=", 1) if action not in {"disable", "enable"}: # pragma: no cover return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all'", line_no=line_no, ) else: action = None rule_part = comment_remainder if rule_part in {"disable", "enable"}: return SQLParseError( "Malformed 'noqa' section. " "Expected 'noqa: enable=<rule>[,...] | all' " "or 'noqa: disable=<rule>[,...] | all'", line_no=line_no, ) rules: Optional[Tuple[str, ...]] if rule_part != "all": # Rules can be globs, therefore we compare to the rule_set to # expand the globs. unexpanded_rules = tuple( r.strip() for r in rule_part.split(",") ) # We use a set to do natural deduplication. expanded_rules: Set[str] = set() for r in unexpanded_rules: matched = False for expanded in ( reference_map[x] for x in fnmatch.filter(reference_map.keys(), r) ): expanded_rules |= expanded matched = True if not matched: # We were unable to expand the glob. # Therefore assume the user is referencing # a special error type (e.g. PRS, LXR, or TMP) # and add this to the list of rules to ignore. expanded_rules.add(r) # Sort for consistency rules = tuple(sorted(expanded_rules)) else: rules = None return NoQaDirective(line_no, line_pos, rules, action, comment) return NoQaDirective(line_no, line_pos, None, None, comment) return None @classmethod def _extract_ignore_from_comment( cls, comment: RawSegment, reference_map: Dict[str, Set[str]], ): """Extract ignore mask entries from a comment segment.""" # Also trim any whitespace comment_content = comment.raw_trimmed().strip() # If we have leading or trailing block comment markers, also strip them.
# NOTE: We need to strip block comment markers from the start # to ensure that noqa directives in the following form are followed: # /* noqa: disable=all */ if comment_content.endswith("*/"): comment_content = comment_content[:-2].rstrip() if comment_content.startswith("/*"): comment_content = comment_content[2:].lstrip() comment_line, comment_pos = comment.pos_marker.source_position() result = cls._parse_noqa( comment_content, comment_line, comment_pos, reference_map ) if isinstance(result, SQLParseError): result.segment = comment return result @classmethod def from_tree( cls, tree: BaseSegment, reference_map: Dict[str, Set[str]], ) -> Tuple["IgnoreMask", List[SQLBaseError]]: """Look for inline ignore comments and return NoQaDirectives.""" ignore_buff: List[NoQaDirective] = [] violations: List[SQLBaseError] = [] for comment in tree.recursive_crawl("comment"): if comment.is_type("inline_comment", "block_comment"): ignore_entry = cls._extract_ignore_from_comment( cast(RawSegment, comment), reference_map ) if isinstance(ignore_entry, SQLParseError): violations.append(ignore_entry) elif ignore_entry: ignore_buff.append(ignore_entry) if ignore_buff: linter_logger.info("Parsed noqa directives from file: %r", ignore_buff) return cls(ignore_buff), violations @classmethod def from_source( cls, source: str, inline_comment_regex: RegexLexer, reference_map: Dict[str, Set[str]], ) -> Tuple["IgnoreMask", List[SQLBaseError]]: """Look for inline ignore comments and return NoQaDirectives. Very similar to .from_tree(), but can be run on raw source (i.e. does not require the code to have parsed successfully). """ ignore_buff: List[NoQaDirective] = [] violations: List[SQLBaseError] = [] for idx, line in enumerate(source.split("\n")): match = inline_comment_regex.search(line) if line else None if match: ignore_entry = cls._parse_noqa( line[match[0] : match[1]], idx + 1, match[0], reference_map ) if isinstance(ignore_entry, SQLParseError): violations.append(ignore_entry) # pragma: no cover elif ignore_entry: ignore_buff.append(ignore_entry) if ignore_buff: linter_logger.info("Parsed noqa directives from file: %r", ignore_buff) return cls(ignore_buff), violations # ### Application methods. @staticmethod def _ignore_masked_violations_single_line( violations: List[SQLBaseError], ignore_mask: List[NoQaDirective] ): """Filter a list of violations based on this single line noqa. The "ignore" list is assumed to ONLY contain NoQaDirectives with action=None. """ for ignore in ignore_mask: violations = ignore._filter_violations_single_line(violations) return violations @staticmethod def _should_ignore_violation_line_range( line_no: int, ignore_rules: List[NoQaDirective] ) -> Tuple[bool, Optional[NoQaDirective]]: """Returns whether to ignore a violation at line_no. Loop through the NoQaDirectives to find the state of things at line_no. Assumptions about "ignore_rules": - Contains directives for only ONE RULE, i.e. the rule that was violated at line_no - Sorted in ascending order by line number """ ignore = False last_ignore = None for idx, ignore_rule in enumerate(ignore_rules): if ignore_rule.line_no > line_no: # Peek at the next rule to see if it's a matching enable # and if it is, then mark it as used. if ignore_rule.action == "enable": # Mark as used ignore_rule.used = True break if ignore_rule.action == "enable": # First, if this enable did counteract a # corresponding _disable_, then it has been _used_.
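# e.g. (illustrative): with a disable directive on line 2 and an enable directive on line 8, a violation on line 5 is ignored and both directives are eventually marked as used.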
if last_ignore: ignore_rule.used = True last_ignore = None ignore = False elif ignore_rule.action == "disable": last_ignore = ignore_rule ignore = True return ignore, last_ignore @classmethod def _ignore_masked_violations_line_range( cls, violations: List[SQLBaseError], ignore_mask: List[NoQaDirective] ): """Filter out violations ignored by line-range directives. The "ignore" list is assumed to ONLY contain NoQaDirectives where action is "enable" or "disable". """ result = [] for v in violations: # Find the directives that affect the violated rule "v", either # because they specifically reference it or because they don't # specify a list of rules, thus affecting ALL rules. ignore_rule = sorted( ( ignore for ignore in ignore_mask if not ignore.rules or (v.rule_code() in cast(Tuple[str, ...], ignore.rules)) ), key=lambda ignore: ignore.line_no, ) # Determine whether to ignore the violation, based on the relevant # enable/disable directives. ignore, last_ignore = cls._should_ignore_violation_line_range( v.line_no, ignore_rule ) if not ignore: result.append(v) # If there was a previous ignore which meant that we filtered out # a violation, then mark it as used. elif last_ignore: last_ignore.used = True return result def ignore_masked_violations( self, violations: List[SQLBaseError] ) -> List[SQLBaseError]: """Remove any violations specified by ignore_mask. This involves two steps: 1. Filter out violations affected by single-line "noqa" directives. 2. Filter out violations affected by disable/enable "noqa" directives. """ ignore_specific = [ignore for ignore in self._ignore_list if not ignore.action] ignore_range = [ignore for ignore in self._ignore_list if ignore.action] violations = self._ignore_masked_violations_single_line( violations, ignore_specific ) violations = self._ignore_masked_violations_line_range(violations, ignore_range) return violations def generate_warnings_for_unused(self) -> List[SQLBaseError]: """Generates warnings for any unused NoQaDirectives.""" return [ SQLUnusedNoQaWarning( line_no=ignore.line_no, line_pos=ignore.line_pos, description=f"Unused noqa: {ignore.raw_str!r}", ) for ignore in self._ignore_list if not ignore.used ] sqlfluff-2.3.5/src/sqlfluff/core/rules/reference.py000066400000000000000000000021411451700765000223510ustar00rootroot00000000000000"""Components for working with object and table references.""" from typing import Sequence, Tuple def object_ref_matches_table( possible_references: Sequence[Tuple[str, ...]], targets: Sequence[Tuple[str, ...]] ) -> bool: """Return True if any of the possible references matches a target.""" # Simple case: If there are no references, assume okay # (i.e. no mismatch = good). if not possible_references: return True # Simple case: Reference exactly matches a target. if any(pr in targets for pr in possible_references): return True # Tricky case: If one is shorter than the other, check for a suffix match. # (Note this is an "optimistic" check, i.e. it assumes the ignored parts of # the target don't matter. In a SQL context, this is basically assuming # there was an earlier "USE <>" or similar directive.)
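# Illustrative example (names assumed): the reference ("my_table",) matches the target ("my_schema", "my_table") because it is a suffix of it.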
for pr in possible_references: for t in targets: if (len(pr) < len(t) and pr == t[-len(pr) :]) or ( len(t) < len(pr) and t == pr[-len(t) :] ): return True return False sqlfluff-2.3.5/src/sqlfluff/core/templaters/000077500000000000000000000000001451700765000210715ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/templaters/__init__.py000066400000000000000000000015341451700765000232050ustar00rootroot00000000000000"""Templater Code.""" from typing import Iterator, Type # Although these shouldn't usually be instantiated from here # we import them to make sure they get registered. from sqlfluff.core.templaters.base import RawFileSlice, RawTemplater, TemplatedFile from sqlfluff.core.templaters.jinja import JinjaTemplater from sqlfluff.core.templaters.placeholder import PlaceholderTemplater from sqlfluff.core.templaters.python import PythonTemplater def core_templaters() -> Iterator[Type[RawTemplater]]: """Returns the templater tuples for the core templaters.""" yield from [ RawTemplater, JinjaTemplater, PythonTemplater, PlaceholderTemplater, ] __all__ = ( "RawFileSlice", "TemplatedFile", "RawTemplater", "JinjaTemplater", "PythonTemplater", "PlaceholderTemplater", "core_templaters", ) sqlfluff-2.3.5/src/sqlfluff/core/templaters/base.py000066400000000000000000000526071451700765000223670ustar00rootroot00000000000000"""Defines the templaters.""" import logging from bisect import bisect_left from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.core.helpers.slice import zero_slice # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") def iter_indices_of_newlines(raw_str: str) -> Iterator[int]: """Find the indices of all newlines in a string.""" init_idx = -1 while True: nl_pos = raw_str.find("\n", init_idx + 1) if nl_pos >= 0: yield nl_pos init_idx = nl_pos else: break # pragma: no cover TODO? def large_file_check(func): """Raise an exception if the file is over a defined size. Designed to be implemented as a decorator on `.process()` methods. If no config is provided or the relevant config value is set to zero then the check is skipped. """ def _wrapped( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, **kwargs ): if config: limit = config.get("large_file_skip_char_limit") if limit: templater_logger.warning( "The config value large_file_skip_char_limit was found set. " "This feature will be removed in a future release, please " "use the more efficient 'large_file_skip_byte_limit' instead." ) if limit and len(in_str) > limit: raise SQLFluffSkipFile( f"Length of file {fname!r} is over {limit} characters. " "Skipping to avoid parser lock. Users can increase this limit " "in their config by setting the 'large_file_skip_char_limit' " "value, or disable by setting it to zero." ) return func(self, in_str=in_str, fname=fname, config=config, **kwargs) return _wrapped class RawFileSlice(NamedTuple): """A slice referring to a raw file.""" raw: str # Source string slice_type: str source_idx: int # Offset from beginning of source string # Block index, incremented on start or end block tags, e.g. "if", "for". # This is used in `BaseRule.discard_unsafe_fixes()` to reject any fixes # which span multiple templated blocks. block_idx: int = 0 # The command of a templated tag, e.g. 
"if", "for" # This is used in template tracing as a kind of cache to identify the kind # of template element this is without having to re-extract it each time. tag: Optional[str] = None def end_source_idx(self) -> int: """Return the closing index of this slice.""" return self.source_idx + len(self.raw) def source_slice(self) -> slice: """Return a slice object for this slice.""" return slice(self.source_idx, self.end_source_idx()) def is_source_only_slice(self) -> bool: """Based on its slice_type, does it only appear in the *source*? There are some slice types which are automatically source only. There are *also* some which are source only because they render to an empty string. """ # TODO: should any new logic go here? return self.slice_type in ("comment", "block_end", "block_start", "block_mid") class TemplatedFileSlice(NamedTuple): """A slice referring to a templated file.""" slice_type: str source_slice: slice templated_slice: slice class RawSliceBlockInfo(NamedTuple): """Template-related info about the raw slices in a TemplateFile.""" # Given a raw file slace, return its block ID. Useful for identifying # regions of a file with respect to template control structures (for, if). block_ids: Dict[RawFileSlice, int] # List of block IDs that have the following characteristics: # - Loop body # - Containing only literals (no templating) literal_only_loops: List[int] class TemplatedFile: """A templated SQL file. This is the response of a templaters .process() method and contains both references to the original file and also the capability to split up that file when lexing. """ def __init__( self, source_str: str, fname: str, templated_str: Optional[str] = None, sliced_file: Optional[List[TemplatedFileSlice]] = None, raw_sliced: Optional[List[RawFileSlice]] = None, ): """Initialise the TemplatedFile. If no templated_str is provided then we assume that the file is NOT templated and that the templated view is the same as the source view. Args: source_str (str): The source string. fname (str): The file name. templated_str (Optional[str], optional): The templated string. Defaults to None. sliced_file (Optional[List[TemplatedFileSlice]], optional): The sliced file. Defaults to None. raw_sliced (Optional[List[RawFileSlice]], optional): The raw sliced file. Defaults to None. """ self.source_str = source_str # An empty string is still allowed as the templated string. self.templated_str = source_str if templated_str is None else templated_str # If no fname, we assume this is from a string or stdin. self.fname = fname # Assume that no sliced_file, means the file is not templated self.sliced_file: List[TemplatedFileSlice] if sliced_file is None: if self.templated_str != self.source_str: # pragma: no cover raise ValueError("Cannot instantiate a templated file unsliced!") # If we get here and we don't have sliced files, # then it's raw, so create them. self.sliced_file = [ TemplatedFileSlice( "literal", slice(0, len(source_str)), slice(0, len(source_str)) ) ] assert ( raw_sliced is None ), "Templated file was not sliced, but not has raw slices." self.raw_sliced: List[RawFileSlice] = [ RawFileSlice(source_str, "literal", 0) ] else: self.sliced_file = sliced_file assert raw_sliced is not None, "Templated file was sliced, but not raw." self.raw_sliced = raw_sliced # Precalculate newlines, character positions. self._source_newlines = list(iter_indices_of_newlines(self.source_str)) self._templated_newlines = list(iter_indices_of_newlines(self.templated_str)) # Consistency check raw string and slices. 
pos = 0 rfs: RawFileSlice for rfs in self.raw_sliced: assert rfs.source_idx == pos, ( "TemplatedFile. Consistency fail on running source length" f": {pos} != {rfs.source_idx}" ) pos += len(rfs.raw) assert pos == len(self.source_str), ( "TemplatedFile. Consistency fail on total source length" f": {pos} != {len(self.source_str)}" ) # Consistency check templated string and slices. previous_slice = None tfs: Optional[TemplatedFileSlice] = None for tfs in self.sliced_file: if previous_slice: if tfs.templated_slice.start != previous_slice.templated_slice.stop: raise SQLFluffSkipFile( # pragma: no cover "Templated slices found to be non-contiguous. " f"{tfs.templated_slice} (starting" f" {self.templated_str[tfs.templated_slice]!r})" f" does not follow {previous_slice.templated_slice} " "(starting " f"{self.templated_str[previous_slice.templated_slice]!r}" ")" ) else: if tfs.templated_slice.start != 0: raise SQLFluffSkipFile( # pragma: no cover "First Templated slice not started at index 0 " f"(found slice {tfs.templated_slice})" ) previous_slice = tfs if self.sliced_file and templated_str is not None: if tfs.templated_slice.stop != len(templated_str): raise SQLFluffSkipFile( # pragma: no cover "Length of templated file mismatch with final slice: " f"{len(templated_str)} != {tfs.templated_slice.stop}." ) @classmethod def from_string(cls, raw: str) -> "TemplatedFile": """Create TemplatedFile from a string.""" return cls(source_str=raw, fname="") def __repr__(self) -> str: # pragma: no cover TODO? """Return a string representation of the 'TemplatedFile' object.""" return "" def __str__(self) -> str: """Return the templated file if coerced to string.""" return self.templated_str def get_line_pos_of_char_pos( self, char_pos: int, source: bool = True ) -> Tuple[int, int]: """Get the line number and position of a point in the source file. Args: char_pos: The character position in the relevant file. source: Are we checking the source file (as opposed to the templated file) Returns: line_number, line_position """ if source: ref_str = self._source_newlines else: ref_str = self._templated_newlines nl_idx = bisect_left(ref_str, char_pos) if nl_idx > 0: return nl_idx + 1, char_pos - ref_str[nl_idx - 1] else: # NB: line_pos is char_pos+1 because character position is 0-indexed, # but the line position is 1-indexed. return 1, char_pos + 1 def _find_slice_indices_of_templated_pos( self, templated_pos: int, start_idx: Optional[int] = None, inclusive: bool = True, ) -> Tuple[int, int]: """Find a subset of the sliced file which touch this point. NB: the last_idx is exclusive, as the intent is to use this as a slice. """ start_idx = start_idx or 0 first_idx = None last_idx = start_idx # Work through the sliced file, starting at the start_idx if given # as an optimisation hint. The sliced_file is a list of TemplatedFileSlice # which reference parts of the templated file and where they exist in the # source. 
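# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of how a pre-sliced TemplatedFile
# maps positions between the templated and source views. The SQL snippet and
# the slice values are illustrative assumptions; the classes and methods are
# the ones defined above.
from sqlfluff.core.templaters.base import (
    RawFileSlice,
    TemplatedFile,
    TemplatedFileSlice,
)

source = "SELECT {col} FROM tbl"
templated = "SELECT my_col FROM tbl"
tf = TemplatedFile(
    source_str=source,
    fname="<example>",
    templated_str=templated,
    sliced_file=[
        TemplatedFileSlice("literal", slice(0, 7), slice(0, 7)),
        TemplatedFileSlice("templated", slice(7, 12), slice(7, 13)),
        TemplatedFileSlice("literal", slice(12, 21), slice(13, 22)),
    ],
    raw_sliced=[
        RawFileSlice("SELECT ", "literal", 0),
        RawFileSlice("{col}", "templated", 7),
        RawFileSlice(" FROM tbl", "literal", 12),
    ],
)
# A position inside the rendered "my_col" maps back to the "{col}" placeholder.
print(tf.templated_slice_to_source_slice(slice(8, 10)))  # -> slice(7, 12, None)
# ---------------------------------------------------------------------------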
for idx, elem in enumerate(self.sliced_file[start_idx:]): last_idx = idx + start_idx if elem[2].stop >= templated_pos: if first_idx is None: first_idx = idx + start_idx if elem[2].start > templated_pos: break elif not inclusive and elem[2].start >= templated_pos: break # If we got to the end add another index else: last_idx += 1 if first_idx is None: # pragma: no cover raise ValueError("Position Not Found") return first_idx, last_idx def raw_slices_spanning_source_slice( self, source_slice: slice ) -> List[RawFileSlice]: """Return a list of the raw slices spanning a set of indices.""" # Special case: The source_slice is at the end of the file. last_raw_slice = self.raw_sliced[-1] if source_slice.start >= last_raw_slice.source_idx + len(last_raw_slice.raw): return [] # First find the start index raw_slice_idx = 0 # Move the raw pointer forward to the start of this patch while ( raw_slice_idx + 1 < len(self.raw_sliced) and self.raw_sliced[raw_slice_idx + 1].source_idx <= source_slice.start ): raw_slice_idx += 1 # Find slice index of the end of this patch. slice_span = 1 while ( raw_slice_idx + slice_span < len(self.raw_sliced) and self.raw_sliced[raw_slice_idx + slice_span].source_idx < source_slice.stop ): slice_span += 1 # Return the raw slices: return self.raw_sliced[raw_slice_idx : raw_slice_idx + slice_span] def templated_slice_to_source_slice( self, template_slice: slice, ) -> slice: """Convert a template slice to a source slice.""" if not self.sliced_file: return template_slice # pragma: no cover TODO? ts_start_sf_start, ts_start_sf_stop = self._find_slice_indices_of_templated_pos( template_slice.start ) ts_start_subsliced_file = self.sliced_file[ts_start_sf_start:ts_start_sf_stop] # Work out the insertion point insertion_point = -1 for elem in ts_start_subsliced_file: # Do slice starts and ends: for slice_elem in ("start", "stop"): if getattr(elem[2], slice_elem) == template_slice.start: # Store the lowest. point = getattr(elem[1], slice_elem) if insertion_point < 0 or point < insertion_point: insertion_point = point # We don't break here, because we might find ANOTHER # later which is actually earlier. # Zero length slice. if template_slice.start == template_slice.stop: # Is it on a join? if insertion_point >= 0: return zero_slice(insertion_point) # It's within a segment. else: if ( ts_start_subsliced_file and ts_start_subsliced_file[0][0] == "literal" ): offset = template_slice.start - ts_start_subsliced_file[0][2].start return zero_slice( ts_start_subsliced_file[0][1].start + offset, ) else: raise ValueError( # pragma: no cover "Attempting a single length slice within a templated section! " f"{template_slice} within {ts_start_subsliced_file}." ) # Otherwise it's a slice with length. # Use a non inclusive match to get the end point. 
ts_stop_sf_start, ts_stop_sf_stop = self._find_slice_indices_of_templated_pos( template_slice.stop, inclusive=False ) # Update starting position based on insertion point: if insertion_point >= 0: for elem in self.sliced_file[ts_start_sf_start:]: if elem[1].start != insertion_point: ts_start_sf_start += 1 else: break subslices = self.sliced_file[ # Very inclusive slice min(ts_start_sf_start, ts_stop_sf_start) : max( ts_start_sf_stop, ts_stop_sf_stop ) ] if ts_start_sf_start == ts_start_sf_stop: if ts_start_sf_start > len(self.sliced_file): # pragma: no cover # We should never get here raise ValueError("Starting position higher than sliced file position") if ts_start_sf_start < len(self.sliced_file): # pragma: no cover return self.sliced_file[1].source_slice else: return self.sliced_file[-1].source_slice # pragma: no cover else: start_slices = self.sliced_file[ts_start_sf_start:ts_start_sf_stop] if ts_stop_sf_start == ts_stop_sf_stop: # pragma: no cover TODO? stop_slices = [self.sliced_file[ts_stop_sf_start]] else: stop_slices = self.sliced_file[ts_stop_sf_start:ts_stop_sf_stop] # if it's a literal segment then we can get the exact position # otherwise we're greedy. # Start. if insertion_point >= 0: source_start = insertion_point elif start_slices[0][0] == "literal": offset = template_slice.start - start_slices[0][2].start source_start = start_slices[0][1].start + offset else: source_start = start_slices[0][1].start # Stop. if stop_slices[-1][0] == "literal": offset = stop_slices[-1][2].stop - template_slice.stop source_stop = stop_slices[-1][1].stop - offset else: source_stop = stop_slices[-1][1].stop # Does this slice go backward? if source_start > source_stop: # If this happens, it's because one was templated and # the other isn't, or because a loop means that the segments # are in a different order. # Take the widest possible span in this case. source_start = min(elem[1].start for elem in subslices) source_stop = max(elem[1].stop for elem in subslices) source_slice = slice(source_start, source_stop) return source_slice def is_source_slice_literal(self, source_slice: slice) -> bool: """Work out whether a slice of the source file is a literal or not.""" # No sliced file? Everything is literal if not self.raw_sliced: # pragma: no cover TODO? return True # Zero length slice. It's a literal, because it's definitely not templated. if source_slice.start == source_slice.stop: return True is_literal = True for raw_slice in self.raw_sliced: # Reset if we find a literal and we're up to the start # otherwise set false. if raw_slice.source_idx <= source_slice.start: is_literal = raw_slice.slice_type == "literal" elif raw_slice.source_idx >= source_slice.stop: # We've gone past the end. Break and Return. break else: # We're in the middle. Check type if raw_slice.slice_type != "literal": is_literal = False return is_literal def source_only_slices(self) -> List[RawFileSlice]: """Return a list a slices which reference the parts only in the source. All of these slices should be expected to have zero-length in the templated file. The results are NECESSARILY sorted. """ ret_buff = [] for elem in self.raw_sliced: if elem.is_source_only_slice(): ret_buff.append(elem) return ret_buff class RawTemplater: """A templater which does nothing. This also acts as the base templating class. """ name = "raw" templater_selector = "templater" def __init__(self, **kwargs: Dict[str, Any]) -> None: """Placeholder init function. Here we should load any initial config found in the root directory. 
The init function shouldn't take any arguments at this stage as we assume that it will load its own config. Maybe at this stage we might allow override parameters to be passed to the linter at runtime from the cli - that would be the only time we would pass arguments in here. """ def sequence_files( self, fnames: List[str], config: Optional[FluffConfig] = None, formatter=None ) -> Iterable[str]: """Given files to be processed, return a valid processing sequence.""" # Default is to process in the original order. return fnames @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter=None, ) -> Tuple[Optional[TemplatedFile], List]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. """ return TemplatedFile(in_str, fname=fname), [] @large_file_check def process_with_variants( self, *, in_str: str, fname: str, config=None, formatter=None ) -> Iterator[Tuple[Optional[TemplatedFile], List]]: """Extended version of `process` which returns multiple variants.""" raise NotImplementedError # pragma: no cover def __eq__(self, other: Any) -> bool: """Return true if `other` is of the same class as this one. NB: This is useful in comparing configs. """ return isinstance(other, self.__class__) def config_pairs(self) -> List[Tuple[str, str]]: """Returns info about the given templater for output by the cli. Returns: List[Tuple[str, str]]: A list of tuples containing information about the given templater. Each tuple contains two strings: the string 'templater' and the name of the templater. """ return [("templater", self.name)] sqlfluff-2.3.5/src/sqlfluff/core/templaters/jinja.py000066400000000000000000001201011451700765000225310ustar00rootroot00000000000000"""Defines the templaters.""" import copy import importlib import logging import os.path import pkgutil import sys from functools import reduce from typing import Callable, Dict, Generator, Iterator, List, Optional, Set, Tuple, cast import jinja2.nodes from jinja2 import ( Environment, FileSystemLoader, TemplateError, TemplateSyntaxError, meta, ) from jinja2.exceptions import TemplateNotFound, UndefinedError from jinja2.ext import Extension from jinja2.sandbox import SandboxedEnvironment from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLBaseError, SQLFluffUserError, SQLTemplaterError from sqlfluff.core.helpers.slice import is_zero_slice, slice_length from sqlfluff.core.templaters.base import ( RawFileSlice, TemplatedFile, TemplatedFileSlice, large_file_check, ) from sqlfluff.core.templaters.python import PythonTemplater from sqlfluff.core.templaters.slicers.tracer import JinjaAnalyzer, JinjaTrace # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") class JinjaTemplater(PythonTemplater): """A templater using the jinja2 library. 
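# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of the templater contract
# defined by RawTemplater above: `process()` is keyword-only and returns a
# (TemplatedFile, violations) pair. For the raw templater, the "rendered"
# output is simply the input string.
from sqlfluff.core.templaters import RawTemplater

templater = RawTemplater()
templated_file, violations = templater.process(
    in_str="SELECT 1\n", fname="<example>"
)
print(str(templated_file) == "SELECT 1\n", violations)  # -> True []
# ---------------------------------------------------------------------------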
See: https://jinja.palletsprojects.com/ """ name = "jinja" class Libraries: """Mock namespace for user-defined Jinja library.""" pass @staticmethod def _extract_macros_from_template(template, env, ctx): """Take a template string and extract any macros from it. Lovingly inspired by http://codyaray.com/2015/05/auto-load-jinja2-macros Raises: TemplateSyntaxError: If the macro we try to load has invalid syntax. We assume that outer functions will catch this exception and handle it appropriately. """ from jinja2.runtime import Macro # noqa # Iterate through keys exported from the loaded template string context = {} # NOTE: `env.from_string()` will raise TemplateSyntaxError if `template` # is invalid. macro_template = env.from_string(template, globals=ctx) # This is kind of low level and hacky but it works try: for k in macro_template.module.__dict__: attr = getattr(macro_template.module, k) # Is it a macro? If so install it at the name of the macro if isinstance(attr, Macro): context[k] = attr except UndefinedError: # This occurs if any file in the macro path references an # undefined Jinja variable. It's safe to ignore this. Any # meaningful issues will surface later at linting time. pass # Return the context return context @classmethod def _extract_macros_from_path( cls, path: List[str], env: Environment, ctx: Dict ) -> dict: """Take a path and extract macros from it. Args: path (List[str]): A list of paths. env (Environment): The environment object. ctx (Dict): The context dictionary. Returns: dict: A dictionary containing the extracted macros. Raises: ValueError: If a path does not exist. SQLTemplaterError: If there is an error in the Jinja macro file. """ macro_ctx = {} for path_entry in path: # Does it exist? It should as this check was done on config load. if not os.path.exists(path_entry): raise ValueError(f"Path does not exist: {path_entry}") if os.path.isfile(path_entry): # It's a file. Extract macros from it. with open(path_entry) as opened_file: template = opened_file.read() # Update the context with macros from the file. try: macro_ctx.update( cls._extract_macros_from_template(template, env=env, ctx=ctx) ) except TemplateSyntaxError as err: raise SQLTemplaterError( f"Error in Jinja macro file {os.path.relpath(path_entry)}: " f"{err.message}", line_no=err.lineno, line_pos=1, ) from err else: # It's a directory. Iterate through files in it and extract from them. for dirpath, _, files in os.walk(path_entry): for fname in files: if fname.endswith(".sql"): macro_ctx.update( cls._extract_macros_from_path( [os.path.join(dirpath, fname)], env=env, ctx=ctx ) ) return macro_ctx def _extract_macros_from_config(self, config, env, ctx): """Take a config and load any macros from it. Args: config: The config to extract macros from. env: The environment. ctx: The context. Returns: dict: A dictionary containing the extracted macros. """ if config: # This is now a nested section loaded_context = ( config.get_section((self.templater_selector, self.name, "macros")) or {} ) else: # pragma: no cover TODO? loaded_context = {} # Iterate to load macros macro_ctx = {} for value in loaded_context.values(): try: macro_ctx.update( self._extract_macros_from_template(value, env=env, ctx=ctx) ) except TemplateSyntaxError as err: raise SQLFluffUserError( f"Error loading user provided macro:\n`{value}`\n> {err}." ) return macro_ctx def _extract_libraries_from_config(self, config): """Extracts libraries from the given configuration. 
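# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of the trick that
# _extract_macros_from_template relies on: compile a template string and read
# jinja2 Macro objects off its module namespace. The macro name here is an
# illustrative assumption.
from jinja2 import Environment
from jinja2.runtime import Macro

env = Environment()
template = env.from_string(
    "{% macro greet(name) %}hello {{ name }}{% endmacro %}"
)
macros = {}
for key in template.module.__dict__:
    attr = getattr(template.module, key)
    # Only install attributes that are actually macros.
    if isinstance(attr, Macro):
        macros[key] = attr
print(macros["greet"]("sqlfluff"))  # -> "hello sqlfluff"
# ---------------------------------------------------------------------------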
This function iterates over the modules in the library path and imports them dynamically. The imported modules are then added to a 'Libraries' object, which is returned as a dictionary excluding magic methods. Args: config: The configuration object. Returns: dict: A dictionary containing the extracted libraries. """ # If a more global library_path is set, let that take precedence. library_path = config.get("library_path") or config.get_section( (self.templater_selector, self.name, "library_path") ) if not library_path: return {} libraries = JinjaTemplater.Libraries() # If library_path has __init__.py we parse it as one module, else we parse it # a set of modules is_library_module = os.path.exists(os.path.join(library_path, "__init__.py")) library_module_name = os.path.basename(library_path) # Need to go one level up to parse as a module correctly walk_path = ( os.path.join(library_path, "..") if is_library_module else library_path ) for module_finder, module_name, _ in pkgutil.walk_packages([walk_path]): # skip other modules that can be near module_dir if is_library_module and not module_name.startswith(library_module_name): continue # import_module is deprecated as of python 3.4. This follows roughly # the guidance of the python docs: # https://docs.python.org/3/library/importlib.html#approximating-importlib-import-module spec = module_finder.find_spec(module_name) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) if "." in module_name: # nested modules have `.` in module_name *module_path, last_module_name = module_name.split(".") # find parent module recursively parent_module = reduce( lambda res, path_part: getattr(res, path_part), module_path, libraries, ) # set attribute on module object to make jinja working correctly setattr(parent_module, last_module_name, module) else: # set attr on `libraries` obj to make it work in jinja nicely setattr(libraries, module_name, module) if is_library_module: # when library is module we have one more root module in hierarchy and we # remove it libraries = getattr(libraries, library_module_name) # remove magic methods from result return {k: v for k, v in libraries.__dict__.items() if not k.startswith("__")} @staticmethod def _generate_dbt_builtins(): """Generate the dbt builtins which are injected in the context.""" # This feels a bit wrong defining these here, they should probably # be configurable somewhere sensible. But for now they're not. # TODO: Come up with a better solution. class ThisEmulator: """A class which emulates the `this` class from dbt.""" name = "this_model" schema = "this_schema" database = "this_database" def __str__(self) -> str: # pragma: no cover TODO? return self.name dbt_builtins = { "ref": lambda model_ref: model_ref, "source": lambda source_name, table: f"{source_name}_{table}", "config": lambda **kwargs: "", "var": lambda variable, default="": "item", # `is_incremental()` renders as True, always in this case. # TODO: This means we'll never parse other parts of the query, # that are only reachable when `is_incremental()` returns False. # We should try to find a solution to that. Perhaps forcing the file # to be parsed TWICE if it uses this variable. 
"is_incremental": lambda: True, "this": ThisEmulator(), } return dbt_builtins @classmethod def _crawl_tree( cls, tree, variable_names, raw ) -> Generator[SQLTemplaterError, None, None]: """Crawl the tree looking for occurrences of the undeclared values.""" # First iterate through children for elem in tree.iter_child_nodes(): yield from cls._crawl_tree(elem, variable_names, raw) # Then assess self if ( isinstance(tree, jinja2.nodes.Name) and getattr(tree, "name") in variable_names ): line_no: int = getattr(tree, "lineno") tree_name: str = getattr(tree, "name") line = raw.split("\n")[line_no - 1] pos = line.index(tree_name) + 1 yield SQLTemplaterError( f"Undefined jinja template variable: {tree_name!r}", line_no=line_no, line_pos=pos, ) def _get_jinja_env(self, config=None): """Get a properly configured jinja environment. This method returns a properly configured jinja environment. It first checks if the 'ignore' key is present in the config dictionary and if it contains the value 'templating'. If so, it creates a subclass of FileSystemLoader called SafeFileSystemLoader that overrides the get_source method to handle missing templates when templating is ignored. If 'ignore' is not present or does not contain 'templating', it uses the regular FileSystemLoader. It then sets the extensions to ['jinja2.ext.do'] and adds the DBTTestExtension if the _apply_dbt_builtins method returns True. Finally, it returns a SandboxedEnvironment object with the specified settings. Args: config (dict, optional): A dictionary containing configuration settings. Returns: jinja2.Environment: A properly configured jinja environment. """ # We explicitly want to preserve newlines. macros_path = self._get_macros_path(config) ignore_templating = config and "templating" in config.get("ignore") if ignore_templating: class SafeFileSystemLoader(FileSystemLoader): def get_source(self, environment, name, *args, **kwargs): try: if not isinstance(name, DummyUndefined): return super().get_source( environment, name, *args, **kwargs ) raise TemplateNotFound(str(name)) except TemplateNotFound: # When ignore=templating is set, treat missing files # or attempts to load an "Undefined" file as the first # 'base' part of the name / filename rather than failing. templater_logger.debug( "Providing dummy contents for Jinja macro file: %s", name ) value = os.path.splitext(os.path.basename(str(name)))[0] return value, f"{value}.sql", lambda: False loader = SafeFileSystemLoader(macros_path or []) else: loader = FileSystemLoader(macros_path) if macros_path else None extensions = ["jinja2.ext.do"] if self._apply_dbt_builtins(config): extensions.append(DBTTestExtension) return SandboxedEnvironment( keep_trailing_newline=True, # The do extension allows the "do" directive autoescape=False, extensions=extensions, loader=loader, ) def _get_macros_path(self, config: FluffConfig) -> Optional[List[str]]: """Get the list of macros paths from the provided config object. This method searches for a config section specified by the templater_selector, name, and 'load_macros_from_path' keys. If the section is found, it retrieves the value associated with that section and splits it into a list of strings using a comma as the delimiter. The resulting list is stripped of whitespace and empty strings and returned. If the section is not found or the resulting list is empty, it returns None. Args: config (FluffConfig): The config object to search for the macros path section. Returns: Optional[List[str]]: The list of macros paths if found, None otherwise. 
""" if config: macros_path = config.get_section( (self.templater_selector, self.name, "load_macros_from_path") ) if macros_path: result = [s.strip() for s in macros_path.split(",") if s.strip()] if result: return result return None def _apply_dbt_builtins(self, config: FluffConfig) -> bool: """Check if dbt builtins should be applied from the provided config object. This method searches for a config section specified by the templater_selector, name, and 'apply_dbt_builtins' keys. If the section is found, it returns the value associated with that section. If the section is not found, it returns False. Args: config (FluffConfig): The config object to search for the apply_dbt_builtins section. Returns: bool: True if dbt builtins should be applied, False otherwise. """ if config: return config.get_section( (self.templater_selector, self.name, "apply_dbt_builtins") ) return False def get_context(self, fname=None, config=None, **kw) -> Dict: """Get the templating context from the config. Args: fname (str, optional): The name of the file. config (dict, optional): The configuration. **kw: Additional keyword arguments. Returns: dict: The templating context. """ # Load the context env = kw.pop("env") live_context = super().get_context(fname=fname, config=config) # Apply dbt builtin functions if we're allowed. if config: # first make libraries available in the context # so they can be used by the macros too libraries = self._extract_libraries_from_config(config=config) live_context.update(libraries) if libraries.get("SQLFLUFF_JINJA_FILTERS"): env.filters.update(libraries.get("SQLFLUFF_JINJA_FILTERS")) if self._apply_dbt_builtins(config): # This feels a bit wrong defining these here, they should probably # be configurable somewhere sensible. But for now they're not. # TODO: Come up with a better solution. dbt_builtins = self._generate_dbt_builtins() for name in dbt_builtins: # Only apply if it hasn't already been set at this stage. if name not in live_context: live_context[name] = dbt_builtins[name] # Load macros from path (if applicable) if config: macros_path = self._get_macros_path(config) if macros_path: live_context.update( self._extract_macros_from_path( macros_path, env=env, ctx=live_context ) ) # Load config macros, these will take precedence over macros from the path live_context.update( self._extract_macros_from_config( config=config, env=env, ctx=live_context ) ) return live_context def construct_render_func( self, fname=None, config=None ) -> Tuple[Environment, dict, Callable[[str], str]]: """Builds and returns objects needed to create and run templates. Args: fname (Optional[str]): The name of the file. config (Optional[dict]): The configuration settings. Returns: Tuple[Environment, dict, Callable[[str], str]]: A tuple containing the following: - env (Environment): An instance of the 'Environment' class. - live_context (dict): A dictionary containing the live context. - render_func (Callable[[str], str]): A callable function that is used to instantiate templates. """ # Load the context env = self._get_jinja_env(config) live_context = self.get_context(fname=fname, config=config, env=env) def render_func(in_str: str) -> str: """Used by JinjaTracer to instantiate templates. This function is a closure capturing internal state from process(). Note that creating templates involves quite a bit of state known to _this_ function but not to JinjaTracer. https://www.programiz.com/python-programming/closure """ # Load the template, passing the global context. 
try: template = env.from_string(in_str, globals=live_context) except TemplateSyntaxError as err: # pragma: no cover # Something in the template didn't parse, return the original # and a violation around what happened. # NOTE: Most parsing exceptions will be captured when we call # env.parse() in the .process() method. Hence this exception # handling should never be called. raise SQLTemplaterError( f"Failure to parse jinja template: {err}.", line_no=err.lineno, ) return template.render() return env, live_context, render_func @large_file_check def process( self, *, in_str: str, fname: str, config: Optional[FluffConfig] = None, formatter=None, ) -> Tuple[Optional[TemplatedFile], list]: """Process a string and return the new string. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (str): The input string. fname (str, optional): The filename of this string. This is mostly for loading config files at runtime. config (FluffConfig): A specific config to use for this templating operation. Only necessary for some templaters. formatter (CallbackFormatter): Optional object for output. Raises: ValueError: If the 'config' argument is not provided. Returns: Tuple[Optional[TemplatedFile], list]: A tuple containing the templated file and a list of violations. """ if not config: # pragma: no cover raise ValueError( "For the jinja templater, the `process()` method requires a config " "object." ) try: env, live_context, render_func = self.construct_render_func( fname=fname, config=config ) except SQLTemplaterError as err: return None, [err] violations: List[SQLBaseError] = [] # Attempt to identify any undeclared variables or syntax errors. # The majority of variables will be found during the _crawl_tree # step rather than this first Exception which serves only to catch # catastrophic errors. try: syntax_tree = env.parse(in_str) potentially_undefined_variables = meta.find_undeclared_variables( syntax_tree ) except Exception as err: unrendered_out = TemplatedFile( source_str=in_str, fname=fname, ) templater_error = SQLTemplaterError( "Failed to parse Jinja syntax. Correct the syntax or select an " "alternative templater." ) # Capture a line number if we can. if isinstance(err, TemplateSyntaxError): templater_error.line_no = err.lineno return unrendered_out, [templater_error] undefined_variables = set() class UndefinedRecorder: """Similar to jinja2.StrictUndefined, but remembers, not fails.""" # Tell Jinja this object is safe to call and does not alter data. 
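# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of the "remember, don't fail"
# pattern behind the UndefinedRecorder class begun above: render with a
# custom Undefined subclass so missing variables come out empty but are
# still recorded for later reporting.
from jinja2 import Environment, Undefined

seen = set()


class RecordingUndefined(Undefined):
    def __str__(self) -> str:
        # Treat the undefined variable as empty, but remember its name.
        seen.add(self._undefined_name)
        return ""


env = Environment(undefined=RecordingUndefined)
print(env.from_string("SELECT {{ col }} FROM {{ tbl }}").render(tbl="my_table"))
print(seen)  # -> {'col'}
# ---------------------------------------------------------------------------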
# https://jinja.palletsprojects.com/en/2.9.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable unsafe_callable = False # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable alters_data = False @classmethod def create(cls, name: str) -> "UndefinedRecorder": return UndefinedRecorder(name=name) def __init__(self, name: str) -> None: self.name = name def __str__(self) -> str: """Treat undefined vars as empty, but remember for later.""" undefined_variables.add(self.name) return "" def __getattr__(self, item) -> "UndefinedRecorder": undefined_variables.add(self.name) return UndefinedRecorder(f"{self.name}.{item}") def __call__(self, *args, **kwargs) -> "UndefinedRecorder": return UndefinedRecorder(f"{self.name}()") Undefined = ( UndefinedRecorder if "templating" not in config.get("ignore") else DummyUndefined ) for val in potentially_undefined_variables: if val not in live_context: live_context[val] = Undefined.create(val) # type: ignore try: # Slice the file once rendered. raw_sliced, sliced_file, out_str = self.slice_file( in_str, render_func=render_func, config=config, ) if undefined_variables: # Lets go through and find out where they are: for template_err_val in self._crawl_tree( syntax_tree, undefined_variables, in_str ): violations.append(template_err_val) return ( TemplatedFile( source_str=in_str, templated_str=out_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), violations, ) except (TemplateError, TypeError) as err: templater_logger.info("Unrecoverable Jinja Error: %s", err, exc_info=True) template_err: SQLBaseError = SQLTemplaterError( ( "Unrecoverable failure in Jinja templating: {}. Have you " "configured your variables? " "https://docs.sqlfluff.com/en/latest/configuration.html" ).format(err), # We don't have actual line number information, but specify # line 1 so users can ignore with "noqa" if they want. (The # default is line 0, which can't be ignored because it's not # a valid line number.) line_no=1, line_pos=1, ) violations.append(template_err) return None, violations def slice_file( self, raw_str: str, render_func: Callable[[str], str], config=None, **kwargs ) -> Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]: """Slice the file to determine regions where we can fix. Args: raw_str (str): The raw string to be sliced. render_func (Callable[[str], str]): The rendering function to be used. config (optional): Optional configuration. **kwargs: Additional keyword arguments. Returns: Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]: A tuple containing a list of raw file slices, a list of templated file slices, and the templated string. """ # The JinjaTracer slicing algorithm is more robust, but it requires # us to create and render a second template (not raw_str). templater_logger.info("Slicing File Template") templater_logger.debug(" Raw String: %r", raw_str[:80]) analyzer = JinjaAnalyzer(raw_str, self._get_jinja_env()) tracer = analyzer.analyze(render_func) trace = tracer.trace(append_to_templated=kwargs.pop("append_to_templated", "")) return trace.raw_sliced, trace.sliced_file, trace.templated_str def _handle_unreached_code( self, in_str: str, render_func: Callable[[str], str], uncovered_slices: Set[int], append_to_templated="", ): """Address uncovered slices by tweaking the template to hit them. Args: in_str (:obj:`str`): The raw source file. render_func (:obj:`callable`): The render func for the templater. 
uncovered_slices (:obj:`set` of :obj:`int`): Indices of slices in the raw file which are not rendered in the original rendering. These are the slices we'll attempt to hit by modifying the template. NOTE: These are indices in the _sequence of slices_, not _character indices_ in the raw source file. append_to_templated (:obj:`str`, optional): Optional string to append to the templated file. """ analyzer = JinjaAnalyzer(in_str, self._get_jinja_env()) tracer_copy = analyzer.analyze(render_func) max_variants_generated = 10 max_variants_returned = 5 variants: Dict[str, Tuple[int, JinjaTrace]] = {} # Create a mapping of the original source slices before modification so # we can adjust the positions post-modification. original_source_slices = { idx: raw_slice.source_slice() for idx, raw_slice in enumerate(tracer_copy.raw_sliced) } for uncovered_slice in sorted(uncovered_slices)[:max_variants_generated]: tracer_probe = copy.deepcopy(tracer_copy) tracer_trace = copy.deepcopy(tracer_copy) override_raw_slices = [] # Find a path that takes us to 'uncovered_slice'. choices = tracer_probe.move_to_slice(uncovered_slice, 0) for branch, options in choices.items(): tag = tracer_probe.raw_sliced[branch].tag if tag in ("if", "elif"): # Replace the existing "if" of "elif" expression with a new, # hardcoded value that hits the target slice in the template # (here that is options[0]). new_value = "True" if options[0] == branch + 1 else "False" tracer_trace.raw_slice_info[ tracer_probe.raw_sliced[branch] ].alternate_code = f"{{% {tag} {new_value} %}}" override_raw_slices.append(branch) # Render and analyze the template with the overrides. variant_key = tuple( cast(str, tracer_trace.raw_slice_info[rs].alternate_code) if idx in override_raw_slices and tracer_trace.raw_slice_info[rs].alternate_code is not None else rs.raw for idx, rs in enumerate(tracer_trace.raw_sliced) ) # In some cases (especially with nested if statements), we may # generate a variant that duplicates an existing variant. Skip # those. if variant_key not in variants: variant_raw_str = "".join(variant_key) analyzer = JinjaAnalyzer(variant_raw_str, self._get_jinja_env()) tracer_trace = analyzer.analyze(render_func) try: trace = tracer_trace.trace( append_to_templated=append_to_templated, ) except: # noqa: E722 # If we get an error tracing the variant, skip it. This may # happen for a variety of reasons. Basically there's no # guarantee that the variant will be valid Jinja. continue else: # Compute a score for the variant based on the size of initially # uncovered literal slices it hits. # NOTE: We need to map this back to the positions in the original # file, and only have the positions in the modified file here. # That means we go translate back via the slice index in raw file. # First, work out the literal positions in the modified file which # are now covered. _covered_source_positions = { tfs.source_slice.start for tfs in trace.sliced_file if tfs.slice_type == "literal" and not is_zero_slice(tfs.templated_slice) } # Second, convert these back into indices so we can use them to # refer to the unmodified source file. _covered_raw_slice_idxs = [ idx for idx, raw_slice in enumerate(trace.raw_sliced) if raw_slice.source_idx in _covered_source_positions ] score = sum( slice_length(original_source_slices[idx]) for idx in _covered_raw_slice_idxs if idx in uncovered_slices ) variants[variant_raw_str] = (score, trace) # Return the top-scoring variants. 
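# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of the idea behind
# _handle_unreached_code above: re-render with an {% if %} expression
# hardcoded to the other branch, so otherwise-unreached literals get
# covered. The template and the string replacement are illustrative
# assumptions; the real code rewrites tags via `alternate_code` instead.
from jinja2 import Environment

env = Environment()
raw = "SELECT a {% if mode == 'full' %}, b{% else %}, c{% endif %} FROM t"
original = env.from_string(raw).render(mode="full")
# Force the condition to False to reach the uncovered else-branch literal.
variant = env.from_string(raw.replace("{% if mode == 'full' %}", "{% if False %}"))
print(original)          # -> "SELECT a , b FROM t"
print(variant.render())  # -> "SELECT a , c FROM t"
# ---------------------------------------------------------------------------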
sorted_variants: List[Tuple[int, JinjaTrace]] = sorted( variants.values(), key=lambda v: v[0], reverse=True ) for _, trace in sorted_variants[:max_variants_returned]: # :TRICKY: Yield variants that _look like_ they were rendered from # the original template, but actually were rendered from a modified # template. This should ensure that lint issues and fixes for the # variants are handled correctly and can be combined with those from # the original template. # To do this we run through modified slices and adjust their source # slices to correspond with the original version. We do this by referencing # their slice position in the original file, because we know we haven't # changed the number or ordering of slices, just their length/content. adjusted_slices: List[TemplatedFileSlice] = [ tfs._replace(source_slice=original_source_slices[idx]) for idx, tfs in enumerate(trace.sliced_file) ] yield ( tracer_copy.raw_sliced, adjusted_slices, trace.templated_str, ) @large_file_check def process_with_variants( self, *, in_str: str, fname: str, config=None, formatter=None ) -> Iterator[Tuple[Optional[TemplatedFile], List]]: """Process a string and return one or more variant renderings. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. """ templated_file, violations = self.process( in_str=in_str, fname=fname, config=config, formatter=formatter ) yield templated_file, violations if not templated_file: return # pragma: no cover # Find uncovered code (if any), tweak the template to hit that code. # First, identify the literals which _are_ covered. covered_literal_positions = { tfs.source_slice.start for tfs in templated_file.sliced_file # It's covered if it's rendered if not is_zero_slice(tfs.templated_slice) } templater_logger.debug( "Covered literal positions %s", covered_literal_positions ) uncovered_literal_idxs = { idx for idx, raw_slice in enumerate(templated_file.raw_sliced) if raw_slice.slice_type == "literal" and raw_slice.source_idx not in covered_literal_positions } templater_logger.debug( "Uncovered literals correspond to slices %s", uncovered_literal_idxs ) # NOTE: No validation required as all validation done in the `.process()` # call above. _, _, render_func = self.construct_render_func(fname=fname, config=config) for raw_sliced, sliced_file, templated_str in self._handle_unreached_code( in_str, render_func, uncovered_literal_idxs ): yield ( TemplatedFile( source_str=in_str, templated_str=templated_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), violations, ) class DummyUndefined(jinja2.Undefined): """Acts as a dummy value to try and avoid template failures. Inherits from jinja2.Undefined so Jinja's default() filter will treat it as a missing value, even though it has a non-empty value in normal contexts. """ # Tell Jinja this object is safe to call and does not alter data. 
# https://jinja.palletsprojects.com/en/2.9.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable unsafe_callable = False # https://jinja.palletsprojects.com/en/3.0.x/sandbox/#jinja2.sandbox.SandboxedEnvironment.is_safe_callable alters_data = False def __init__(self, name) -> None: super().__init__() self.name = name def __str__(self) -> str: return self.name.replace(".", "_") @classmethod def create(cls, name) -> "DummyUndefined": """Factory method. When ignoring=templating is configured, use 'name' as the value for undefined variables. We deliberately avoid recording and reporting undefined variables as errors. Using 'name' as the value won't always work, but using 'name', combined with implementing the magic methods (such as __eq__, see above), works well in most cases. """ templater_logger.debug( "Providing dummy value for undefined Jinja variable: %s", name ) result = DummyUndefined(name) return result def __getattr__(self, item): """Intercept any calls to undefined attributes. Args: item (str): The name of the attribute. Returns: object: A dynamically created instance of this class. """ return self.create(f"{self.name}.{item}") # Implement the most common magic methods. This helps avoid # templating errors for undefined variables. # https://www.tutorialsteacher.com/python/magic-methods-in-python def _self_impl(self, *args, **kwargs) -> "DummyUndefined": """Return an instance of the class itself. Args: *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: object: An instance of the class itself. """ return self def _bool_impl(self, *args, **kwargs) -> bool: """Return a boolean value. Args: *args: Variable length argument list. **kwargs: Arbitrary keyword arguments. Returns: bool: A boolean value. """ return True __add__ = _self_impl __sub__ = _self_impl __mul__ = _self_impl __floordiv__ = _self_impl __truediv__ = _self_impl __mod__ = _self_impl __pow__ = _self_impl __pos__ = _self_impl __neg__ = _self_impl __lshift__ = _self_impl __rshift__ = _self_impl __getitem__ = _self_impl __invert__ = _self_impl __call__ = _self_impl __and__ = _bool_impl __or__ = _bool_impl __xor__ = _bool_impl __bool__ = _bool_impl __lt__ = _bool_impl __le__ = _bool_impl __eq__ = _bool_impl __ne__ = _bool_impl __ge__ = _bool_impl __gt__ = _bool_impl def __hash__(self) -> int: # pragma: no cov """Return a constant hash value. Returns: int: A constant hash value. """ # This is called by the "in" operator, among other things. return 0 def __iter__(self): """Return an iterator that contains only the instance of the class itself. Returns: iterator: An iterator. 
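# ---------------------------------------------------------------------------
# A minimal sketch (not part of this module) of why the magic methods on
# DummyUndefined above matter: arithmetic and comparisons on an Undefined
# value would normally raise, so returning self/True keeps rendering alive.
# The class and template below are illustrative assumptions.
from jinja2 import Environment, Undefined


class PermissiveUndefined(Undefined):
    def __str__(self) -> str:
        return self._undefined_name

    def __add__(self, other):
        # Arithmetic on an undefined value stays undefined, not an error.
        return self

    def __gt__(self, other):
        # Comparisons resolve to True so both template branches stay viable.
        return True


env = Environment(undefined=PermissiveUndefined)
tpl = "{{ qty + 1 }} {% if qty > 10 %}big{% endif %}"
print(env.from_string(tpl).render())  # -> "qty big"
# ---------------------------------------------------------------------------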
""" return [self].__iter__() class DBTTestExtension(Extension): """Jinja extension to handle the dbt test tag.""" tags = {"test"} def parse(self, parser) -> jinja2.nodes.Macro: """Parses out the contents of the test tag.""" node = jinja2.nodes.Macro(lineno=next(parser.stream).lineno) test_name = parser.parse_assign_target(name_only=True).name parser.parse_signature(node) node.name = f"test_{test_name}" node.body = parser.parse_statements(("name:endtest",), drop_needle=True) return node sqlfluff-2.3.5/src/sqlfluff/core/templaters/placeholder.py000066400000000000000000000210141451700765000237230ustar00rootroot00000000000000"""Defines the placeholder template.""" import logging from typing import Dict, Optional, Tuple import regex from sqlfluff.core.helpers.slice import offset_slice from sqlfluff.core.templaters.base import ( RawFileSlice, RawTemplater, TemplatedFile, TemplatedFileSlice, large_file_check, ) # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") KNOWN_STYLES = { # e.g. WHERE bla = :name "colon": regex.compile(r"(?\w+)(?!:)", regex.UNICODE), # e.g. WHERE bla = table:name - use with caution as more prone to false positives "colon_nospaces": regex.compile(r"(?\w+)", regex.UNICODE), # e.g. WHERE bla = :2 "numeric_colon": regex.compile( r"(?\d+)", regex.UNICODE ), # e.g. WHERE bla = %(name)s "pyformat": regex.compile( r"(?[\w_]+)\)s", regex.UNICODE ), # e.g. WHERE bla = $name or WHERE bla = ${name} "dollar": regex.compile( r"(?[\w_]+)}?", regex.UNICODE ), # e.g. USE ${flyway:database}.schema_name; "flyway_var": regex.compile(r"\${(?P\w+[:\w_]+)}", regex.UNICODE), # e.g. WHERE bla = ? "question_mark": regex.compile(r"(?[\d]+)}?", regex.UNICODE ), # e.g. WHERE bla = %s "percent": regex.compile(r"(?[\w]+)}?", regex.UNICODE), } class PlaceholderTemplater(RawTemplater): """A templater for generic placeholders. Different libraries and tools use different styles of placeholders in order to escape them when running queries. In order to perform parsing of those templated queries, it's necessary to replace these placeholders with user-provided values, which is the job of this templater. See https://www.python.org/dev/peps/pep-0249/#paramstyle for the specifications for Python, they cover most cases. """ name = "placeholder" def __init__(self, override_context=None, **kwargs): self.default_context = dict(test_value="__test__") self.override_context = override_context or {} # copy of the Python templater def get_context(self, config) -> Dict: """Get the templating context from the config.""" # TODO: The config loading should be done outside the templater code. Here # is a silly place. 
if config: # This is now a nested section loaded_context = ( config.get_section((self.templater_selector, self.name)) or {} ) else: loaded_context = {} live_context = {} live_context.update(self.default_context) live_context.update(loaded_context) live_context.update(self.override_context) if "param_regex" in live_context and "param_style" in live_context: raise ValueError( "Either param_style or param_regex must be provided, not both" ) if "param_regex" in live_context: live_context["__bind_param_regex"] = regex.compile( live_context["param_regex"] ) elif "param_style" in live_context: param_style = live_context["param_style"] if param_style not in KNOWN_STYLES: raise ValueError( 'Unknown param_style "{}", available are: {}'.format( param_style, list(KNOWN_STYLES.keys()) ) ) live_context["__bind_param_regex"] = KNOWN_STYLES[param_style] else: raise ValueError( "No param_regex nor param_style was provided to the placeholder " "templater!" ) return live_context @large_file_check def process( self, *, in_str: str, fname: str, config=None, formatter=None ) -> Tuple[Optional[TemplatedFile], list]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. 
""" context = self.get_context(config) template_slices = [] raw_slices = [] last_pos_raw, last_pos_templated = 0, 0 out_str = "" regex = context["__bind_param_regex"] # when the param has no name, use a 1-based index param_counter = 1 for found_param in regex.finditer(in_str): span = found_param.span() if "param_name" not in found_param.groupdict(): param_name = str(param_counter) param_counter += 1 else: param_name = found_param["param_name"] last_literal_length = span[0] - last_pos_raw if param_name in context: replacement = str(context[param_name]) else: replacement = param_name # add the literal to the slices template_slices.append( TemplatedFileSlice( slice_type="literal", source_slice=slice(last_pos_raw, span[0], None), templated_slice=offset_slice( last_pos_templated, last_literal_length, ), ) ) raw_slices.append( RawFileSlice( raw=in_str[last_pos_raw : span[0]], slice_type="literal", source_idx=last_pos_raw, ) ) out_str += in_str[last_pos_raw : span[0]] # add the current replaced element start_template_pos = last_pos_templated + last_literal_length template_slices.append( TemplatedFileSlice( slice_type="templated", source_slice=slice(span[0], span[1]), templated_slice=offset_slice(start_template_pos, len(replacement)), ) ) raw_slices.append( RawFileSlice( raw=in_str[span[0] : span[1]], slice_type="templated", source_idx=span[0], ) ) out_str += replacement # update the indexes last_pos_raw = span[1] last_pos_templated = start_template_pos + len(replacement) # add the last literal, if any if len(in_str) > last_pos_raw: template_slices.append( TemplatedFileSlice( slice_type="literal", source_slice=slice(last_pos_raw, len(in_str)), templated_slice=offset_slice( last_pos_templated, (len(in_str) - last_pos_raw), ), ) ) raw_slices.append( RawFileSlice( raw=in_str[last_pos_raw:], slice_type="literal", source_idx=last_pos_raw, ) ) out_str += in_str[last_pos_raw:] return ( TemplatedFile( # original string source_str=in_str, # string after all replacements templated_str=out_str, # filename fname=fname, # list of TemplatedFileSlice sliced_file=template_slices, # list of RawFileSlice, same size raw_sliced=raw_slices, ), [], # violations, always empty ) sqlfluff-2.3.5/src/sqlfluff/core/templaters/python.py000066400000000000000000001324701451700765000227730ustar00rootroot00000000000000"""Defines the templaters.""" import ast from string import Formatter from typing import ( Any, Callable, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, ) from sqlfluff.core.errors import SQLTemplaterError from sqlfluff.core.helpers.slice import offset_slice, zero_slice from sqlfluff.core.helpers.string import findall from sqlfluff.core.templaters.base import ( RawFileSlice, RawTemplater, TemplatedFile, TemplatedFileSlice, large_file_check, templater_logger, ) class IntermediateFileSlice(NamedTuple): """An intermediate representation of a partially sliced File.""" intermediate_type: str source_slice: slice templated_slice: slice slice_buffer: List[RawFileSlice] def _trim_end( self, templated_str: str, target_end: str = "head" ) -> Tuple["IntermediateFileSlice", List[TemplatedFileSlice]]: """Trim the ends of a intermediate segment.""" target_idx = 0 if target_end == "head" else -1 terminator_types = ("block_start") if target_end == "head" else ("block_end") main_source_slice = self.source_slice main_templated_slice = self.templated_slice slice_buffer = self.slice_buffer end_buffer = [] # Yield any leading literals, comments or blocks. 
        while len(slice_buffer) > 0 and slice_buffer[target_idx].slice_type in (
            "literal",
            "block_start",
            "block_end",
            "comment",
        ):
            focus = slice_buffer[target_idx]
            templater_logger.debug("    %s Focus: %s", target_end, focus)
            # Is it a zero length item?
            if focus.slice_type in ("block_start", "block_end", "comment"):
                # Only add the length in the source space.
                templated_len = 0
            else:
                # Assume it's a literal, check the literal actually matches.
                templated_len = len(focus.raw)
                if target_end == "head":
                    check_slice = offset_slice(
                        main_templated_slice.start,
                        templated_len,
                    )
                else:
                    check_slice = slice(
                        main_templated_slice.stop - templated_len,
                        main_templated_slice.stop,
                    )

                if templated_str[check_slice] != focus.raw:
                    # It doesn't match, we can't use it.
                    templater_logger.debug("    Nope")
                    break

            # If it does match, set up the new slices
            if target_end == "head":
                division = (
                    main_source_slice.start + len(focus.raw),
                    main_templated_slice.start + templated_len,
                )
                new_slice = TemplatedFileSlice(
                    focus.slice_type,
                    slice(main_source_slice.start, division[0]),
                    slice(main_templated_slice.start, division[1]),
                )
                end_buffer.append(new_slice)
                main_source_slice = slice(division[0], main_source_slice.stop)
                main_templated_slice = slice(division[1], main_templated_slice.stop)
            else:
                division = (
                    main_source_slice.stop - len(focus.raw),
                    main_templated_slice.stop - templated_len,
                )
                new_slice = TemplatedFileSlice(
                    focus.slice_type,
                    slice(division[0], main_source_slice.stop),
                    slice(division[1], main_templated_slice.stop),
                )
                end_buffer.insert(0, new_slice)
                main_source_slice = slice(main_source_slice.start, division[0])
                main_templated_slice = slice(main_templated_slice.start, division[1])

            slice_buffer.pop(target_idx)
            if focus.slice_type in terminator_types:
                break
        # Return a new Intermediate slice and the buffer.
        # NB: Don't check size of slice buffer here. We can do that later.
        new_intermediate = self.__class__(
            "compound", main_source_slice, main_templated_slice, slice_buffer
        )
        return new_intermediate, end_buffer

    def trim_ends(
        self, templated_str: str
    ) -> Tuple[
        List[TemplatedFileSlice], "IntermediateFileSlice", List[TemplatedFileSlice]
    ]:
        """Trim both ends of an intermediate slice."""
        # Trim start:
        new_slice, head_buffer = self._trim_end(
            templated_str=templated_str, target_end="head"
        )
        # Trim end:
        new_slice, tail_buffer = new_slice._trim_end(
            templated_str=templated_str, target_end="tail"
        )
        # Return
        return head_buffer, new_slice, tail_buffer

    def try_simple(self) -> TemplatedFileSlice:
        """Try to turn this intermediate slice into a simple slice."""
        # Yield anything simple
        if len(self.slice_buffer) == 1:
            return TemplatedFileSlice(
                self.slice_buffer[0].slice_type,
                self.source_slice,
                self.templated_slice,
            )
        else:
            raise ValueError("IntermediateFileSlice is not simple!")

    def coalesce(self) -> TemplatedFileSlice:
        """Coalesce this whole slice into a single one. Brutally."""
        return TemplatedFileSlice(
            PythonTemplater._coalesce_types(self.slice_buffer),
            self.source_slice,
            self.templated_slice,
        )


class PythonTemplater(RawTemplater):
    """A templater using python format strings.

    See: https://docs.python.org/3/library/string.html#format-string-syntax

    For the python templater we don't allow functions or macros because there isn't
    a good way of doing it securely. Use the jinja templater for this.

    The python templater also defines a lot of the logic for how
    to allow fixing and translation in a templated file.
""" name = "python" def __init__(self, override_context=None, **kwargs) -> None: self.default_context = dict(test_value="__test__") self.override_context = override_context or {} @staticmethod def infer_type(s) -> Any: """Infer a python type from a string and convert. Given a string value, convert it to a more specific built-in Python type (e.g. int, float, list, dictionary) if possible. """ try: return ast.literal_eval(s) except (SyntaxError, ValueError): return s def get_context(self, fname=None, config=None, **kw) -> Dict: """Get the templating context from the config. This function retrieves the templating context from the config by loading the config and updating the live_context dictionary with the loaded_context and other predefined context dictionaries. It then goes through the loaded_context dictionary and infers the types of the values before returning the live_context dictionary. Args: fname (str, optional): The file name. config (dict, optional): The config dictionary. **kw: Additional keyword arguments. Returns: dict: The templating context. """ # TODO: The config loading should be done outside the templater code. Here # is a silly place. if config: # This is now a nested section loaded_context = ( config.get_section((self.templater_selector, self.name, "context")) or {} ) else: loaded_context = {} live_context = {} live_context.update(self.default_context) live_context.update(loaded_context) live_context.update(self.override_context) # Infer types for k in loaded_context: live_context[k] = self.infer_type(live_context[k]) return live_context @large_file_check def process( self, *, in_str: str, fname: str, config=None, formatter=None ) -> Tuple[Optional[TemplatedFile], List]: """Process a string and return a TemplatedFile. Note that the arguments are enforced as keywords because Templaters can have differences in their `process` method signature. A Templater that only supports reading from a file would need the following signature: process(*, fname, in_str=None, config=None) (arguments are swapped) Args: in_str (:obj:`str`): The input string. fname (:obj:`str`, optional): The filename of this string. This is mostly for loading config files at runtime. config (:obj:`FluffConfig`): A specific config to use for this templating operation. Only necessary for some templaters. formatter (:obj:`CallbackFormatter`): Optional object for output. """ live_context = self.get_context(fname=fname, config=config) def render_func(raw_str: str) -> str: """Render the string using the captured live_context.""" try: rendered_str = raw_str.format(**live_context) except KeyError as err: raise SQLTemplaterError( "Failure in Python templating: {}. Have you configured your " "variables? https://docs.sqlfluff.com/en/stable/" "configuration.html#templating-configuration".format(err) ) return rendered_str raw_sliced, sliced_file, new_str = self.slice_file( in_str, render_func=render_func, config=config, ) return ( TemplatedFile( source_str=in_str, templated_str=new_str, fname=fname, sliced_file=sliced_file, raw_sliced=raw_sliced, ), [], ) def slice_file( self, raw_str: str, render_func: Callable[[str], str], config=None, **kwargs ) -> Tuple[List[RawFileSlice], List[TemplatedFileSlice], str]: """Slice the file to determine regions where we can fix.""" templater_logger.info("Slicing File Template") templater_logger.debug(" Raw String: %r", raw_str) # Render the templated string. 
# NOTE: This seems excessive in this simple example, but for other templating # engines we need more control over the rendering so may need to call this # method more than once. templated_str = render_func(raw_str) templater_logger.debug(" Templated String: %r", templated_str) # Slice the raw file raw_sliced = list(self._slice_template(raw_str)) templater_logger.debug(" Raw Sliced:") for idx, raw_slice in enumerate(raw_sliced): templater_logger.debug(" %s: %r", idx, raw_slice) # Find the literals literals = [ raw_slice.raw for raw_slice in raw_sliced if raw_slice.slice_type == "literal" ] templater_logger.debug(" Literals: %s", literals) for loop_idx in range(2): templater_logger.debug(" # Slice Loop %s", loop_idx) # Calculate occurrences raw_occurrences = self._substring_occurrences(raw_str, literals) templated_occurrences = self._substring_occurrences(templated_str, literals) templater_logger.debug( " Occurrences: Raw: %s, Templated: %s", raw_occurrences, templated_occurrences, ) # Split on invariants split_sliced = list( self._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_str, ) ) templater_logger.debug(" Split Sliced:") for idx, split_slice in enumerate(split_sliced): templater_logger.debug(" %s: %r", idx, split_slice) # Deal with uniques and coalesce the rest sliced_file = list( self._split_uniques_coalesce_rest( split_sliced, raw_occurrences, templated_occurrences, templated_str ) ) templater_logger.debug(" Fully Sliced:") for idx, templ_slice in enumerate(sliced_file): templater_logger.debug(" %s: %r", idx, templ_slice) unwrap_wrapped = ( True if config is None else config.get( "unwrap_wrapped_queries", section="templater", default=True ) ) sliced_file, new_templated_str = self._check_for_wrapped( sliced_file, templated_str, unwrap_wrapped=unwrap_wrapped ) if new_templated_str == templated_str: # If we didn't change it then we're done. break else: # If it's not equal, loop around templated_str = new_templated_str return raw_sliced, sliced_file, new_templated_str @classmethod def _check_for_wrapped( cls, slices: List[TemplatedFileSlice], templated_str: str, unwrap_wrapped: bool = True, ) -> Tuple[List[TemplatedFileSlice], str]: """Identify a wrapped query (e.g. dbt test) and handle it. If unwrap_wrapped is true, we trim the wrapping from the templated file. If unwrap_wrapped is false, we add a slice at start and end. """ if not slices: # If there are no slices, return return slices, templated_str first_slice = slices[0] last_slice = slices[-1] if unwrap_wrapped: # If we're unwrapping, there is no need to edit the slices, but we do need # to trim the templated string. We should expect that the template will need # to be re-sliced but we should assume that the function calling this one # will deal with that eventuality. return ( slices, templated_str[ first_slice.templated_slice.start : last_slice.templated_slice.stop ], ) if ( first_slice.source_slice.start == 0 and first_slice.templated_slice.start != 0 ): # This means that there is text at the start of the templated file which # doesn't exist in the raw file. Handle this by adding a templated slice # (though it's not really templated) between 0 and 0 in the raw, and 0 and # the current first slice start index in the templated. 
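        # As a hypothetical illustration: a dbt-style test can wrap a model's
        # "SELECT 1" as "select count(*) from (SELECT 1)", so the templated
        # file gains a prefix with no counterpart in the raw file; the
        # zero-length source slice inserted below accounts for it.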
slices.insert( 0, TemplatedFileSlice( "templated", slice(0, 0), slice(0, first_slice.templated_slice.start), ), ) if last_slice.templated_slice.stop != len(templated_str): # This means that there is text at the end of the templated file which # doesn't exist in the raw file. Handle this by adding a templated slice # beginning and ending at the end of the raw, and the current last slice # stop and file end in the templated. slices.append( TemplatedFileSlice( "templated", zero_slice(last_slice.source_slice.stop), slice(last_slice.templated_slice.stop, len(templated_str)), ) ) return slices, templated_str @classmethod def _substring_occurrences( cls, in_str: str, substrings: Iterable[str] ) -> Dict[str, List[int]]: """Find every occurrence of the given substrings.""" occurrences = {} for substring in substrings: occurrences[substring] = list(findall(substring, in_str)) return occurrences @staticmethod def _sorted_occurrence_tuples( occurrences: Dict[str, List[int]] ) -> List[Tuple[str, int]]: """Sort a dict of occurrences into a sorted list of tuples.""" return sorted( ((raw, idx) for raw in occurrences.keys() for idx in occurrences[raw]), # Sort first by position, then by lexical (for stability) key=lambda x: (x[1], x[0]), ) @classmethod def _slice_template(cls, in_str: str) -> Iterator[RawFileSlice]: """Slice a templated python string into token tuples. This uses Formatter() as per: https://docs.python.org/3/library/string.html#string.Formatter """ fmt = Formatter() in_idx = 0 for literal_text, field_name, format_spec, conversion in fmt.parse(in_str): if literal_text: escape_chars = cls._sorted_occurrence_tuples( cls._substring_occurrences(literal_text, ["}", "{"]) ) idx = 0 while escape_chars: first_char = escape_chars.pop(0) # Is there a literal first? if first_char[1] > idx: yield RawFileSlice( literal_text[idx : first_char[1]], "literal", in_idx ) in_idx += first_char[1] - idx # Add the escaped idx = first_char[1] + len(first_char[0]) # We double them here to make the raw yield RawFileSlice( literal_text[first_char[1] : idx] * 2, "escaped", in_idx ) # Will always be 2 in this case. # This is because ALL escape sequences in the python formatter # are two characters which reduce to one. in_idx += 2 # Deal with last one (if present) if literal_text[idx:]: yield RawFileSlice(literal_text[idx:], "literal", in_idx) in_idx += len(literal_text) - idx # Deal with fields if field_name: constructed_token = "{{{field_name}{conv}{spec}}}".format( field_name=field_name, conv=f"!{conversion}" if conversion else "", spec=f":{format_spec}" if format_spec else "", ) yield RawFileSlice(constructed_token, "templated", in_idx) in_idx += len(constructed_token) @classmethod def _split_invariants( cls, raw_sliced: List[RawFileSlice], literals: List[str], raw_occurrences: Dict[str, List[int]], templated_occurrences: Dict[str, List[int]], templated_str: str, ) -> Iterator[IntermediateFileSlice]: """Split a sliced file on its invariant literals. We prioritise the _longest_ invariants first as they are more likely to be the anchors. """ # Calculate invariants invariants = [ literal for literal in literals if len(raw_occurrences[literal]) == 1 and len(templated_occurrences[literal]) == 1 ] # Work through the invariants and make sure they appear # in order. for linv in sorted(invariants, key=len, reverse=True): # Any invariants which have templated positions, relative # to source positions, which aren't in order, should be # ignored. # Is this one still relevant?
if linv not in invariants: continue # pragma: no cover source_pos, templ_pos = raw_occurrences[linv], templated_occurrences[linv] # Copy the list before iterating because we're going to edit it. for tinv in invariants.copy(): if tinv != linv: src_dir = source_pos > raw_occurrences[tinv] tmp_dir = templ_pos > templated_occurrences[tinv] # If it's not in the same direction in the source and template # remove it. if src_dir != tmp_dir: # pragma: no cover templater_logger.debug( " Invariant found out of order: %r", tinv ) invariants.remove(tinv) # Set up some buffers buffer: List[RawFileSlice] = [] idx: Optional[int] = None templ_idx = 0 # Loop through for raw_file_slice in raw_sliced: if raw_file_slice.raw in invariants: if buffer: yield IntermediateFileSlice( "compound", slice(idx, raw_file_slice.source_idx), slice(templ_idx, templated_occurrences[raw_file_slice.raw][0]), buffer, ) buffer = [] idx = None yield IntermediateFileSlice( "invariant", offset_slice( raw_file_slice.source_idx, len(raw_file_slice.raw), ), offset_slice( templated_occurrences[raw_file_slice.raw][0], len(raw_file_slice.raw), ), [ RawFileSlice( raw_file_slice.raw, raw_file_slice.slice_type, templated_occurrences[raw_file_slice.raw][0], ) ], ) templ_idx = templated_occurrences[raw_file_slice.raw][0] + len( raw_file_slice.raw ) else: buffer.append( RawFileSlice( raw_file_slice.raw, raw_file_slice.slice_type, raw_file_slice.source_idx, ) ) if idx is None: idx = raw_file_slice.source_idx # If we have a final buffer, yield it if buffer: yield IntermediateFileSlice( "compound", slice((idx or 0), (idx or 0) + sum(len(slc.raw) for slc in buffer)), slice(templ_idx, len(templated_str)), buffer, ) @staticmethod def _filter_occurrences( file_slice: slice, occurrences: Dict[str, List[int]] ) -> Dict[str, List[int]]: """Filter a dict of occurrences to just those within a slice.""" filtered = { key: [ pos for pos in occurrences[key] if pos >= file_slice.start and pos < file_slice.stop ] for key in occurrences.keys() } return {key: filtered[key] for key in filtered.keys() if filtered[key]} @staticmethod def _coalesce_types(elems: List[RawFileSlice]) -> str: """Coalesce to the priority type.""" # Make a set of types types = {elem.slice_type for elem in elems} # Replace block types with templated for typ in list(types): if typ.startswith("block_"): # pragma: no cover types.remove(typ) types.add("templated") # Take the easy route if they're all the same type if len(types) == 1: return types.pop() # Then deal with priority priority = ["templated", "escaped", "literal"] for p in priority: if p in types: return p raise RuntimeError( f"Exhausted priorities in _coalesce_types! {types!r}" ) # pragma: no cover @classmethod def _split_uniques_coalesce_rest( cls, split_file: List[IntermediateFileSlice], raw_occurrences: Dict[str, List[int]], templ_occurrences: Dict[str, List[int]], templated_str: str, ) -> Iterator[TemplatedFileSlice]: """Within each of the compound sections split on unique literals. For everything else we coalesce to the dominant type. Returns: Iterable of the type of segment, the slice in the raw file and the slice in the templated file. 
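        As an illustrative example (not drawn from the test suite): a
        literal appearing exactly once in both the raw and the templated
        file (a "two way unique") can anchor a split of the surrounding
        compound section, whereas a repeated literal like ", " cannot.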
""" # A buffer to capture tail segments tail_buffer: List[TemplatedFileSlice] = [] templater_logger.debug(" _split_uniques_coalesce_rest: %s", split_file) for int_file_slice in split_file: # Yield anything from the tail buffer if tail_buffer: # pragma: no cover templater_logger.debug( " Yielding Tail Buffer [start]: %s", tail_buffer ) yield from tail_buffer tail_buffer = [] # Check whether we're handling a zero length slice. if ( int_file_slice.templated_slice.stop - int_file_slice.templated_slice.start == 0 ): # pragma: no cover point_combo = int_file_slice.coalesce() templater_logger.debug( " Yielding Point Combination: %s", point_combo ) yield point_combo continue # Yield anything simple try: simple_elem = int_file_slice.try_simple() templater_logger.debug(" Yielding Simple: %s", simple_elem) yield simple_elem continue except ValueError: pass # Trim ends and overwrite the current working copy. head_buffer, int_file_slice, tail_buffer = int_file_slice.trim_ends( templated_str=templated_str ) if head_buffer: yield from head_buffer # pragma: no cover # Have we consumed the whole thing? if not int_file_slice.slice_buffer: continue # pragma: no cover # Try to yield simply again (post trim) try: # pragma: no cover simple_elem = int_file_slice.try_simple() templater_logger.debug(" Yielding Simple: %s", simple_elem) yield simple_elem continue except ValueError: pass templater_logger.debug(" Intermediate Slice: %s", int_file_slice) # Generate the coalesced version in case we need it coalesced = int_file_slice.coalesce() # Look for anchors raw_occs = cls._filter_occurrences( int_file_slice.source_slice, raw_occurrences ) templ_occs = cls._filter_occurrences( int_file_slice.templated_slice, templ_occurrences ) # Do we have any uniques to split on? # NB: We use `get` on the templated occurrences, because it's possible # that because of an if statement, something is in the source, but # not in the templated at all. In that case, we shouldn't use it. one_way_uniques = [ key for key in raw_occs.keys() if len(raw_occs[key]) == 1 and len(templ_occs.get(key, [])) >= 1 ] two_way_uniques = [ key for key in one_way_uniques if len(templ_occs[key]) == 1 ] # if we don't have anything to anchor on, then just return (coalescing # types) if not raw_occs or not templ_occs or not one_way_uniques: templater_logger.debug( " No Anchors or Uniques. Yielding Whole: %s", coalesced ) yield coalesced continue # Deal with the inner segment itself. templater_logger.debug( " Intermediate Slice [post trim]: %s: %r", int_file_slice, templated_str[int_file_slice.templated_slice], ) templater_logger.debug(" One Way Uniques: %s", one_way_uniques) templater_logger.debug(" Two Way Uniques: %s", two_way_uniques) # Hang onto the starting position, which we'll advance as we go. starts = ( int_file_slice.source_slice.start, int_file_slice.templated_slice.start, ) # Deal with two way uniques first, because they are easier. # If we do find any we use recursion, because we'll want to do # all of the above checks again. if two_way_uniques: # Yield the uniques and coalesce anything between. bookmark_idx = 0 for idx, raw_slice in enumerate(int_file_slice.slice_buffer): pos = 0 unq: Optional[str] = None # Does this element contain one of our uniques? If so, where? for unique in two_way_uniques: if unique in raw_slice.raw: pos = raw_slice.raw.index(unique) unq = unique if unq: # Yes it does. Handle it. # Get the position of the unique section. 
unique_position = ( raw_occs[unq][0], templ_occs[unq][0], ) templater_logger.debug( " Handling Unique: %r, %s, %s, %r", unq, pos, unique_position, raw_slice, ) # Handle full slices up to this one if idx > bookmark_idx: # Recurse to deal with any loops separately yield from cls._split_uniques_coalesce_rest( [ IntermediateFileSlice( "compound", # slice up to this unique slice(starts[0], unique_position[0] - pos), slice(starts[1], unique_position[1] - pos), int_file_slice.slice_buffer[bookmark_idx:idx], ) ], raw_occs, templ_occs, templated_str, ) # Handle any potential partial slice if we're part way through # this one. if pos > 0: yield TemplatedFileSlice( raw_slice.slice_type, slice(unique_position[0] - pos, unique_position[0]), slice(unique_position[1] - pos, unique_position[1]), ) # Handle the unique itself and update the bookmark starts = ( unique_position[0] + len(unq), unique_position[1] + len(unq), ) yield TemplatedFileSlice( raw_slice.slice_type, slice(unique_position[0], starts[0]), slice(unique_position[1], starts[1]), ) # Move the bookmark after this position bookmark_idx = idx + 1 # Handle any remnant after the unique. if raw_slice.raw[pos + len(unq) :]: remnant_length = len(raw_slice.raw) - (len(unq) + pos) _starts = starts starts = ( starts[0] + remnant_length, starts[1] + remnant_length, ) yield TemplatedFileSlice( raw_slice.slice_type, slice(_starts[0], starts[0]), slice(_starts[1], starts[1]), ) if bookmark_idx == 0: # pragma: no cover # This is a SAFETY VALVE. In Theory we should never be here # and if we are it implies an error elsewhere. This clause # should stop any potential infinite recursion in its tracks # by simply classifying the whole of the current block as # templated and just stopping here. # Bugs triggering this eventuality have been observed in 0.4.0. templater_logger.info( " Safety Valve Info: %s, %r", two_way_uniques, templated_str[int_file_slice.templated_slice], ) templater_logger.warning( " Python templater safety valve unexpectedly triggered. " "Please report your raw and compiled query on github for " "debugging." ) # NOTE: If a bug is reported here, this will incorrectly # classify more of the query as "templated" than it should. yield coalesced continue # At the end of the loop deal with any remaining slices. # The above "Safety Valve"TM should keep us safe from infinite # recursion. if len(int_file_slice.slice_buffer) > bookmark_idx: # Recurse to deal with any loops separately yield from cls._split_uniques_coalesce_rest( [ IntermediateFileSlice( "compound", # Slicing is easy here, we have no choice slice(starts[0], int_file_slice.source_slice.stop), slice(starts[1], int_file_slice.templated_slice.stop), # Calculate the subsection to deal with. int_file_slice.slice_buffer[ bookmark_idx : len(int_file_slice.slice_buffer) ], ) ], raw_occs, templ_occs, templated_str, ) # We continue here because the buffer should be exhausted, # and if there's more to do we'll do it in the recursion. continue # If we get here, then there ARE uniques, but they are only ONE WAY. # This means loops. Loops are tricky. # We're very unlikely to get here (impossible?) with just python # formatting, but this class is also the base for the jinja templater # (and others?) so it may be used there. # One way uniques give us landmarks to try and estimate what to do with # them.
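            # As an illustrative (hypothetical) example, in a jinja-style
            # template such as:
            #     SELECT {% for c in cols %}{{ c }} AS col_{{ c }}, {% endfor %}1
            # the literal " AS col_" occurs once in the source but once per
            # loop iteration in the output: unique one way, but not the other.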
owu_templ_tuples = cls._sorted_occurrence_tuples( # pragma: no cover {key: templ_occs[key] for key in one_way_uniques} ) templater_logger.debug( # pragma: no cover " Handling One Way Uniques: %s", owu_templ_tuples ) # Hang onto our *ending* position too from here. stops = ( # pragma: no cover int_file_slice.source_slice.stop, int_file_slice.templated_slice.stop, ) # OWU in this context refers to "One Way Unique" this_owu_idx: Optional[int] = None # pragma: no cover last_owu_idx: Optional[int] = None # pragma: no cover # Iterate through occurrence tuples of the one-way uniques. for raw, template_idx in owu_templ_tuples: # pragma: no cover raw_idx = raw_occs[raw][0] raw_len = len(raw) # Find the index of this owu in the slice_buffer, store the previous last_owu_idx = this_owu_idx try: this_owu_idx = next( idx for idx, slc in enumerate(int_file_slice.slice_buffer) if slc.raw == raw ) except StopIteration: # pragma: no cover # This can happen if the unique was detected, but was introduced # by a templater step. This is a false positive. Skip and move on. templater_logger.info( "One Way Unique %r not found in slice buffer. Skipping...", raw ) continue templater_logger.debug( " Handling OWU: %r @%s (raw @%s) [this_owu_idx: %s, " "last_owu_dx: %s]", raw, template_idx, raw_idx, this_owu_idx, last_owu_idx, ) if template_idx > starts[1]: # Yield the bit before this literal. We yield it # all as a tuple, because if we could do any better # we would have done it by now. # Can we identify a meaningful portion of the patch # to recurse a split? sub_section: Optional[List[RawFileSlice]] = None # If it's the start, the slicing is easy if ( starts[1] == int_file_slice.templated_slice.stop ): # pragma: no cover TODO? sub_section = int_file_slice.slice_buffer[:this_owu_idx] # If we are AFTER the previous in the template, then it's # also easy. [assuming it's not the same owu] elif ( raw_idx > starts[0] and last_owu_idx != this_owu_idx ): # pragma: no cover if last_owu_idx: sub_section = int_file_slice.slice_buffer[ last_owu_idx + 1 : this_owu_idx ] else: sub_section = int_file_slice.slice_buffer[:this_owu_idx] # If we succeeded in one of the above, we can also recurse # and be more intelligent with the other sections. if sub_section: templater_logger.debug( " Attempting Subsplit [pre]: %s, %r", sub_section, templated_str[slice(starts[1], template_idx)], ) yield from cls._split_uniques_coalesce_rest( [ IntermediateFileSlice( "compound", # Slicing is easy here, we have no choice slice(starts[0], raw_idx), slice(starts[1], template_idx), sub_section, ) ], raw_occs, templ_occs, templated_str, ) # Otherwise, it's the tricky case. else: # In this case we've found a literal, coming AFTER another # in the templated version, but BEFORE (or the same) in the # raw version. This only happens during loops, but it means # that identifying exactly what the intervening bit refers # to is a bit arbitrary. In this case we're going to OVER # estimate and refer to the whole loop segment. # TODO: Maybe this should make two chunks instead, one # working backward, and one working forward. But that's # a job for another day. # First find where we are starting this remainder # in the template (as an index in the buffer). # Any segments *after* cur_idx are involved. if last_owu_idx is None or last_owu_idx + 1 >= len( int_file_slice.slice_buffer ): cur_idx = 0 else: cur_idx = last_owu_idx + 1 # We need to know how many block_ends are after this.
block_ends = sum( slc.slice_type == "block_end" for slc in int_file_slice.slice_buffer[cur_idx:] ) # We can allow up to this number of preceding block starts block_start_indices = [ idx for idx, slc in enumerate( int_file_slice.slice_buffer[:cur_idx] ) if slc.slice_type == "block_start" ] # Trim anything which we're not allowed to use. if len(block_start_indices) > block_ends: # pragma: no cover offset = block_start_indices[-1 - block_ends] + 1 elem_sub_buffer = int_file_slice.slice_buffer[offset:] cur_idx -= offset else: elem_sub_buffer = int_file_slice.slice_buffer # We also need to know whether any of the *starting* # segments are involved. # Anything up to start_idx (exclusive) is included. include_start = raw_idx > elem_sub_buffer[0].source_idx # The ending point of this slice, is already decided. end_point = elem_sub_buffer[-1].end_source_idx() # If start_idx is None, we're in luck. We don't need to include # the beginning. if include_start: start_point = elem_sub_buffer[0].source_idx # Otherwise we know it's looped round, we need to include the # whole slice. else: # pragma: no cover start_point = elem_sub_buffer[cur_idx].source_idx tricky = TemplatedFileSlice( "templated", slice(start_point, end_point), slice(starts[1], template_idx), ) templater_logger.debug( " Yielding Tricky Case : %s", tricky, ) yield tricky # Yield the literal owu_literal_slice = TemplatedFileSlice( "literal", offset_slice(raw_idx, raw_len), offset_slice(template_idx, raw_len), ) templater_logger.debug( " Yielding Unique: %r, %s", raw, owu_literal_slice, ) yield owu_literal_slice # Update our bookmark starts = ( raw_idx + raw_len, template_idx + raw_len, ) if starts[1] < stops[1] and last_owu_idx is not None: # pragma: no cover # Yield the end bit templater_logger.debug(" Attempting Subsplit [post].") yield from cls._split_uniques_coalesce_rest( [ IntermediateFileSlice( "compound", # Slicing is easy here, we have no choice slice(raw_idx + raw_len, stops[0]), slice(starts[1], stops[1]), int_file_slice.slice_buffer[last_owu_idx + 1 :], ) ], raw_occs, templ_occs, templated_str, ) # Yield anything from the tail buffer if tail_buffer: # pragma: no cover templater_logger.debug( " Yielding Tail Buffer [end]: %s", tail_buffer ) yield from tail_buffer sqlfluff-2.3.5/src/sqlfluff/core/templaters/slicers/000077500000000000000000000000001451700765000225355ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/core/templaters/slicers/__init__.py000066400000000000000000000001111451700765000246370ustar00rootroot00000000000000"""Modules for slicing and mapping between the raw and templated SQL.""" sqlfluff-2.3.5/src/sqlfluff/core/templaters/slicers/tracer.py000066400000000000000000000747721451700765000244100ustar00rootroot00000000000000"""'Trace' Jinja template execution to map output back to the raw template. This is a newer slicing algorithm that handles cases heuristic.py does not. """ # Import annotations for py 3.7 to allow `regex.Match[str]` from __future__ import annotations import logging from dataclasses import dataclass, field from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast import regex from jinja2 import Environment from jinja2.exceptions import TemplateSyntaxError from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFileSlice # Instantiate the templater logger templater_logger = logging.getLogger("sqlfluff.templater") class JinjaTrace(NamedTuple): """Returned by JinjaTracer.trace().""" # Template output templated_str: str # Raw (i.e. 
before rendering) Jinja template sliced into tokens raw_sliced: List[RawFileSlice] # Rendered Jinja template (i.e. output) mapped back to raw_str source sliced_file: List[TemplatedFileSlice] @dataclass class RawSliceInfo: """JinjaTracer-specific info about each RawFileSlice.""" unique_alternate_id: Optional[str] alternate_code: Optional[str] next_slice_indices: List[int] = field(default_factory=list) inside_block: bool = field(default=False) # {% block %} class JinjaTracer: """Records execution path of a Jinja template.""" def __init__( self, raw_str: str, raw_sliced: List[RawFileSlice], raw_slice_info: Dict[RawFileSlice, RawSliceInfo], sliced_file: List[TemplatedFileSlice], render_func: Callable[[str], str], ): # Input self.raw_str = raw_str self.raw_sliced = raw_sliced self.raw_slice_info = raw_slice_info self.sliced_file = sliced_file self.render_func = render_func # Internal bookkeeping self.program_counter: int = 0 self.source_idx: int = 0 def trace( self, append_to_templated: str = "", ) -> JinjaTrace: """Executes raw_str. Returns template output and trace.""" trace_template_str = "".join( cast(str, self.raw_slice_info[rs].alternate_code) if self.raw_slice_info[rs].alternate_code is not None else rs.raw for rs in self.raw_sliced ) trace_template_output = self.render_func(trace_template_str) # Split output by section. Each section has two possible formats. trace_entries: List[regex.Match[str]] = list( regex.finditer(r"\0", trace_template_output) ) # If the file has no templated entries, we should just iterate # through the raw slices to add all the placeholders. if not trace_entries: for raw_idx, _ in enumerate(self.raw_sliced): self.record_trace(0, raw_idx) for match_idx, match in enumerate(trace_entries): pos1 = match.span()[0] try: pos2 = trace_entries[match_idx + 1].span()[0] except IndexError: pos2 = len(trace_template_output) p = trace_template_output[pos1 + 1 : pos2] m_id = regex.match(r"^([0-9a-f]+)(_(\d+))?", p) if not m_id: raise ValueError( # pragma: no cover "Internal error. Trace template output does not match expected " "format." ) if m_id.group(3): # E.g. "00000000000000000000000000000001_83". The number after # "_" is the length (in characters) of a corresponding literal # in raw_str. alt_id, slice_length = m_id.group(1), int(m_id.group(3)) else: # E.g. "00000000000000000000000000000002 a < 10". The characters # after the slice ID are executable code from raw_str. alt_id, slice_length = m_id.group(0), len(p[len(m_id.group(0)) + 1 :]) target_slice_idx = self.find_slice_index(alt_id) target_inside_block = self.raw_slice_info[ self.raw_sliced[target_slice_idx] ].inside_block if not target_inside_block: # Normal case: Walk through the template. self.move_to_slice(target_slice_idx, slice_length) else: # {% block %} executes code elsewhere in the template but does # not move there. It's a bit like macro invocation. self.record_trace(slice_length, target_slice_idx) # TRICKY: The 'append_to_templated' parameter is only used by the dbt # templater, passing "\n" for this parameter if we need to add one back. # (The Jinja templater does not pass this parameter, so # 'append_to_templated' gets the default value of "", empty string.) # For more detail, see the comments near the call to slice_file() in # plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py.
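        # As a hypothetical illustration: if dbt strips a trailing "\n" from
        # the compiled file, the dbt templater calls trace() with
        # append_to_templated="\n" so the re-rendered output below lines up
        # with the source again.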
templated_str = self.render_func(self.raw_str) + append_to_templated return JinjaTrace(templated_str, self.raw_sliced, self.sliced_file) def find_slice_index(self, slice_identifier: Union[int, str]) -> int: """Given a slice identifier, return its index. A slice identifier is a string like 00000000000000000000000000000002. """ raw_slices_search_result = [ idx for idx, rs in enumerate(self.raw_sliced) if self.raw_slice_info[rs].unique_alternate_id == slice_identifier ] if len(raw_slices_search_result) != 1: raise ValueError( # pragma: no cover f"Internal error. Unable to locate slice for {slice_identifier}." ) return raw_slices_search_result[0] def move_to_slice( self, target_slice_idx: int, target_slice_length: int, ) -> Dict[int, List[int]]: """Given a template location, walk execution to that point. This updates the internal `program_counter` to the appropriate location. Returns: :obj:`dict`: For each step in the template, a :obj:`list` of which steps are accessible. In many cases each step will only have one accessible next step (the following one), however for branches in the program there may be more than one. """ step_candidates = {} while self.program_counter < len(self.raw_sliced): self.record_trace( target_slice_length if self.program_counter == target_slice_idx else 0 ) current_raw_slice = self.raw_sliced[self.program_counter] if self.program_counter == target_slice_idx: # Reached the target slice. Go to next location and stop. self.program_counter += 1 break # Choose the next step. # We could simply go to the next slice (sequential execution). candidates = [self.program_counter + 1] # If we have other options, consider those. candidates.extend( filter( # They're a valid possibility if # they don't take us past the target. lambda idx: idx <= target_slice_idx, self.raw_slice_info[current_raw_slice].next_slice_indices, ) ) # Choose the candidate that takes us closest to the target. candidates.sort(key=lambda c: abs(target_slice_idx - c)) # Save all the candidates for each step so we can return them later. step_candidates[self.program_counter] = candidates # Step forward to the best step found. self.program_counter = candidates[0] # Return the candidates at each step. return step_candidates def record_trace( self, target_slice_length: int, slice_idx: Optional[int] = None, slice_type: Optional[str] = None, ) -> None: """Add the specified (default: current) location to the trace. Args: target_slice_length (int): The length of the target slice. slice_idx (Optional[int], optional): The index of the slice. Defaults to None. slice_type (Optional[str], optional): The type of the slice. Defaults to None. 
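        As an illustrative note: move_to_slice() records a zero length
        trace (``target_slice_length=0``) for each slice it walks past,
        since control-flow slices like "block_start" appear in the source
        but contribute no characters to the templated output.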
""" if slice_idx is None: slice_idx = self.program_counter if slice_type is None: slice_type = self.raw_sliced[slice_idx].slice_type self.sliced_file.append( TemplatedFileSlice( slice_type, slice( self.raw_sliced[slice_idx].source_idx, self.raw_sliced[slice_idx + 1].source_idx if slice_idx + 1 < len(self.raw_sliced) else len(self.raw_str), ), slice(self.source_idx, self.source_idx + target_slice_length), ) ) if target_slice_length: self.source_idx += target_slice_length class JinjaAnalyzer: """Analyzes a Jinja template to prepare for tracing.""" re_open_tag = regex.compile(r"^\s*({[{%])[\+\-]?\s*") re_close_tag = regex.compile(r"\s*[\+\-]?([}%]})\s*$") def __init__(self, raw_str: str, env: Environment) -> None: # Input self.raw_str: str = raw_str self.env = env # Output self.raw_sliced: List[RawFileSlice] = [] self.raw_slice_info: Dict[RawFileSlice, RawSliceInfo] = {} self.sliced_file: List[TemplatedFileSlice] = [] # Internal bookkeeping self.slice_id: int = 0 # {% set %} or {% macro %} or {% call %} self.inside_set_macro_or_call: bool = False self.inside_block = False # {% block %} self.stack: List[int] = [] self.idx_raw: int = 0 def next_slice_id(self) -> str: """Returns a new, unique slice ID.""" result = "{0:#0{1}x}".format(self.slice_id, 34)[2:] self.slice_id += 1 return result def slice_info_for_literal(self, length: int, prefix: str = "") -> RawSliceInfo: """Returns a RawSliceInfo for a literal. In the alternate template, literals are replaced with a uniquely numbered, easy-to-parse literal. JinjaTracer uses this output as a "breadcrumb trail" to deduce the execution path through the template. This is important even if the original literal (i.e. in the raw SQL file) was empty, as is the case when Jinja whitespace control is used (e.g. "{%- endif -%}"), because fewer breadcrumbs means JinjaTracer has to *guess* the path, in which case it assumes simple, straight-line execution, which can easily be wrong with loops and conditionals. """ unique_alternate_id = self.next_slice_id() alternate_code = f"\0{prefix}{unique_alternate_id}_{length}" return self.make_raw_slice_info( unique_alternate_id, alternate_code, inside_block=self.inside_block ) def update_inside_set_call_macro_or_block( self, block_type: str, trimmed_parts: List[str], m_open: Optional[regex.Match[str]], m_close: Optional[regex.Match[str]], tag_contents: List[str], ) -> Tuple[Optional[RawSliceInfo], str]: """Based on block tag, update whether in a set/call/macro/block section.""" if block_type == "block_start" and trimmed_parts[0] in ( "block", "call", "macro", "set", ): # Jinja supports two forms of {% set %}: # - {% set variable = value %} # - {% set variable %}value{% endset %} # https://jinja.palletsprojects.com/en/2.10.x/templates/#block-assignments # When the second format is used, set one of the fields # 'inside_set_or_macro' or 'inside_block' to True. This info is # used elsewhere, as other code inside these regions require # special handling. (Generally speaking, JinjaAnalyzer ignores # the contents of these blocks, treating them like opaque templated # regions.) try: # Entering a set/macro block. Build a source string consisting # of just this one Jinja command and see if it parses. If so, # it's a standalone command. OTOH, if it fails with "Unexpected # end of template", it was the opening command for a block. self.env.from_string( f"{self.env.block_start_string} {' '.join(trimmed_parts)} " f"{self.env.block_end_string}" ) # Here we should mutate the block type to just templated # so we don't treat it as a block. 
# https://github.com/sqlfluff/sqlfluff/issues/3750 block_type = "templated" except TemplateSyntaxError as e: if ( isinstance(e.message, str) and "Unexpected end of template" in e.message ): # It was opening a block, thus we're inside a set, macro, or # block. if trimmed_parts[0] == "block": self.inside_block = True else: result = None if trimmed_parts[0] == "call": assert m_open and m_close result = self.track_call(m_open, m_close, tag_contents) self.inside_set_macro_or_call = True return result, block_type else: raise # pragma: no cover elif block_type == "block_end": if trimmed_parts[0] in ("endcall", "endmacro", "endset"): # Exiting a set or macro or block. self.inside_set_macro_or_call = False elif trimmed_parts[0] == "endblock": # Exiting a {% block %} block. self.inside_block = False return None, block_type def make_raw_slice_info( self, unique_alternate_id: Optional[str], alternate_code: Optional[str], inside_block: bool = False, ) -> RawSliceInfo: """Create RawSliceInfo as given, or "empty" if in set/macro block.""" if not self.inside_set_macro_or_call: return RawSliceInfo(unique_alternate_id, alternate_code, [], inside_block) else: return RawSliceInfo(None, None, [], False) # We decide the "kind" of element we're dealing with using its _closing_ # tag rather than its opening tag. The types here map back to similar types # of sections in the python slicer. block_types = { "variable_end": "templated", "block_end": "block", "comment_end": "comment", # Raw tags should behave like blocks. Note that # raw_end and raw_begin are whole tags rather # than blocks and comments where we get partial # tags. "raw_end": "block", "raw_begin": "block", } def analyze(self, render_func: Callable[[str], str]) -> JinjaTracer: """Slice template in jinja.""" # str_buff and str_parts are two ways we keep track of tokens received # from Jinja. str_buff concatenates them together, while str_parts # accumulates the individual strings. We generally prefer using # str_parts. That's because Jinja doesn't just split on whitespace, so # by keeping tokens as Jinja returns them, the code is more robust. # Consider the following: # {% set col= "col1" %} # Note there's no space after col. Jinja splits this up for us. If we # simply concatenated the parts together and later split on whitespace, # we'd need some ugly, fragile logic to handle various whitespace # possibilities: # {% set col= "col1" %} # {% set col = "col1" %} # {% set col ="col1" %} # By using str_parts and letting Jinja handle this, it just works. str_buff = "" str_parts = [] # https://jinja.palletsprojects.com/en/2.11.x/api/#jinja2.Environment.lex block_idx = 0 for _, elem_type, raw in self.env.lex(self.raw_str): if elem_type == "data": self.track_literal(raw, block_idx) continue str_buff += raw str_parts.append(raw) if elem_type.endswith("_begin"): self.handle_left_whitespace_stripping(raw, block_idx) raw_slice_info: RawSliceInfo = self.make_raw_slice_info(None, None) tag_contents = [] # raw_end and raw_begin behave a little differently in # that the whole tag shows up in one go rather than getting # parts of the tag at a time. 
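            # As an illustrative note: for "{% raw %} ... {% endraw %}" the
            # lexer emits whole "raw_begin" and "raw_end" tokens, unlike
            # ordinary blocks where the open marker, contents and close
            # marker arrive as separate parts.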
m_open = None m_close = None if elem_type.endswith("_end") or elem_type == "raw_begin": block_type = self.block_types[elem_type] block_tag = None # Handle starts and ends of blocks if block_type in ("block", "templated"): m_open = self.re_open_tag.search(str_parts[0]) m_close = self.re_close_tag.search(str_parts[-1]) if m_open and m_close: tag_contents = self.extract_tag_contents( str_parts, m_close, m_open, str_buff ) if block_type == "block" and tag_contents: block_type = self.extract_block_type(tag_contents[0]) block_tag = tag_contents[0] if block_type == "templated" and tag_contents: assert m_open and m_close raw_slice_info = self.track_templated( m_open, m_close, tag_contents ) ( raw_slice_info_temp, block_type, ) = self.update_inside_set_call_macro_or_block( block_type, tag_contents, m_open, m_close, tag_contents ) if raw_slice_info_temp: raw_slice_info = raw_slice_info_temp m_strip_right = regex.search( r"\s+$", raw, regex.MULTILINE | regex.DOTALL ) if block_type == "block_start": block_idx += 1 if elem_type.endswith("_end") and raw.startswith("-") and m_strip_right: # Right whitespace was stripped after closing block. Split # off the trailing whitespace into a separate slice. The # desired behavior is to behave similarly as the left # stripping case. Note that the stakes are a bit lower here, # because lex() hasn't *omitted* any characters from the # strings it returns, it has simply grouped them differently # than we want. trailing_chars = len(m_strip_right.group(0)) self.raw_sliced.append( RawFileSlice( str_buff[:-trailing_chars], block_type, self.idx_raw, block_idx, block_tag, ) ) self.raw_slice_info[self.raw_sliced[-1]] = raw_slice_info slice_idx = len(self.raw_sliced) - 1 self.idx_raw += len(str_buff) - trailing_chars if block_type == "block_end": block_idx += 1 self.raw_sliced.append( RawFileSlice( str_buff[-trailing_chars:], "literal", self.idx_raw, block_idx, ) ) self.raw_slice_info[ self.raw_sliced[-1] ] = self.slice_info_for_literal(0) self.idx_raw += trailing_chars else: self.raw_sliced.append( RawFileSlice( str_buff, block_type, self.idx_raw, block_idx, block_tag, ) ) self.raw_slice_info[self.raw_sliced[-1]] = raw_slice_info slice_idx = len(self.raw_sliced) - 1 self.idx_raw += len(str_buff) if block_type == "block_end": block_idx += 1 if block_type.startswith("block"): self.track_block_end(block_type, tag_contents[0]) self.update_next_slice_indices( slice_idx, block_type, tag_contents[0] ) str_buff = "" str_parts = [] return JinjaTracer( self.raw_str, self.raw_sliced, self.raw_slice_info, self.sliced_file, render_func, ) def track_templated( self, m_open: regex.Match[str], m_close: regex.Match[str], tag_contents: List[str], ) -> RawSliceInfo: """Compute tracking info for Jinja templated region, e.g. {{ foo }}. Args: m_open (regex.Match): A regex match object representing the opening tag. m_close (regex.Match): A regex match object representing the closing tag. tag_contents (List[str]): A list of strings representing the contents of the tag. Returns: RawSliceInfo: A RawSliceInfo object containing the computed tracking info. """ unique_alternate_id = self.next_slice_id() open_ = m_open.group(1) close_ = m_close.group(1) # Here, we still need to evaluate the original tag contents, e.g. in # case it has intentional side effects, but also return a slice ID # for tracking. 
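        # As a hypothetical illustration: for the tag "{{ foo }}" this builds
        # an alternate tag of the form "\0<32-hex-id> {{ foo }}", so the
        # original expression is still evaluated (preserving any side
        # effects) while the output carries a parseable breadcrumb.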
alternate_code = ( f"\0{unique_alternate_id} {open_} " f"{''.join(tag_contents)} {close_}" ) return self.make_raw_slice_info(unique_alternate_id, alternate_code) def track_call( self, m_open: regex.Match[str], m_close: regex.Match[str], tag_contents: List[str], ) -> RawSliceInfo: """Set up tracking for "{% call ... %}". Args: m_open (regex.Match): A regex match object representing the opening tag. m_close (regex.Match): A regex match object representing the closing tag. tag_contents (List[str]): A list of strings representing the contents of the tag. Returns: RawSliceInfo: A RawSliceInfo object containing the computed tracking info. """ unique_alternate_id = self.next_slice_id() open_ = m_open.group(1) close_ = m_close.group(1) # Here, we still need to evaluate the original tag contents, e.g. in # case it has intentional side effects, but also return a slice ID # for tracking. alternate_code = ( f"\0{unique_alternate_id} {open_} " f"{''.join(tag_contents)} {close_}" ) return self.make_raw_slice_info(unique_alternate_id, alternate_code) def track_literal(self, raw: str, block_idx: int) -> None: """Set up tracking for a Jinja literal.""" self.raw_sliced.append( RawFileSlice( raw, "literal", self.idx_raw, block_idx, ) ) # Replace literal text with a unique ID. self.raw_slice_info[self.raw_sliced[-1]] = self.slice_info_for_literal( len(raw), "" ) self.idx_raw += len(raw) @staticmethod def extract_block_type(tag_name: str) -> str: """Determine block type.""" # :TRICKY: Syntactically, the Jinja {% include %} directive looks like # a block, but its behavior is basically syntactic sugar for # {{ open("somefile").read() }}. Thus, treat it as templated code. # It's a similar situation with {% import %} and {% from ... import %}. if tag_name in ["include", "import", "from", "do"]: block_type = "templated" elif tag_name.startswith("end"): block_type = "block_end" elif tag_name.startswith("el"): # else, elif block_type = "block_mid" else: block_type = "block_start" return block_type @staticmethod def extract_tag_contents( str_parts: List[str], m_close: regex.Match[str], m_open: regex.Match[str], str_buff: str, ) -> List[str]: """Given Jinja tag info, return the stuff inside the braces. I.e. Trim off the brackets and the whitespace. Args: str_parts (List[str]): A list of string parts. m_close (regex.Match[str]): The regex match for the closing tag. m_open (regex.Match[str]): The regex match for the opening tag. str_buff (str): The string buffer. Returns: List[str]: The trimmed parts inside the Jinja tag. """ if len(str_parts) >= 3: # Handle a tag received as individual parts. trimmed_parts = str_parts[1:-1] if trimmed_parts[0].isspace(): del trimmed_parts[0] if trimmed_parts[-1].isspace(): del trimmed_parts[-1] else: # Handle a tag received in one go. trimmed_content = str_buff[len(m_open.group(0)) : -len(m_close.group(0))] trimmed_parts = trimmed_content.split() return trimmed_parts def track_block_end(self, block_type: str, tag_name: str) -> None: """On ending a 'for' or 'if' block, set up tracking. Args: block_type (str): The type of block ('for' or 'if'). tag_name (str): The name of the tag. """ if block_type == "block_end" and tag_name in ( "endfor", "endif", ): # Replace RawSliceInfo for this slice with one that has alternate ID # and code for tracking. This ensures, for instance, that if a file # ends with "{% endif %} (with no newline following), that we still # generate a TemplateSliceInfo for it.
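            # As a hypothetical illustration: "{% endif %}" is replaced in
            # the alternate template by "{% endif %}\0<32-hex-id>_0",
            # emitting a zero length breadcrumb even when no literal follows
            # the closing tag.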
unique_alternate_id = self.next_slice_id() alternate_code = f"{self.raw_sliced[-1].raw}\0{unique_alternate_id}_0" self.raw_slice_info[self.raw_sliced[-1]] = self.make_raw_slice_info( unique_alternate_id, alternate_code ) def update_next_slice_indices( self, slice_idx: int, block_type: str, tag_name: str ) -> None: """Based on block, update conditional jump info.""" if block_type == "block_start" and tag_name in ( "for", "if", ): self.stack.append(slice_idx) return None elif not self.stack: return None _idx = self.stack[-1] _raw_slice = self.raw_sliced[_idx] _slice_info = self.raw_slice_info[_raw_slice] if block_type == "block_mid": # Record potential forward jump over this block. _slice_info.next_slice_indices.append(slice_idx) self.stack.pop() self.stack.append(slice_idx) elif block_type == "block_end" and tag_name in ( "endfor", "endif", ): if not self.inside_set_macro_or_call: # Record potential forward jump over this block. _slice_info.next_slice_indices.append(slice_idx) self.stack.pop() if _raw_slice.slice_type == "block_start" and _raw_slice.tag == "for": # Record potential backward jump to the loop beginning. self.raw_slice_info[ self.raw_sliced[slice_idx] ].next_slice_indices.append(_idx + 1) def handle_left_whitespace_stripping(self, token: str, block_idx: int) -> None: """If block open uses whitespace stripping, record it. When a "begin" tag (whether block, comment, or data) uses whitespace stripping (https://jinja.palletsprojects.com/en/3.0.x/templates/#whitespace-control) the Jinja lex() function handles this by discarding adjacent whitespace from 'raw_str'. For more insight, see the tokeniter() function in this file: https://github.com/pallets/jinja/blob/main/src/jinja2/lexer.py We want to detect and correct for this in order to: - Correctly update "idx" (if this is wrong, that's a potential DISASTER because lint fixes use this info to update the source file, and incorrect values often result in CORRUPTING the user's file so it's no longer valid SQL. :-O - Guarantee that the slices we return fully "cover" the contents of 'in_str'. We detect skipped characters by looking ahead in in_str for the token just returned from lex(). The token text will either be at the current 'idx_raw' position (if whitespace stripping did not occur) OR it'll be farther along in 'raw_str', but we're GUARANTEED that lex() only skips over WHITESPACE; nothing else. """ # Find the token returned. Did lex() skip over any characters? num_chars_skipped = self.raw_str.index(token, self.idx_raw) - self.idx_raw if not num_chars_skipped: return # Yes. It skipped over some characters. Compute a string # containing the skipped characters. skipped_str = self.raw_str[self.idx_raw : self.idx_raw + num_chars_skipped] # Sanity check: Verify that Jinja only skips over # WHITESPACE, never anything else. if not skipped_str.isspace(): # pragma: no cover templater_logger.warning( "Jinja lex() skipped non-whitespace: %s", skipped_str ) # Treat the skipped whitespace as a literal. 
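        # As an illustrative sketch: given the raw string
        # "SELECT 1\n{%- endif %}", lex() returns the block begin token
        # having silently consumed the "\n", so we record that "\n" below as
        # a literal slice with zero length in the templated output.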
self.raw_sliced.append( RawFileSlice(skipped_str, "literal", self.idx_raw, block_idx) ) self.raw_slice_info[self.raw_sliced[-1]] = self.slice_info_for_literal(0) self.idx_raw += num_chars_skipped sqlfluff-2.3.5/src/sqlfluff/core/timing.py000066400000000000000000000055311451700765000205560ustar00rootroot00000000000000"""Timing summary class.""" from collections import defaultdict from typing import Dict, List, Optional, Set, Tuple, Union class TimingSummary: """An object for tracking the timing of similar steps across many files.""" def __init__(self, steps: Optional[List[str]] = None): self.steps = steps self._timings: List[Dict[str, float]] = [] def add(self, timing_dict: Dict[str, float]) -> None: """Add a timing dictionary to the summary.""" self._timings.append(timing_dict) if not self.steps: self.steps = list(timing_dict.keys()) def summary(self) -> Dict[str, Dict[str, float]]: """Generate a summary for display.""" vals: Dict[str, List[float]] = defaultdict(list) if not self.steps: # pragma: no cover return {} for timing_dict in self._timings: for step in self.steps: if step in timing_dict: vals[step].append(timing_dict[step]) summary = {} for step in self.steps: if vals[step]: summary[step] = { "cnt": len(vals[step]), "sum": sum(vals[step]), "min": min(vals[step]), "max": max(vals[step]), "avg": sum(vals[step]) / len(vals[step]), } return summary class RuleTimingSummary: """An object for tracking the timing of rules across many files.""" def __init__(self) -> None: self._timings: List[Tuple[str, str, float]] = [] def add(self, rule_timings: List[Tuple[str, str, float]]) -> None: """Add a set of rule timings.""" # Add records to the main list. self._timings.extend(rule_timings) def summary( self, threshold: float = 0.5 ) -> Dict[str, Dict[str, Union[float, str]]]: """Generate a summary for display.""" keys: Set[Tuple[str, str]] = set() vals: Dict[Tuple[str, str], List[float]] = defaultdict(list) for code, name, time in self._timings: vals[(code, name)].append(time) keys.add((code, name)) summary: Dict[str, Dict[str, Union[float, str]]] = {} for code, name in sorted(keys): timings = vals[(code, name)] # For brevity, if the total time taken is less than # `threshold`, then don't display. if sum(timings) < threshold: continue # NOTE: This summary isn't covered in tests, it's tricky # to force it to exist in a test environment without # making things complicated. summary[f"{code}: {name}"] = { # pragma: no cover "sum (n)": f"{sum(timings):.2f} ({len(timings)})", "min": min(timings), "max": max(timings), } return summary sqlfluff-2.3.5/src/sqlfluff/dialects/000077500000000000000000000000001451700765000175515ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/dialects/__init__.py000066400000000000000000000003211451700765000216560ustar00rootroot00000000000000"""Dialects, segregated to make imports manageable. NOTE: dialects should not be imported directly from this module, but should be accessed instead using the selector methods in `sqlfluff.core.dialects`. """ sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_ansi.py000066400000000000000000004064501451700765000225530ustar00rootroot00000000000000"""The core ANSI dialect. This is the core SQL grammar. We'll probably extend this or make it pluggable for other dialects. Here we encode the structure of the language. There shouldn't be any underlying "machinery" here, that should all be defined elsewhere. A lot of the inspiration for this sql grammar is taken from the cockroach labs full sql grammar. 
In particular their way for dividing up the expression grammar. Check out their docs, they're awesome. https://www.cockroachlabs.com/docs/stable/sql-grammar.html#select_stmt """ from enum import Enum from typing import Generator, List, NamedTuple, Optional, Set, Tuple, Union, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseFileSegment, BaseSegment, BinaryOperatorSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, CompositeBinaryOperatorSegment, CompositeComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, KeywordSegment, LiteralKeywordSegment, LiteralSegment, Matchable, MultiStringParser, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WhitespaceSegment, WordSegment, ) from sqlfluff.dialects.dialect_ansi_keywords import ( ansi_reserved_keywords, ansi_unreserved_keywords, ) ansi_dialect = Dialect("ansi", root_segment_name="FileSegment") ansi_dialect.set_lexer_matchers( [ # Match all forms of whitespace except newlines and carriage returns: # https://stackoverflow.com/questions/3469080/match-whitespace-but-not-newlines # This pattern allows us to also match non-breaking spaces (#2189). RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "inline_comment", r"(--|#)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--", "#")}, ), RegexLexer( "block_comment", r"\/\*([^\*]|\*(?!\/))*\*\/", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer("single_quote", r"'([^'\\]|\\.|'')*'", CodeSegment), RegexLexer("double_quote", r'"([^"\\]|\\.)*"', CodeSegment), RegexLexer("back_quote", r"`[^`]*`", CodeSegment), # See https://www.geeksforgeeks.org/postgresql-dollar-quoted-string-constants/ RegexLexer("dollar_quote", r"\$(\w*)\$[^\1]*?\$\1\$", CodeSegment), # Numeric literal matches integers, decimals, and exponential formats, # Pattern breakdown: # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # \d+\.\d+ e.g. 123.456 # |\d+\.(?![\.\w]) e.g. 123. # (N.B. negative lookahead assertion to ensure we # don't match range operators `..` in Exasol, and # that in bigquery we don't match the "." # in "asd-12.foo"). # |\.\d+ e.g. .456 # |\d+ e.g. 123 # ) # (\.?[eE][+-]?\d+)? Optional exponential. # ( # (?<=\.) If matched character ends with . (e.g. 123.) then # don't worry about word boundary check. # |(?=\b) Check that we are at word boundary to avoid matching # valid naked identifiers (e.g. 123column). 
# ) RegexLexer( "numeric_literal", r"(?>\d+\.\d+|\d+\.(?![\.\w])|\.\d+|\d+)(\.?[eE][+-]?\d+)?((?<=\.)|(?=\b))", LiteralSegment, ), RegexLexer("like_operator", r"!?~~?\*?", ComparisonOperatorSegment), RegexLexer("newline", r"\r\n|\n", NewlineSegment), StringLexer("casting_operator", "::", CodeSegment), StringLexer("equals", "=", CodeSegment), StringLexer("greater_than", ">", CodeSegment), StringLexer("less_than", "<", CodeSegment), StringLexer("not", "!", CodeSegment), StringLexer("dot", ".", CodeSegment), StringLexer("comma", ",", CodeSegment), StringLexer("plus", "+", CodeSegment), StringLexer("minus", "-", CodeSegment), StringLexer("divide", "/", CodeSegment), StringLexer("percent", "%", CodeSegment), StringLexer("question", "?", CodeSegment), StringLexer("ampersand", "&", CodeSegment), StringLexer("vertical_bar", "|", CodeSegment), StringLexer("caret", "^", CodeSegment), StringLexer("star", "*", CodeSegment), StringLexer("start_bracket", "(", CodeSegment), StringLexer("end_bracket", ")", CodeSegment), StringLexer("start_square_bracket", "[", CodeSegment), StringLexer("end_square_bracket", "]", CodeSegment), StringLexer("start_curly_bracket", "{", CodeSegment), StringLexer("end_curly_bracket", "}", CodeSegment), StringLexer("colon", ":", CodeSegment), StringLexer("semicolon", ";", CodeSegment), # This is the "fallback" lexer for anything else which looks like SQL. RegexLexer("word", r"[0-9a-zA-Z_]+", WordSegment), ] ) # Set the bare functions ansi_dialect.sets("bare_functions").update( ["current_timestamp", "current_time", "current_date"] ) # Set the datetime units ansi_dialect.sets("datetime_units").update( [ "DAY", "DAYOFYEAR", "HOUR", "MILLISECOND", "MINUTE", "MONTH", "QUARTER", "SECOND", "WEEK", "WEEKDAY", "YEAR", ] ) ansi_dialect.sets("date_part_function_name").update(["DATEADD"]) # Set Keywords ansi_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", ansi_unreserved_keywords ) ansi_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", ansi_reserved_keywords ) # Bracket pairs (a set of tuples). # (name, startref, endref, persists) # NOTE: The `persists` value controls whether this type # of bracket is persisted during matching to speed up other # parts of the matching process. Round brackets are the most # common and match the largest areas and so are sufficient. ansi_dialect.bracket_sets("bracket_pairs").update( [ ("round", "StartBracketSegment", "EndBracketSegment", True), ("square", "StartSquareBracketSegment", "EndSquareBracketSegment", False), ("curly", "StartCurlyBracketSegment", "EndCurlyBracketSegment", False), ] ) # Set the value table functions. These are functions that, if they appear as # an item in "FROM", are treated as returning a COLUMN, not a TABLE. Apparently, # among dialects supported by SQLFluff, only BigQuery has this concept, but this # set is defined in the ANSI dialect because: # - It impacts core linter rules (see AL04 and several other rules that subclass # from it) and how they interpret the contents of table_expressions # - At least one other database (DB2) has the same value table function, # UNNEST(), as BigQuery. DB2 is not currently supported by SQLFluff. 
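# As an illustrative note (the real registration lives in the child dialect
# modules, e.g. BigQuery), a dialect would extend this set like:
#
#     some_dialect.sets("value_table_functions").update(["unnest"])
#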
ansi_dialect.sets("value_table_functions").update([]) ansi_dialect.add( # Real segments DelimiterGrammar=Ref("SemicolonSegment"), SemicolonSegment=StringParser(";", SymbolSegment, type="statement_terminator"), ColonSegment=StringParser(":", SymbolSegment, type="colon"), SliceSegment=StringParser(":", SymbolSegment, type="slice"), # NOTE: The purpose of the colon_delimiter is that it has different layout rules. # It assumes no whitespace on either side. ColonDelimiterSegment=StringParser(":", SymbolSegment, type="colon_delimiter"), StartBracketSegment=StringParser("(", SymbolSegment, type="start_bracket"), EndBracketSegment=StringParser(")", SymbolSegment, type="end_bracket"), StartSquareBracketSegment=StringParser( "[", SymbolSegment, type="start_square_bracket" ), EndSquareBracketSegment=StringParser("]", SymbolSegment, type="end_square_bracket"), StartCurlyBracketSegment=StringParser( "{", SymbolSegment, type="start_curly_bracket" ), EndCurlyBracketSegment=StringParser("}", SymbolSegment, type="end_curly_bracket"), CommaSegment=StringParser(",", SymbolSegment, type="comma"), DotSegment=StringParser(".", SymbolSegment, type="dot"), StarSegment=StringParser("*", SymbolSegment, type="star"), TildeSegment=StringParser("~", SymbolSegment, type="tilde"), ParameterSegment=StringParser("?", SymbolSegment, type="parameter"), CastOperatorSegment=StringParser("::", SymbolSegment, type="casting_operator"), PlusSegment=StringParser("+", SymbolSegment, type="binary_operator"), MinusSegment=StringParser("-", SymbolSegment, type="binary_operator"), PositiveSegment=StringParser("+", SymbolSegment, type="sign_indicator"), NegativeSegment=StringParser("-", SymbolSegment, type="sign_indicator"), DivideSegment=StringParser("/", SymbolSegment, type="binary_operator"), MultiplySegment=StringParser("*", SymbolSegment, type="binary_operator"), ModuloSegment=StringParser("%", SymbolSegment, type="binary_operator"), SlashSegment=StringParser("/", SymbolSegment, type="slash"), AmpersandSegment=StringParser("&", SymbolSegment, type="ampersand"), PipeSegment=StringParser("|", SymbolSegment, type="pipe"), BitwiseXorSegment=StringParser("^", SymbolSegment, type="binary_operator"), LikeOperatorSegment=TypedParser( "like_operator", ComparisonOperatorSegment, type="like_operator" ), RawNotSegment=StringParser("!", SymbolSegment, type="raw_comparison_operator"), RawEqualsSegment=StringParser("=", SymbolSegment, type="raw_comparison_operator"), RawGreaterThanSegment=StringParser( ">", SymbolSegment, type="raw_comparison_operator" ), RawLessThanSegment=StringParser("<", SymbolSegment, type="raw_comparison_operator"), # The following functions can be called without parentheses per ANSI specification BareFunctionSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("bare_functions"), CodeSegment, type="bare_function", ) ), # The strange regex here it to make sure we don't accidentally match numeric # literals. We also use a regex to explicitly exclude disallowed keywords. NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), ParameterNameSegment=RegexParser( r"\"?[A-Z][A-Z0-9_]*\"?", CodeSegment, type="parameter" ), FunctionNameIdentifierSegment=TypedParser( "word", WordSegment, type="function_name_identifier" ), # Maybe data types should be more restrictive? 
DatatypeIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: OneOf( RegexParser( r"[A-Z_][A-Z0-9_]*", CodeSegment, type="data_type_identifier", anti_template=r"^(NOT)$", # TODO - this is a stopgap until we implement explicit data types ), Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")), ), ), # Ansi Intervals DatetimeUnitSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("datetime_units"), CodeSegment, type="date_part", ) ), DatePartFunctionName=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("date_part_function_name"), CodeSegment, type="function_name_identifier", ) ), QuotedIdentifierSegment=TypedParser( "double_quote", IdentifierSegment, type="quoted_identifier" ), QuotedLiteralSegment=TypedParser( "single_quote", LiteralSegment, type="quoted_literal" ), SingleQuotedIdentifierSegment=TypedParser( "single_quote", IdentifierSegment, type="quoted_identifier" ), NumericLiteralSegment=TypedParser( "numeric_literal", LiteralSegment, type="numeric_literal" ), # NullSegment is defined separately to the keyword, so we can give it a different # type NullLiteralSegment=StringParser("null", LiteralKeywordSegment, type="null_literal"), NanLiteralSegment=StringParser("nan", LiteralKeywordSegment, type="null_literal"), TrueSegment=StringParser("true", LiteralKeywordSegment, type="boolean_literal"), FalseSegment=StringParser("false", LiteralKeywordSegment, type="boolean_literal"), # We use a GRAMMAR here not a Segment. Otherwise, we get an unnecessary layer SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("DotSegment")], ), BooleanLiteralGrammar=OneOf(Ref("TrueSegment"), Ref("FalseSegment")), # We specifically define a group of arithmetic operators to make it easier to # override this if some dialects have different available operators ArithmeticBinaryOperatorGrammar=OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), Ref("BitwiseLShiftSegment"), Ref("BitwiseRShiftSegment"), ), SignedSegmentGrammar=OneOf(Ref("PositiveSegment"), Ref("NegativeSegment")), StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment")), BooleanBinaryOperatorGrammar=OneOf( Ref("AndOperatorGrammar"), Ref("OrOperatorGrammar") ), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), ), # hookpoint for other dialects # e.g. EXASOL str to date cast with DATE '2021-01-01' # Give it a different type as needs to be single quotes and # should not be changed by rules (e.g. rule CV10) DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), # Hookpoint for other dialects # e.g. INTO is optional in BIGQUERY MergeIntoLiteralGrammar=Sequence("MERGE", "INTO"), LiteralGrammar=OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("QualifiedNumericLiteralSegment"), # NB: Null is included in the literals, because it is a keyword which # can otherwise be easily mistaken for an identifier. 
Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), Ref("ArrayLiteralSegment"), Ref("TypedArrayLiteralSegment"), Ref("ObjectLiteralSegment"), ), AndOperatorGrammar=StringParser("AND", BinaryOperatorSegment), OrOperatorGrammar=StringParser("OR", BinaryOperatorSegment), NotOperatorGrammar=StringParser("NOT", KeywordSegment, type="keyword"), # This is a placeholder for other dialects. PreTableFunctionKeywordsGrammar=Nothing(), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), ), # This pattern is used in a lot of places. # Defined here to avoid repetition. BracketedColumnReferenceListGrammar=Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), OrReplaceGrammar=Sequence("OR", "REPLACE"), TemporaryTransientGrammar=OneOf("TRANSIENT", Ref("TemporaryGrammar")), TemporaryGrammar=OneOf("TEMP", "TEMPORARY"), IfExistsGrammar=Sequence("IF", "EXISTS"), IfNotExistsGrammar=Sequence("IF", "NOT", "EXISTS"), LikeGrammar=OneOf("LIKE", "RLIKE", "ILIKE"), UnionGrammar=Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), IsClauseGrammar=OneOf( Ref("NullLiteralSegment"), Ref("NanLiteralSegment"), Ref("BooleanLiteralGrammar"), ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), "FETCH", ), # Define these as grammars to allow child dialects to enable them (since they are # non-standard keywords) IsNullGrammar=Nothing(), NotNullGrammar=Nothing(), CollateGrammar=Nothing(), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "FETCH", ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "FETCH", ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", "WINDOW", "FETCH", ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "QUALIFY", "WINDOW", "FETCH", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", "FETCH", ), PrimaryKeyGrammar=Sequence("PRIMARY", "KEY"), ForeignKeyGrammar=Sequence("FOREIGN", "KEY"), UniqueKeyGrammar=Sequence("UNIQUE"), # Odd syntax, but prevents eager parameters being confused for data types FunctionParameterGrammar=OneOf( Sequence( Ref("ParameterNameSegment", optional=True), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")), ), AutoIncrementGrammar=Sequence("AUTO_INCREMENT"), # Base Expression element is the right thing to reference for everything # which functions as an expression, but could include literals. BaseExpressionElementGrammar=OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("IntervalExpressionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Sequence( Ref("DatatypeSegment"), Ref("LiteralGrammar"), ), # These terminators allow better performance by giving a signal # of a likely complete match if they come after a match. For # example "123," only needs to match against the LiteralGrammar # and because a comma follows, never be matched against # ExpressionSegment or FunctionSegment, which are both much # more complicated. 
        terminators=[
            Ref("CommaSegment"),
            Ref.keyword("AS"),
            # TODO: We can almost certainly add a few more here.
        ],
    ),
    FilterClauseGrammar=Sequence(
        "FILTER", Bracketed(Sequence("WHERE", Ref("ExpressionSegment")))
    ),
    IgnoreRespectNullsGrammar=Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"),
    FrameClauseUnitGrammar=OneOf("ROWS", "RANGE"),
    JoinTypeKeywordsGrammar=OneOf(
        "CROSS",
        "INNER",
        Sequence(
            OneOf(
                "FULL",
                "LEFT",
                "RIGHT",
            ),
            Ref.keyword("OUTER", optional=True),
        ),
        optional=True,
    ),
    # This is a sequence to allow the Postgres dialect to parametrize it
    # with LATERAL.
    JoinKeywordsGrammar=Sequence("JOIN"),
    # NATURAL joins are not supported in all dialects (e.g. not in Bigquery
    # or T-SQL). So define here to allow override with Nothing() for those.
    NaturalJoinKeywordsGrammar=Sequence(
        "NATURAL",
        OneOf(
            # Note that NATURAL joins do not support CROSS joins
            "INNER",
            Sequence(
                OneOf("LEFT", "RIGHT", "FULL"),
                Ref.keyword("OUTER", optional=True),
                optional=True,
            ),
            optional=True,
        ),
    ),
    # This can be overwritten by dialects
    ExtendedNaturalJoinKeywordsGrammar=Nothing(),
    NestedJoinGrammar=Nothing(),
    ReferentialActionGrammar=OneOf(
        "RESTRICT",
        "CASCADE",
        Sequence("SET", "NULL"),
        Sequence("NO", "ACTION"),
        Sequence("SET", "DEFAULT"),
    ),
    DropBehaviorGrammar=OneOf("RESTRICT", "CASCADE", optional=True),
    ColumnConstraintDefaultGrammar=OneOf(
        Ref("ShorthandCastSegment"),
        Ref("LiteralGrammar"),
        Ref("FunctionSegment"),
        Ref("BareFunctionSegment"),
    ),
    ReferenceDefinitionGrammar=Sequence(
        "REFERENCES",
        Ref("TableReferenceSegment"),
        # Foreign columns making up FOREIGN KEY constraint
        Ref("BracketedColumnReferenceListGrammar", optional=True),
        Sequence(
            "MATCH",
            OneOf(
                "FULL",
                "PARTIAL",
                "SIMPLE",
            ),
            optional=True,
        ),
        AnySetOf(
            # ON DELETE clause, e.g. ON DELETE NO ACTION
            Sequence(
                "ON",
                "DELETE",
                Ref("ReferentialActionGrammar"),
            ),
            # ON UPDATE clause, e.g. ON UPDATE SET NULL
            Sequence(
                "ON",
                "UPDATE",
                Ref("ReferentialActionGrammar"),
            ),
        ),
    ),
    TrimParametersGrammar=OneOf("BOTH", "LEADING", "TRAILING"),
    DefaultValuesGrammar=Sequence("DEFAULT", "VALUES"),
    ObjectReferenceDelimiterGrammar=OneOf(
        Ref("DotSegment"),
        # NOTE: The double dot syntax allows for default values.
        Sequence(Ref("DotSegment"), Ref("DotSegment")),
    ),
    ObjectReferenceTerminatorGrammar=OneOf(
        "ON",
        "AS",
        "USING",
        Ref("CommaSegment"),
        Ref("CastOperatorSegment"),
        Ref("StartSquareBracketSegment"),
        Ref("StartBracketSegment"),
        Ref("BinaryOperatorGrammar"),
        Ref("ColonSegment"),
        Ref("DelimiterGrammar"),
        Ref("JoinLikeClauseGrammar"),
        BracketedSegment,
    ),
    AlterTableOptionsGrammar=OneOf(
        # Table options
        Sequence(
            Ref("ParameterNameSegment"),
            Ref("EqualsSegment", optional=True),
            OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")),
        ),
        # Add things
        Sequence(
            OneOf("ADD", "MODIFY"),
            Ref.keyword("COLUMN", optional=True),
            Ref("ColumnDefinitionSegment"),
            OneOf(
                Sequence(OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment")),
                # Bracketed Version of the same
                Ref("BracketedColumnReferenceListGrammar"),
                optional=True,
            ),
        ),
        # Rename
        Sequence(
            "RENAME",
            OneOf("AS", "TO", optional=True),
            Ref("TableReferenceSegment"),
        ),
    ),
)


class FileSegment(BaseFileSegment):
    """A segment representing a whole file or script.

    This is also the default "root" segment of the dialect, and so
    is usually instantiated directly. Its match_grammar is a delimited
    set of statements.
""" match_grammar = Delimited( Ref("StatementSegment"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ) def get_table_references(self) -> Set[str]: """Use parsed tree to extract table references.""" references = set() for stmt in self.get_children("statement"): stmt = cast(StatementSegment, stmt) references |= stmt.get_table_references() return references class IntervalExpressionSegment(BaseSegment): """An interval expression segment.""" type = "interval_expression" match_grammar: Matchable = Sequence( "INTERVAL", OneOf( # The Numeric Version Sequence( Ref("NumericLiteralSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment")), ), # The String version Ref("QuotedLiteralSegment"), ), ) class ArrayTypeSegment(BaseSegment): """Prefix for array literals specifying the type. Often "ARRAY" or "ARRAY" """ type = "array_type" match_grammar: Matchable = Nothing() class SizedArrayTypeSegment(BaseSegment): """Array type with a size.""" type = "sized_array_type" match_grammar = Sequence( Ref("ArrayTypeSegment"), Ref("ArrayAccessorSegment"), ) class ArrayLiteralSegment(BaseSegment): """An array literal segment. An unqualified array literal: e.g. [1, 2, 3] """ type = "array_literal" match_grammar: Matchable = Bracketed( Delimited(Ref("BaseExpressionElementGrammar"), optional=True), bracket_type="square", ) class TypedArrayLiteralSegment(BaseSegment): """An array literal segment.""" type = "typed_array_literal" match_grammar: Matchable = Sequence( Ref("ArrayTypeSegment"), Ref("ArrayLiteralSegment"), ) class StructTypeSegment(BaseSegment): """Expression to construct a STRUCT datatype. (Used in BigQuery for example) """ type = "struct_type" match_grammar: Matchable = Nothing() class StructLiteralSegment(BaseSegment): """An array literal segment. An unqualified struct literal: e.g. (1, 2 as foo, 3) NOTE: This rarely exists without a preceding type and exists mostly for structural & layout reasons. """ type = "struct_literal" match_grammar: Matchable = Bracketed( Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), ) class TypedStructLiteralSegment(BaseSegment): """An array literal segment.""" type = "typed_struct_literal" match_grammar: Matchable = Sequence( Ref("StructTypeSegment"), Ref("StructLiteralSegment"), ) class EmptyStructLiteralBracketsSegment(BaseSegment): """An empty struct literal segment - `()`. NOTE: This is only to set the right type so spacing rules are applied correctly. 
""" type = "struct_literal" match_grammar: Matchable = Bracketed() class EmptyStructLiteralSegment(BaseSegment): """An empty array literal segment - `STRUCT()`.""" type = "typed_struct_literal" match_grammar: Matchable = Sequence( Ref("StructTypeSegment"), Ref("EmptyStructLiteralBracketsSegment"), ) class ObjectLiteralSegment(BaseSegment): """An object literal segment.""" type = "object_literal" match_grammar: Matchable = Bracketed( Delimited( Ref("ObjectLiteralElementSegment"), optional=True, ), bracket_type="curly", ) class ObjectLiteralElementSegment(BaseSegment): """An object literal element segment.""" type = "object_literal_element" match_grammar: Matchable = Sequence( Ref("QuotedLiteralSegment"), Ref("ColonSegment"), Ref("BaseExpressionElementGrammar"), ) class TimeZoneGrammar(BaseSegment): """Casting to Time Zone.""" type = "time_zone_grammar" match_grammar = AnyNumberOf( Sequence("AT", "TIME", "ZONE", Ref("ExpressionSegment")), ) class BracketedArguments(BaseSegment): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ type = "bracketed_arguments" match_grammar = Bracketed( # The brackets might be empty for some cases... Delimited(Ref("LiteralGrammar"), optional=True), ) class DatatypeSegment(BaseSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. """ type = "data_type" match_grammar: Matchable = OneOf( Sequence( OneOf("TIME", "TIMESTAMP"), Bracketed(Ref("NumericLiteralSegment"), optional=True), Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True), ), Sequence( "DOUBLE", "PRECISION", ), Sequence( OneOf( Sequence( OneOf("CHARACTER", "BINARY"), OneOf("VARYING", Sequence("LARGE", "OBJECT")), ), Sequence( # Some dialects allow optional qualification of data types with # schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), Ref("DatatypeIdentifierSegment"), allow_gaps=False, ), ), # There may be no brackets for some data types Ref("BracketedArguments", optional=True), OneOf( "UNSIGNED", # UNSIGNED MySQL Ref("CharCharacterSetGrammar"), optional=True, ), ), ) # hookpoint ansi_dialect.add(CharCharacterSetGrammar=Nothing()) class ObjectReferenceSegment(BaseSegment): """A reference to an object.""" type = "object_reference" # match grammar (don't allow whitespace) match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=False, ) class ObjectReferencePart(NamedTuple): """Details about a table alias.""" part: str # Name of the part # Segment(s) comprising the part. Usuaully just one segment, but could # be multiple in dialects (e.g. BigQuery) that support unusual # characters in names (e.g. "-") segments: List[BaseSegment] @classmethod def _iter_reference_parts(cls, elem) -> Generator[ObjectReferencePart, None, None]: """Extract the elements of a reference and yield.""" # trim on quotes and split out any dots. for part in elem.raw_trimmed().split("."): yield cls.ObjectReferencePart(part, [elem]) def iter_raw_references(self) -> Generator[ObjectReferencePart, None, None]: """Generate a list of reference strings and elements. Each reference is an ObjectReferencePart. If some are split, then a segment may appear twice, but the substring will only appear once. 
""" # Extract the references from those identifiers (because some may be quoted) for elem in self.recursive_crawl("identifier"): yield from self._iter_reference_parts(elem) def is_qualified(self) -> bool: """Return if there is more than one element to the reference.""" return len(list(self.iter_raw_references())) > 1 def qualification(self) -> str: """Return the qualification type of this reference.""" return "qualified" if self.is_qualified() else "unqualified" class ObjectReferenceLevel(Enum): """Labels for the "levels" of a reference. Note: Since SQLFluff does not have access to database catalog information, interpreting references will often be ambiguous. Typical example: The first part *may* refer to a schema, but that is almost always optional if referring to an object in some default or currently "active" schema. For this reason, use of this enum is optional and intended mainly to clarify the intent of the code -- no guarantees! Additionally, the terminology may vary by dialect, e.g. in BigQuery, "project" would be a more accurate term than "schema". """ OBJECT = 1 TABLE = 2 SCHEMA = 3 def extract_possible_references( self, level: Union[ObjectReferenceLevel, int] ) -> List[ObjectReferencePart]: """Extract possible references of a given level. "level" may be (but is not required to be) a value from the ObjectReferenceLevel enum defined above. NOTE: The base implementation here returns at most one part, but dialects such as BigQuery that support nesting (e.g. STRUCT) may return multiple reference parts. """ level = self._level_to_int(level) refs = list(self.iter_raw_references()) if len(refs) >= level: return [refs[-level]] return [] def extract_possible_multipart_references( self, levels: List[Union[ObjectReferenceLevel, int]] ) -> List[Tuple[ObjectReferencePart, ...]]: """Extract possible multipart references, e.g. schema.table.""" levels_tmp = [self._level_to_int(level) for level in levels] min_level = min(levels_tmp) max_level = max(levels_tmp) refs = list(self.iter_raw_references()) if len(refs) >= max_level: return [tuple(refs[-max_level : 1 - min_level])] return [] @staticmethod def _level_to_int(level: Union[ObjectReferenceLevel, int]) -> int: # If it's an ObjectReferenceLevel, get the value. Otherwise, assume it's # an int. level = getattr(level, "value", level) assert isinstance(level, int) return level class TableReferenceSegment(ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias.""" type = "table_reference" class SchemaReferenceSegment(ObjectReferenceSegment): """A reference to a schema.""" type = "schema_reference" class DatabaseReferenceSegment(ObjectReferenceSegment): """A reference to a database.""" type = "database_reference" class IndexReferenceSegment(ObjectReferenceSegment): """A reference to an index.""" type = "index_reference" class CollationReferenceSegment(ObjectReferenceSegment): """A reference to a collation.""" type = "collation_reference" # Some dialects like PostgreSQL want an identifier only, and quoted # literals aren't allowed. Other dialects like Snowflake only accept # a quoted string literal. We'll be a little overly-permissive and # accept either... it shouldn't be too greedy since this segment generally # occurs only in a Sequence after the "COLLATE" keyword. 
    match_grammar: Matchable = OneOf(
        Ref("QuotedLiteralSegment"),
        Delimited(
            Ref("SingleIdentifierGrammar"),
            delimiter=Ref("ObjectReferenceDelimiterGrammar"),
            terminators=[Ref("ObjectReferenceTerminatorGrammar")],
            allow_gaps=False,
        ),
    )


class RoleReferenceSegment(ObjectReferenceSegment):
    """A reference to a role, user, or account."""

    type = "role_reference"
    match_grammar: Matchable = Ref("SingleIdentifierGrammar")


class TablespaceReferenceSegment(ObjectReferenceSegment):
    """A reference to a tablespace."""

    type = "tablespace_reference"


class ExtensionReferenceSegment(ObjectReferenceSegment):
    """A reference to an extension."""

    type = "extension_reference"


class ColumnReferenceSegment(ObjectReferenceSegment):
    """A reference to a column, field or alias."""

    type = "column_reference"


class SequenceReferenceSegment(ObjectReferenceSegment):
    """A reference to a sequence."""

    type = "sequence_reference"


class TagReferenceSegment(ObjectReferenceSegment):
    """A reference to a tag."""

    type = "tag_reference"


class TriggerReferenceSegment(ObjectReferenceSegment):
    """A reference to a trigger."""

    type = "trigger_reference"


class SingleIdentifierListSegment(BaseSegment):
    """A comma delimited list of identifiers."""

    type = "identifier_list"
    match_grammar: Matchable = Delimited(Ref("SingleIdentifierGrammar"))


class ArrayAccessorSegment(BaseSegment):
    """An array accessor e.g. [3:4]."""

    type = "array_accessor"
    match_grammar: Matchable = Bracketed(
        Delimited(
            OneOf(Ref("NumericLiteralSegment"), Ref("ExpressionSegment")),
            delimiter=Ref("SliceSegment"),
        ),
        bracket_type="square",
        parse_mode=ParseMode.GREEDY,
    )


class AliasedObjectReferenceSegment(BaseSegment):
    """A reference to an object with an `AS` clause."""

    type = "object_reference"
    match_grammar: Matchable = Sequence(
        Ref("ObjectReferenceSegment"), Ref("AliasExpressionSegment")
    )


ansi_dialect.add(
    # This is a hook point to allow subclassing for other dialects
    AliasedTableReferenceGrammar=Sequence(
        Ref("TableReferenceSegment"), Ref("AliasExpressionSegment")
    )
)


class AliasExpressionSegment(BaseSegment):
    """A reference to an object with an `AS` clause.

    The optional AS keyword allows both implicit and explicit aliasing.
    """

    type = "alias_expression"
    match_grammar: Matchable = Sequence(
        Indent,
        Ref.keyword("AS", optional=True),
        OneOf(
            Sequence(
                Ref("SingleIdentifierGrammar"),
                # Column alias in VALUES clause
                Bracketed(Ref("SingleIdentifierListSegment"), optional=True),
            ),
            Ref("SingleQuotedIdentifierSegment"),
        ),
        Dedent,
    )


class ShorthandCastSegment(BaseSegment):
    """A casting operation using '::'."""

    type = "cast_expression"
    match_grammar: Matchable = Sequence(
        OneOf(
            Ref("Expression_D_Grammar"),
            Ref("CaseExpressionSegment"),
        ),
        AnyNumberOf(
            Sequence(
                Ref("CastOperatorSegment"),
                Ref("DatatypeSegment"),
                Ref("TimeZoneGrammar", optional=True),
            ),
            min_times=1,
        ),
    )


class QualifiedNumericLiteralSegment(BaseSegment):
    """A numeric literal with one + or - sign preceding.

    The qualified numeric literal is a compound of a raw literal
    and a plus/minus sign. We do it this way rather than at the
    lexing step because the lexer doesn't deal well with ambiguity.
    """

    type = "numeric_literal"
    match_grammar: Matchable = Sequence(
        Ref("SignedSegmentGrammar"),
        Ref("NumericLiteralSegment"),
    )


class AggregateOrderByClause(BaseSegment):
    """An order by clause for an aggregate function.
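
    e.g. the ``ORDER BY`` inside ``STRING_AGG(col, ',' ORDER BY col)``
    (an assumed PostgreSQL-style example).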
Defined as a class to allow a specific type for rule AM06 """ type = "aggregate_order_by" match_grammar: Matchable = Ref("OrderByClauseSegment") ansi_dialect.add( # FunctionContentsExpressionGrammar intended as a hook to override # in other dialects. FunctionContentsExpressionGrammar=Ref("ExpressionSegment"), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "AggregateOrderByClause" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. Sequence(Ref.keyword("SEPARATOR"), Ref("LiteralGrammar")), # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Ref("IgnoreRespectNullsGrammar"), Ref("IndexColumnDefinitionSegment"), Ref("EmptyStructLiteralSegment"), ), PostFunctionGrammar=OneOf( # Optional OVER suffix for window functions. # This is supported in bigquery & postgres (and its derivatives) # and so is included here for now. Ref("OverClauseSegment"), # Filter clause supported by both Postgres and SQLite Ref("FilterClauseGrammar"), ), ) class OverClauseSegment(BaseSegment): """An OVER clause for window functions.""" type = "over_clause" match_grammar: Matchable = Sequence( Indent, Ref("IgnoreRespectNullsGrammar", optional=True), "OVER", OneOf( Ref("SingleIdentifierGrammar"), # Window name Bracketed( Ref("WindowSpecificationSegment", optional=True), parse_mode=ParseMode.GREEDY, ), ), Dedent, ) class WindowSpecificationSegment(BaseSegment): """Window specification within OVER(...).""" type = "window_specification" match_grammar: Matchable = Sequence( Ref( "SingleIdentifierGrammar", optional=True, exclude=Ref.keyword("PARTITION") ), # "Base" window name Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("FrameClauseSegment", optional=True), optional=True, ) class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema.""" type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("BracketedSegment")], ), allow_gaps=False, ) class FunctionSegment(BaseSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now, we treat them the same because they look the same for our purposes. 
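
    e.g. ``SUM(col)`` (aggregate) and ``UPPER(col)`` (scalar) both parse
    here as a ``function`` segment.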
""" type = "function" match_grammar: Matchable = OneOf( Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Sequence( Ref("DatePartFunctionNameSegment"), Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), parse_mode=ParseMode.GREEDY, ), ), ), Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), parse_mode=ParseMode.GREEDY, ), ), Ref("PostFunctionGrammar", optional=True), ), ) class PartitionClauseSegment(BaseSegment): """A `PARTITION BY` for window functions.""" type = "partitionby_clause" match_grammar: Matchable = Sequence( "PARTITION", "BY", Indent, # Brackets are optional in a partition by statement OptionallyBracketed(Delimited(Ref("ExpressionSegment"))), Dedent, ) class FrameClauseSegment(BaseSegment): """A frame clause for window functions. https://docs.oracle.com/cd/E17952_01/mysql-8.0-en/window-functions-frames.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Sequence("INTERVAL", Ref("QuotedLiteralSegment")), "UNBOUNDED", ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) ansi_dialect.add( # This is a hook point to allow subclassing for other dialects PostTableExpressionGrammar=Nothing() ) class FromExpressionElementSegment(BaseSegment): """A table expression.""" type = "from_expression_element" match_grammar: Matchable = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), Ref("JoinLikeClauseGrammar"), ), optional=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True), Ref("SamplingExpressionSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) def get_eventual_alias(self) -> AliasInfo: """Return the eventual table name referred to by this table expression. Returns: :obj:`tuple` of (:obj:`str`, :obj:`BaseSegment`, :obj:`bool`) containing a string representation of the alias, a reference to the segment containing it, and whether it's an alias. 
""" # Get any table expressions tbl_expression = self.get_child("table_expression") if not tbl_expression: # pragma: no cover _bracketed = self.get_child("bracketed") if _bracketed: tbl_expression = _bracketed.get_child("table_expression") # For TSQL nested, bracketed tables get the first table as reference if tbl_expression and not tbl_expression.get_child("object_reference"): _bracketed = tbl_expression.get_child("bracketed") if _bracketed: tbl_expression = _bracketed.get_child("table_expression") # Work out the references ref: Optional[ObjectReferenceSegment] = None if tbl_expression: _ref = tbl_expression.get_child("object_reference") if _ref: ref = cast(ObjectReferenceSegment, _ref) # Handle any aliases alias_expression = self.get_child("alias_expression") if alias_expression: # If it has an alias, return that segment = alias_expression.get_child("identifier") if segment: return AliasInfo( segment.raw, segment, True, self, alias_expression, ref ) # If not return the object name (or None if there isn't one) if ref: references: List = list(ref.iter_raw_references()) # Return the last element of the reference. if references: penultimate_ref: ObjectReferenceSegment.ObjectReferencePart = ( references[-1] ) return AliasInfo( penultimate_ref.part, penultimate_ref.segments[0], False, self, None, ref, ) # No references or alias return AliasInfo( "", None, False, self, None, ref, ) class FromExpressionSegment(BaseSegment): """A from expression segment.""" type = "from_expression" match_grammar: Matchable = OptionallyBracketed( Sequence( Indent, OneOf( # check first for MLTableExpression, # because of possible FunctionSegment in # MainTableExpression Ref("MLTableExpressionSegment"), Ref("FromExpressionElementSegment"), Bracketed(Ref("FromExpressionSegment")), terminators=[Sequence("ORDER", "BY"), Sequence("GROUP", "BY")], ), Dedent, Conditional(Indent, indented_joins=True), AnyNumberOf( Sequence( OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")), ), optional=True, terminators=[Sequence("ORDER", "BY"), Sequence("GROUP", "BY")], ), Conditional(Dedent, indented_joins=True), ) ) class TableExpressionSegment(BaseSegment): """The main table expression e.g. within a FROM clause.""" type = "table_expression" match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), ) class WildcardIdentifierSegment(ObjectReferenceSegment): """Any identifier of the form a.b.*. This inherits iter_raw_references from the ObjectReferenceSegment. """ type = "wildcard_identifier" match_grammar: Matchable = Sequence( # *, blah.*, blah.blah.*, etc. AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), allow_gaps=True, ) ), Ref("StarSegment"), allow_gaps=False, ) def iter_raw_references(self): """Generate a list of reference strings and elements. Each element is a tuple of (str, segment). If some are split, then a segment may appear twice, but the substring will only appear once. """ # Extract the references from those identifiers (because some may be quoted) for elem in self.recursive_crawl("identifier", "star"): yield from self._iter_reference_parts(elem) class WildcardExpressionSegment(BaseSegment): """A star (*) expression for a SELECT clause. This is separate from the identifier to allow for some dialects which extend this logic to allow REPLACE, EXCEPT or similar clauses e.g. BigQuery. 
""" type = "wildcard_expression" match_grammar: Matchable = Sequence( # *, blah.*, blah.blah.*, etc. Ref("WildcardIdentifierSegment") ) class SelectClauseElementSegment(BaseSegment): """An element in the targets of a select statement.""" type = "select_clause_element" # Important to split elements before parsing, otherwise debugging is really hard. match_grammar = OneOf( # *, blah.*, blah.blah.*, etc. Ref("WildcardExpressionSegment"), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) def get_alias(self) -> Optional[ColumnAliasInfo]: """Get info on alias within SELECT clause element.""" alias_expression_segment = next(self.recursive_crawl("alias_expression"), None) if alias_expression_segment is None: # Return None if no alias expression is found. return None alias_identifier_segment = next( (s for s in alias_expression_segment.segments if s.is_type("identifier")), None, ) if alias_identifier_segment is None: # Return None if no alias identifier expression is found. # Happened in the past due to bad syntax return None # pragma: no cover # Get segment being aliased. aliased_segment = next( s for s in self.segments if not s.is_whitespace and not s.is_meta and s != alias_expression_segment ) # Find all the columns being aliased. column_reference_segments = [] if aliased_segment.is_type("column_reference"): column_reference_segments.append(aliased_segment) else: column_reference_segments.extend( aliased_segment.recursive_crawl("column_reference") ) return ColumnAliasInfo( alias_identifier_name=alias_identifier_segment.raw, aliased_segment=aliased_segment, column_reference_segments=column_reference_segments, ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar: Matchable = OneOf( "DISTINCT", "ALL", ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement.""" type = "select_clause" match_grammar: Matchable = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), Dedent, terminators=[Ref("SelectClauseTerminatorGrammar")], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class JoinClauseSegment(BaseSegment): """Any number of join clauses, including the `JOIN` keyword.""" type = "join_clause" match_grammar: Matchable = OneOf( # NB These qualifiers are optional Sequence( Ref("JoinTypeKeywordsGrammar", optional=True), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), AnyNumberOf(Ref("NestedJoinGrammar")), Dedent, Sequence( # Using nested sequence here so we only get the indents # if we also have content. Conditional(Indent, indented_using_on=True), # NB: this is optional OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Sequence( "USING", Indent, Bracketed( # NB: We don't use BracketedColumnReferenceListGrammar # here because we're just using SingleIdentifierGrammar, # rather than ObjectReferenceSegment or # ColumnReferenceSegment. # This is a) so that we don't lint it as a reference and # b) because the column will probably be returned anyway # during parsing. Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Dedent, ), # Unqualified joins *are* allowed. They just might not # be a good idea. 
), Conditional(Dedent, indented_using_on=True), optional=True, ), ), # Note NATURAL joins do not support Join conditions Sequence( Ref("NaturalJoinKeywordsGrammar"), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), # Sometimes, a natural join might already include the keyword Sequence( Ref("ExtendedNaturalJoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), ) def get_eventual_aliases(self) -> List[Tuple[BaseSegment, AliasInfo]]: """Return the eventual table name referred to by this join clause.""" buff = [] from_expression = self.get_child("from_expression_element") # As per grammar above, there will always be a FromExpressionElementSegment assert from_expression alias: AliasInfo = cast( FromExpressionElementSegment, from_expression ).get_eventual_alias() # Only append if non-null. A None reference, may # indicate a generator expression or similar. if alias: buff.append((from_expression, alias)) # In some dialects, like TSQL, join clauses can have nested join clauses # recurse into them - but not if part of a sub-select statement (see #3144) for join_clause in self.recursive_crawl( "join_clause", no_recursive_seg_type="select_statement" ): if join_clause is self: # If the starting segment itself matches the list of types we're # searching for, recursive_crawl() will return it. Skip that. continue aliases: List[Tuple[BaseSegment, AliasInfo]] = cast( JoinClauseSegment, join_clause ).get_eventual_aliases() # Only append if non-null. A None reference, may # indicate a generator expression or similar. if aliases: buff = buff + aliases return buff class JoinOnConditionSegment(BaseSegment): """The `ON` condition within a `JOIN` clause.""" type = "join_on_condition" match_grammar: Matchable = Sequence( "ON", Conditional(ImplicitIndent, indented_on_contents=True), OptionallyBracketed(Ref("ExpressionSegment")), Conditional(Dedent, indented_on_contents=True), ) ansi_dialect.add( # This is a hook point to allow subclassing for other dialects JoinLikeClauseGrammar=Nothing(), ) class FromClauseSegment(BaseSegment): """A `FROM` clause like in `SELECT`. NOTE: this is a delimited set of table expressions, with a variable number of optional join clauses with those table expressions. The delimited aspect is the higher of the two such that the following is valid (albeit unusual): ``` SELECT * FROM a JOIN b, c JOIN d ``` """ type = "from_clause" match_grammar: Matchable = Sequence( "FROM", Delimited( Ref("FromExpressionSegment"), ), ) def get_eventual_aliases(self) -> List[Tuple[BaseSegment, AliasInfo]]: """List the eventual aliases of this from clause. Comes as a list of tuples (table expr, tuple (string, segment, bool)). """ buff: List[Tuple[BaseSegment, AliasInfo]] = [] direct_table_children = [] join_clauses = [] for from_expression in self.get_children("from_expression"): direct_table_children += from_expression.get_children( "from_expression_element" ) join_clauses += from_expression.get_children("join_clause") # Iterate through the potential sources of aliases for clause in direct_table_children: alias: AliasInfo = cast( FromExpressionElementSegment, clause ).get_eventual_alias() # Only append if non-null. A None reference, may # indicate a generator expression or similar. 
table_expr = ( clause if clause in direct_table_children else clause.get_child("from_expression_element") ) if alias: assert table_expr buff.append((table_expr, alias)) for clause in join_clauses: aliases: List[Tuple[BaseSegment, AliasInfo]] = cast( JoinClauseSegment, clause ).get_eventual_aliases() # Only append if non-null. A None reference, may # indicate a generator expression or similar. if aliases: buff = buff + aliases return buff class WhenClauseSegment(BaseSegment): """A 'WHEN' clause for a 'CASE' statement.""" type = "when_clause" match_grammar: Matchable = Sequence( "WHEN", # NOTE: The nested sequence here is to ensure the correct # placement of the meta segments when templated elements # are present. # https://github.com/sqlfluff/sqlfluff/issues/3988 Sequence( ImplicitIndent, Ref("ExpressionSegment"), Dedent, ), Conditional(Indent, indented_then=True), "THEN", Conditional(ImplicitIndent, indented_then_contents=True), Ref("ExpressionSegment"), Conditional(Dedent, indented_then_contents=True), Conditional(Dedent, indented_then=True), ) class ElseClauseSegment(BaseSegment): """An 'ELSE' clause for a 'CASE' statement.""" type = "else_clause" match_grammar: Matchable = Sequence( "ELSE", ImplicitIndent, Ref("ExpressionSegment"), Dedent ) class CaseExpressionSegment(BaseSegment): """A `CASE WHEN` clause.""" type = "case_expression" match_grammar: Matchable = OneOf( Sequence( "CASE", ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", ), Sequence( "CASE", Ref("ExpressionSegment"), ImplicitIndent, AnyNumberOf( Ref("WhenClauseSegment"), reset_terminators=True, terminators=[Ref.keyword("ELSE"), Ref.keyword("END")], ), Ref( "ElseClauseSegment", optional=True, reset_terminators=True, terminators=[Ref.keyword("END")], ), Dedent, "END", ), terminators=[Ref("CommaSegment"), Ref("BinaryOperatorGrammar")], ) ansi_dialect.add( # Expression_A_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.html#a_expr # The upstream grammar is defined recursively, which if implemented naively # will cause SQLFluff to overflow the stack from recursive function calls. # To work around this, the a_expr grammar is reworked a bit into sub-grammars # that effectively provide tail recursion. Expression_A_Unary_Operator_Grammar=OneOf( # This grammar corresponds to the unary operator portion of the initial # recursive block on the Cockroach Labs a_expr grammar. It includes the # unary operator matching sub-block, but not the recursive call to a_expr. Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), # used in CONNECT BY clauses (EXASOL, Snowflake, Postgres...) "PRIOR", ), Tail_Recurse_Expression_A_Grammar=Sequence( # This should be used instead of a recursive call to Expression_A_Grammar # whenever the repeating element in Expression_A_Grammar makes a recursive # call to itself at the _end_. If it's in the middle then you still need # to recurse into Expression_A_Grammar normally. AnyNumberOf( Ref("Expression_A_Unary_Operator_Grammar"), terminators=[Ref("BinaryOperatorGrammar")], ), Ref("Expression_C_Grammar"), ), Expression_A_Grammar=Sequence( # Grammar always starts with optional unary operator, plus c_expr. This # section must always match the tail recurse grammar. 
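        # Schematically, the rewrite turns recursion into repetition
        # (comment-only sketch, not the literal upstream grammar):
        #
        #     a_expr    := tail_expr loop_element*
        #     tail_expr := unary_op* c_expr
        #
        # so deep expressions are matched by looping rather than by nested
        # function calls, keeping the Python stack depth bounded.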
Ref("Tail_Recurse_Expression_A_Grammar"), # As originally pictured in the diagram, the grammar then repeats itself # for any number of times with a loop. AnyNumberOf( OneOf( # This corresponds to the big repeating block in the diagram that # has like dozens and dozens of possibilities. Some of them are # recursive. If the item __ends__ with a recursive call to "a_expr", # use Ref("Tail_Recurse_Expression_A_Grammar") instead so that the # stack depth can be minimized. If the item has a recursive call # in the middle of the expression, you'll need to recurse # Expression_A_Grammar normally. # # We need to add a lot more here... Sequence( Sequence( Ref.keyword("NOT", optional=True), Ref("LikeGrammar"), ), Ref("Expression_A_Grammar"), Sequence( Ref.keyword("ESCAPE"), Ref("Tail_Recurse_Expression_A_Grammar"), optional=True, ), ), Sequence( Ref("BinaryOperatorGrammar"), Ref("Tail_Recurse_Expression_A_Grammar"), ), Sequence( Ref.keyword("NOT", optional=True), "IN", Bracketed( OneOf( Delimited( Ref("Expression_A_Grammar"), ), Ref("SelectableGrammar"), ), parse_mode=ParseMode.GREEDY, ), ), Sequence( Ref.keyword("NOT", optional=True), "IN", Ref("FunctionSegment"), # E.g. UNNEST() ), Sequence( "IS", Ref.keyword("NOT", optional=True), Ref("IsClauseGrammar"), ), Ref("IsNullGrammar"), Ref("NotNullGrammar"), Ref("CollateGrammar"), Sequence( Ref.keyword("NOT", optional=True), "BETWEEN", Ref("Expression_B_Grammar"), "AND", Ref("Tail_Recurse_Expression_A_Grammar"), ), ) ), ), # Expression_B_Grammar: Does not directly feed into Expression_A_Grammar # but is used for a BETWEEN statement within Expression_A_Grammar. # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#b_expr # # We use a similar trick as seen with Expression_A_Grammar to avoid recursion # by using a tail recursion grammar. See the comments for a_expr to see how # that works. Expression_B_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), ), Tail_Recurse_Expression_B_Grammar=Sequence( # Only safe to use if the recursive call is at the END of the repeating # element in the main b_expr portion AnyNumberOf(Ref("Expression_B_Unary_Operator_Grammar")), Ref("Expression_C_Grammar"), ), Expression_B_Grammar=Sequence( # Always start with tail recursion element! Ref("Tail_Recurse_Expression_B_Grammar"), AnyNumberOf( OneOf( Sequence( OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), ), Ref("Tail_Recurse_Expression_B_Grammar"), ), # TODO: Add more things from b_expr here ), ), ), # Expression_C_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#c_expr Expression_C_Grammar=OneOf( Sequence("EXISTS", Bracketed(Ref("SelectableGrammar"))), # should be first priority, otherwise EXISTS() would be matched as a function Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf(Ref("TimeZoneGrammar"), optional=True), ), Ref("ShorthandCastSegment"), terminators=[Ref("CommaSegment")], ), # Expression_D_Grammar # https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#d_expr Expression_D_Grammar=Sequence( OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) 
Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) Ref("LocalAliasSegment"), # WHERE (LOCAL.a, LOCAL.b) IN (...) ), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("IntervalExpressionSegment"), Ref("TypedStructLiteralSegment"), Ref("ArrayExpressionSegment"), Ref("ColumnReferenceSegment"), # For triggers, we allow "NEW.*" but not just "*" nor "a.b.*" # So can't use WildcardIdentifierSegment nor WildcardExpressionSegment Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), Ref("StarSegment"), ), Sequence( Ref("StructTypeSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( Ref("DatatypeSegment"), # Don't use the full LiteralGrammar here # because only some of them are applicable. # Notably we shouldn't use QualifiedNumericLiteralSegment # here because it looks like an arithmetic operation. OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), ), ), Ref("LocalAliasSegment"), terminators=[Ref("CommaSegment")], ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), AccessorGrammar=AnyNumberOf(Ref("ArrayAccessorSegment")), ) class EqualsSegment(CompositeComparisonOperatorSegment): """Equals operator.""" match_grammar: Matchable = Ref("RawEqualsSegment") class GreaterThanSegment(CompositeComparisonOperatorSegment): """Greater than operator.""" match_grammar: Matchable = Ref("RawGreaterThanSegment") class LessThanSegment(CompositeComparisonOperatorSegment): """Less than operator.""" match_grammar: Matchable = Ref("RawLessThanSegment") class GreaterThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator.""" match_grammar: Matchable = Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) class LessThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Less than or equal to operator.""" match_grammar: Matchable = Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) class NotEqualToSegment(CompositeComparisonOperatorSegment): """Not equal to operator.""" match_grammar: Matchable = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment"), allow_gaps=False), Sequence( Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False ), ) class ConcatSegment(CompositeBinaryOperatorSegment): """Concat operator.""" match_grammar: Matchable = Sequence( Ref("PipeSegment"), Ref("PipeSegment"), allow_gaps=False ) class BitwiseAndSegment(CompositeBinaryOperatorSegment): """Bitwise and operator.""" match_grammar: Matchable = Ref("AmpersandSegment") class BitwiseOrSegment(CompositeBinaryOperatorSegment): """Bitwise or operator.""" match_grammar: Matchable = Ref("PipeSegment") class BitwiseLShiftSegment(CompositeBinaryOperatorSegment): """Bitwise left-shift operator.""" match_grammar: Matchable = Sequence( Ref("RawLessThanSegment"), Ref("RawLessThanSegment"), allow_gaps=False ) class BitwiseRShiftSegment(CompositeBinaryOperatorSegment): """Bitwise right-shift operator.""" match_grammar: Matchable = Sequence( Ref("RawGreaterThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False ) class ExpressionSegment(BaseSegment): """An expression, either arithmetic or boolean. NB: This is potentially VERY recursive and mostly uses the grammars above. 
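
    For example ``a + b * 2 = c AND NOT d`` parses as one expression,
    with the grammars above handling the nested sub-expressions.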
    This version also doesn't bound itself first, and so is potentially VERY SLOW.
    I don't really like this solution. We rely on elements of the expression to
    bound themselves rather than bounding at the expression level. Trying to
    bound the ExpressionSegment itself has been too unstable and not resilient
    enough to other bugs.
    """

    type = "expression"
    match_grammar: Matchable = Ref("Expression_A_Grammar")


class WhereClauseSegment(BaseSegment):
    """A `WHERE` clause like in `SELECT` or `INSERT`."""

    type = "where_clause"
    match_grammar: Matchable = Sequence(
        "WHERE",
        # NOTE: The indent here is implicit to allow
        # constructions like:
        #
        #    WHERE a
        #        AND b
        #
        # to be valid without forcing an indent between
        # "WHERE" and "a".
        ImplicitIndent,
        OptionallyBracketed(Ref("ExpressionSegment")),
        Dedent,
    )


class OrderByClauseSegment(BaseSegment):
    """An `ORDER BY` clause like in `SELECT`."""

    type = "orderby_clause"
    match_grammar: Matchable = Sequence(
        "ORDER",
        "BY",
        Indent,
        Delimited(
            Sequence(
                OneOf(
                    Ref("ColumnReferenceSegment"),
                    # Can `ORDER BY 1`
                    Ref("NumericLiteralSegment"),
                    # Can order by an expression
                    Ref("ExpressionSegment"),
                ),
                OneOf("ASC", "DESC", optional=True),
                # NB: This isn't really ANSI, and isn't supported in Mysql, but
                # is supported in enough other dialects for it to make sense here
                # for now.
                Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True),
            ),
            terminators=["LIMIT", Ref("FrameClauseUnitGrammar")],
        ),
        Dedent,
    )


class RollupFunctionNameSegment(BaseSegment):
    """ROLLUP function name segment.

    Need to be able to specify this as type `function_name_identifier`
    within a `function_name` so that linting rules identify it properly.
    """

    type = "function_name"
    match_grammar: Matchable = StringParser(
        "ROLLUP",
        CodeSegment,
        type="function_name_identifier",
    )


class CubeFunctionNameSegment(BaseSegment):
    """CUBE function name segment.

    Need to be able to specify this as type `function_name_identifier`
    within a `function_name` so that linting rules identify it properly.
""" type = "function_name" match_grammar: Matchable = StringParser( "CUBE", CodeSegment, type="function_name_identifier", ) class GroupingSetsClauseSegment(BaseSegment): """`GROUPING SETS` clause within the `GROUP BY` clause.""" type = "grouping_sets_clause" match_grammar = Sequence( "GROUPING", "SETS", Bracketed( Delimited( Ref("CubeRollupClauseSegment"), Ref("GroupingExpressionList"), ) ), ) class GroupingExpressionList(BaseSegment): """A `GROUP BY` clause expression list like in `ROLLUP`.""" type = "grouping_expression_list" match_grammar: Matchable = Sequence( Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY ROLLUP(1)` Ref("NumericLiteralSegment"), # Can `GROUP BY ROLLUP(coalesce(col, 1))` Ref("ExpressionSegment"), Bracketed(), # Allows empty parentheses ), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), Dedent, ) class CubeRollupClauseSegment(BaseSegment): """`CUBE` / `ROLLUP` clause within the `GROUP BY` clause.""" type = "cube_rollup_clause" match_grammar = Sequence( OneOf(Ref("CubeFunctionNameSegment"), Ref("RollupFunctionNameSegment")), Bracketed( Ref("GroupingExpressionList"), ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar: Matchable = Sequence( "GROUP", "BY", OneOf( Ref("CubeRollupClauseSegment"), # We could replace this next bit with a GroupingExpressionList # reference (renaming that to a more generic name), to avoid # repeating this bit of code, but I would rather keep it flat # to avoid changing regular `GROUP BY` clauses. Sequence( Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), terminators=[Ref("GroupByClauseTerminatorGrammar")], ), Dedent, ), ), ) class HavingClauseSegment(BaseSegment): """A `HAVING` clause like in `SELECT`.""" type = "having_clause" match_grammar: Matchable = Sequence( "HAVING", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class LimitClauseSegment(BaseSegment): """A `LIMIT` clause like in `SELECT`.""" type = "limit_clause" match_grammar: Matchable = Sequence( "LIMIT", Indent, OptionallyBracketed( OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), "ALL", ) ), OneOf( Sequence( "OFFSET", OneOf( # Allow a number by itself OR Ref("NumericLiteralSegment"), # An arbitrary expression Ref("ExpressionSegment"), ), ), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), optional=True, ), Dedent, ) class OverlapsClauseSegment(BaseSegment): """An `OVERLAPS` clause like in `SELECT.""" type = "overlaps_clause" match_grammar: Matchable = Sequence( "OVERLAPS", OneOf( Sequence( Bracketed( Ref("DateTimeLiteralGrammar"), Ref("CommaSegment"), Ref("DateTimeLiteralGrammar"), ) ), Ref("ColumnReferenceSegment"), ), ) class NamedWindowSegment(BaseSegment): """A WINDOW clause.""" type = "named_window" match_grammar: Matchable = Sequence( "WINDOW", Indent, Delimited( Ref("NamedWindowExpressionSegment"), ), Dedent, ) class FetchClauseSegment(BaseSegment): """A `FETCH` clause like in `SELECT.""" type = "fetch_clause" match_grammar: Matchable = Sequence( "FETCH", OneOf( "FIRST", "NEXT", ), Ref("NumericLiteralSegment", optional=True), OneOf("ROW", "ROWS"), "ONLY", ) class NamedWindowExpressionSegment(BaseSegment): """Named window expression.""" type = "named_window_expression" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # 
        "AS",
        OneOf(
            Ref("SingleIdentifierGrammar"),  # Window name
            Bracketed(
                Ref("WindowSpecificationSegment"),
                parse_mode=ParseMode.GREEDY,
            ),
        ),
    )


class ValuesClauseSegment(BaseSegment):
    """A `VALUES` clause like in `INSERT`."""

    type = "values_clause"
    match_grammar: Matchable = Sequence(
        OneOf("VALUE", "VALUES"),
        Delimited(
            Sequence(
                # MySQL uses `ROW` in its value statement.
                # Currently SQLFluff doesn't differentiate between
                # the Values statement:
                # https://dev.mysql.com/doc/refman/8.0/en/values.html
                # and the Values() function (used in INSERT statements):
                # https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_values
                # TODO: split these out in future.
                Ref.keyword("ROW", optional=True),
                Bracketed(
                    Delimited(
                        "DEFAULT",
                        Ref("LiteralGrammar"),
                        Ref("ExpressionSegment"),
                    ),
                    parse_mode=ParseMode.GREEDY,
                ),
            ),
        ),
    )


class UnorderedSelectStatementSegment(BaseSegment):
    """A `SELECT` statement without any ORDER clauses or later.

    This is designed for use in the context of set operations;
    for other use cases, we should use the main SelectStatementSegment.
    """

    type = "select_statement"
    match_grammar: Matchable = Sequence(
        Ref("SelectClauseSegment"),
        Ref("FromClauseSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
        Ref("GroupByClauseSegment", optional=True),
        Ref("HavingClauseSegment", optional=True),
        Ref("OverlapsClauseSegment", optional=True),
        Ref("NamedWindowSegment", optional=True),
        terminators=[
            Ref("SetOperatorSegment"),
            Ref("WithNoSchemaBindingClauseSegment"),
            Ref("WithDataClauseSegment"),
            Ref("OrderByClauseSegment"),
            Ref("LimitClauseSegment"),
        ],
        parse_mode=ParseMode.GREEDY_ONCE_STARTED,
    )


class SelectStatementSegment(BaseSegment):
    """A `SELECT` statement."""

    type = "select_statement"
    # Inherit most of the parse grammar from the unordered version.
    match_grammar = UnorderedSelectStatementSegment.match_grammar.copy(
        insert=[
            Ref("OrderByClauseSegment", optional=True),
            Ref("FetchClauseSegment", optional=True),
            Ref("LimitClauseSegment", optional=True),
            Ref("NamedWindowSegment", optional=True),
        ],
        # Overwrite the terminators, because we want to remove some.
        replace_terminators=True,
        terminators=[
            Ref("SetOperatorSegment"),
            Ref("WithNoSchemaBindingClauseSegment"),
            Ref("WithDataClauseSegment"),
        ],
    )


ansi_dialect.add(
    # Things that behave like select statements
    SelectableGrammar=OneOf(
        OptionallyBracketed(Ref("WithCompoundStatementSegment")),
        Ref("NonWithSelectableGrammar"),
        Bracketed(Ref("SelectableGrammar")),
    ),
    # Things that behave like select statements, which can form part of with
    # expressions.
    NonWithSelectableGrammar=OneOf(
        Ref("SetExpressionSegment"),
        OptionallyBracketed(Ref("SelectStatementSegment")),
        Ref("NonSetSelectableGrammar"),
    ),
    # Things that do not behave like select statements, which can form part of with
    # expressions.
    NonWithNonSelectableGrammar=OneOf(
        Ref("UpdateStatementSegment"),
        Ref("InsertStatementSegment"),
        Ref("DeleteStatementSegment"),
    ),
    # Things that behave like select statements, which can form part of set
    # expressions.
    NonSetSelectableGrammar=OneOf(
        Ref("ValuesClauseSegment"),
        Ref("UnorderedSelectStatementSegment"),
        # If it's bracketed, we can have the full select statement here,
        # otherwise we can't because any order by clauses should belong
        # to the set expression.
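        # e.g. in `SELECT a FROM x UNION SELECT a FROM y ORDER BY a` the
        # ORDER BY applies to the whole set expression, whereas in
        # `(SELECT a FROM x ORDER BY a) UNION SELECT a FROM y` it belongs
        # to the bracketed select.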
Bracketed(Ref("SelectStatementSegment")), Bracketed(Ref("NonSetSelectableGrammar")), ), ) class CTEColumnList(BaseSegment): """Bracketed column list portion of a CTE definition.""" type = "cte_column_list" match_grammar = Bracketed( Ref("SingleIdentifierListSegment"), ) class CTEDefinitionSegment(BaseSegment): """A CTE Definition from a WITH statement. `tab (col1,col2) AS (SELECT a,b FROM x)` """ type = "common_table_expression" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), Ref("CTEColumnList", optional=True), Ref.keyword("AS", optional=True), Bracketed( # Ephemeral here to subdivide the query. Ref("SelectableGrammar"), parse_mode=ParseMode.GREEDY, ), ) def get_identifier(self) -> IdentifierSegment: """Gets the identifier of this CTE. Note: it blindly gets the first identifier it finds which given the structure of a CTE definition is usually the right one. """ _identifier = self.get_child("identifier") # There will always be one, given the grammar above. assert _identifier return cast(IdentifierSegment, _identifier) class WithCompoundStatementSegment(BaseSegment): """A `SELECT` statement preceded by a selection of `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` """ type = "with_compound_statement" # match grammar match_grammar: Matchable = Sequence( "WITH", Ref.keyword("RECURSIVE", optional=True), Conditional(Indent, indented_ctes=True), Delimited( Ref("CTEDefinitionSegment"), terminators=["SELECT"], allow_trailing=True, ), Conditional(Dedent, indented_ctes=True), OneOf( Ref("NonWithSelectableGrammar"), Ref("NonWithNonSelectableGrammar"), ), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Ref("UnionGrammar"), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), "MINUS", exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class SetExpressionSegment(BaseSegment): """A set expression with either Union, Minus, Except or Intersect.""" type = "set_expression" # match grammar match_grammar: Matchable = Sequence( Ref("NonSetSelectableGrammar"), AnyNumberOf( Sequence( Ref("SetOperatorSegment"), Ref("NonSetSelectableGrammar"), ), min_times=1, ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement.""" type = "insert_statement" match_grammar: Matchable = Sequence( "INSERT", # Maybe OVERWRITE is just snowflake? # (It's also Hive but that has full insert grammar implementation) Ref.keyword("OVERWRITE", optional=True), "INTO", Ref("TableReferenceSegment"), OneOf( # As SelectableGrammar can be bracketed too, the parse gets confused, # so we need slightly odd syntax here to allow those to parse (rather # than just add optional=True to BracketedColumnReferenceListGrammar). 
Ref("SelectableGrammar"), Sequence( Ref("BracketedColumnReferenceListGrammar"), Ref("SelectableGrammar"), ), # This is part of ANSI SQL since SQL-92 Ref("DefaultValuesGrammar"), ), ) class MergeStatementSegment(BaseSegment): """A `MERGE` statement.""" type = "merge_statement" match_grammar = Sequence( Ref("MergeIntoLiteralGrammar"), Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), ), Dedent, "USING", Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), Sequence( Bracketed( Ref("SelectableGrammar"), ), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, Conditional(Indent, indented_using_on=True), Ref("JoinOnConditionSegment"), Conditional(Dedent, indented_using_on=True), Ref("MergeMatchSegment"), ) class MergeMatchSegment(BaseSegment): """Contains dialect specific merge operations. Hookpoint for dialect specific behavior e.g. UpdateClause / DeleteClause, multiple MergeMatchedClauses """ type = "merge_match" match_grammar: Matchable = AnyNumberOf( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment"), min_times=1, ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar: Matchable = Sequence( "WHEN", "MATCHED", Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar: Matchable = Sequence( "WHEN", "NOT", "MATCHED", Sequence("AND", Ref("ExpressionSegment"), optional=True), "THEN", Indent, Ref("MergeInsertClauseSegment"), Dedent, ) class MergeUpdateClauseSegment(BaseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", Indent, Ref("SetClauseListSegment"), Dedent, ) class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar: Matchable = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), ) class MergeDeleteClauseSegment(BaseSegment): """`DELETE` clause within the `MERGE` statement.""" type = "merge_delete_clause" match_grammar: Matchable = Ref.keyword("DELETE") class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.""" type = "transaction_statement" match_grammar: Matchable = Sequence( # COMMIT [ WORK ] [ AND [ NO ] CHAIN ] # ROLLBACK [ WORK ] [ AND [ NO ] CHAIN ] # BEGIN | END TRANSACTION | WORK # NOTE: "TO SAVEPOINT" is not yet supported # https://docs.snowflake.com/en/sql-reference/sql/begin.html # https://www.postgresql.org/docs/current/sql-end.html OneOf("START", "BEGIN", "COMMIT", "ROLLBACK", "END"), OneOf("TRANSACTION", "WORK", optional=True), Sequence("NAME", Ref("SingleIdentifierGrammar"), optional=True), Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True), ) class ColumnConstraintSegment(BaseSegment): """A column option; each CREATE TABLE column can have 0 or more.""" type = "column_constraint_segment" # Column constraint from # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar: Matchable = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( 
Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # DEFAULT "DEFAULT", Ref("ColumnConstraintDefaultGrammar"), ), Ref("PrimaryKeyGrammar"), Ref("UniqueKeyGrammar"), # UNIQUE Ref("AutoIncrementGrammar"), Ref("ReferenceDefinitionGrammar"), # REFERENCES reftable [ ( refcolumn) ]x Ref("CommentClauseSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment") ), # https://www.sqlite.org/datatype3.html#collation ), ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class IndexColumnDefinitionSegment(BaseSegment): """A column definition for CREATE INDEX.""" type = "index_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf("ASC", "DESC", optional=True), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE.""" type = "table_constraint" # Later add support for CHECK constraint, others? # e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1) match_grammar: Matchable = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), ) class TableEndClauseSegment(BaseSegment): """Allow for additional table endings. (like WITHOUT ROWID for SQLite) """ type = "table_end_clause_segment" match_grammar: Matchable = Nothing() class ArrayExpressionSegment(BaseSegment): """Expression to construct a ARRAY from a subquery. (Yes in BigQuery for example) NOTE: This differs from an array _literal_ in that it takes the form of an expression. """ type = "array_expression" match_grammar: Matchable = Nothing() class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement.""" type = "create_table_statement" # https://crate.io/docs/sql-99/en/latest/chapters/18.html # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), ) class CommentClauseSegment(BaseSegment): """A comment clause. e.g. 
COMMENT 'view/table/column description' """ type = "comment_clause" match_grammar: Matchable = Sequence("COMMENT", Ref("QuotedLiteralSegment")) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement.""" type = "create_schema_statement" match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class SetSchemaStatementSegment(BaseSegment): """A `SET SCHEMA` statement.""" type = "set_schema_statement" match_grammar: Matchable = Sequence( "SET", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class DropSchemaStatementSegment(BaseSegment): """A `DROP SCHEMA` statement.""" type = "drop_schema_statement" match_grammar: Matchable = Sequence( "DROP", "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropTypeStatementSegment(BaseSegment): """A `DROP TYPE` statement.""" type = "drop_type_statement" match_grammar: Matchable = Sequence( "DROP", "TYPE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class CreateDatabaseStatementSegment(BaseSegment): """A `CREATE DATABASE` statement.""" type = "create_database_statement" match_grammar: Matchable = Sequence( "CREATE", "DATABASE", Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), ) class DropDatabaseStatementSegment(BaseSegment): """A `DROP DATABASE` statement.""" type = "drop_database_statement" match_grammar: Matchable = Sequence( "DROP", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` statement.""" type = "create_index_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), ) ), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE` statement.""" type = "alter_table_statement" # Based loosely on: # https://dev.mysql.com/doc/refman/8.0/en/alter-table.html # TODO: Flesh this out with more detail. 
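# A minimal illustrative sketch (hypothetical statements, not from the test
# suite) of what this grammar is intended to match, assuming
# `AlterTableOptionsGrammar` resolves to the usual column/rename options:
#
#     ALTER TABLE my_table ADD COLUMN new_col INT
#     ALTER TABLE my_table RENAME TO my_other_table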
match_grammar: Matchable = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Delimited( Ref("AlterTableOptionsGrammar"), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://crate.io/docs/sql-99/en/latest/chapters/18.html#create-view-statement # https://dev.mysql.com/doc/refman/8.0/en/create-view.html # https://www.postgresql.org/docs/12/sql-createview.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class DropTableStatementSegment(BaseSegment): """A `DROP TABLE` statement.""" type = "drop_table_statement" match_grammar: Matchable = Sequence( "DROP", Ref("TemporaryGrammar", optional=True), "TABLE", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class DropViewStatementSegment(BaseSegment): """A `DROP VIEW` statement.""" type = "drop_view_statement" match_grammar: Matchable = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropUserStatementSegment(BaseSegment): """A `DROP USER` statement.""" type = "drop_user_statement" match_grammar: Matchable = Sequence( "DROP", "USER", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), ) class TruncateStatementSegment(BaseSegment): """`TRUNCATE TABLE` statement.""" type = "truncate_table" match_grammar: Matchable = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ) class DropIndexStatementSegment(BaseSegment): """A `DROP INDEX` statement.""" type = "drop_index_statement" # DROP INDEX [IF EXISTS] {RESTRICT | CASCADE} match_grammar: Matchable = Sequence( "DROP", "INDEX", Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class AccessStatementSegment(BaseSegment): """A `GRANT` or `REVOKE` statement. In order to help reduce code duplication we decided to implement other dialect specific grants (like Snowflake) here too which will help with maintainability. We also note that this causes the grammar to be less "correct", but the benefits outweigh the con in our opinion. 
Grant specific information: * https://www.postgresql.org/docs/9.0/sql-grant.html * https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Revoke specific information: * https://www.postgresql.org/docs/9.0/sql-revoke.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-role.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege-share.html """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", ), ), Sequence("APPLY", "MASKING", "POLICY"), Sequence("EXECUTE", "TASK"), Sequence("MANAGE", "GRANTS"), Sequence("MONITOR", OneOf("EXECUTION", "USAGE")), ) _schema_object_names = [ "TABLE", "VIEW", "STAGE", "FUNCTION", "PROCEDURE", "ROUTINE", "SEQUENCE", "STREAM", "TASK", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("MATERIALIZED", "VIEW"), Sequence("EXTERNAL", "TABLE"), Sequence("FILE", "FORMAT"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( Sequence( "CREATE", OneOf( "SCHEMA", Sequence("MASKING", "POLICY"), "PIPE", _schema_object_types, ), ), Sequence("IMPORTED", "PRIVILEGES"), "APPLY", "CONNECT", "CREATE", "DELETE", "EXECUTE", "INSERT", "MODIFY", "MONITOR", "OPERATE", "OWNERSHIP", "READ", "REFERENCE_USAGE", "REFERENCES", "SELECT", "TEMP", "TEMPORARY", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", "USE_ANY_ROLE", "WRITE", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on. # This list will contain ansi sql objects as well as dialect specific ones. _objects = OneOf( "ACCOUNT", Sequence( OneOf( Sequence("RESOURCE", "MONITOR"), "WAREHOUSE", "DATABASE", "DOMAIN", "INTEGRATION", "LANGUAGE", "SCHEMA", "ROLE", "TABLESPACE", "TYPE", Sequence( "FOREIGN", OneOf("SERVER", Sequence("DATA", "WRAPPER")), ), Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), Sequence("FUTURE", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"), Sequence( "FUTURE", _schema_object_types_plural, "IN", OneOf("DATABASE", "SCHEMA"), ), optional=True, ), Delimited( Ref("ObjectReferenceSegment"), Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), terminators=["TO", "FROM"], ), ), Sequence("LARGE", "OBJECT", Ref("NumericLiteralSegment")), ) match_grammar: Matchable = OneOf( # Based on https://www.postgresql.org/docs/13/sql-grant.html # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), # In the case where a role is granted non-explicitly, # e.g. 
GRANT ROLE_NAME TO OTHER_ROLE_NAME # See https://www.postgresql.org/docs/current/sql-grant.html Ref("ObjectReferenceSegment"), ), "TO", OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True), Delimited( OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment"), "PUBLIC"), ), OneOf( Sequence("WITH", "GRANT", "OPTION"), Sequence("WITH", "ADMIN", "OPTION"), Sequence("COPY", "CURRENT", "GRANTS"), optional=True, ), Sequence( "GRANTED", "BY", OneOf( "CURRENT_USER", "SESSION_USER", Ref("ObjectReferenceSegment"), ), optional=True, ), ), # Based on https://www.postgresql.org/docs/12/sql-revoke.html Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), Ref("ObjectReferenceSegment"), ), "FROM", OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True), Delimited( Ref("ObjectReferenceSegment"), ), Ref("DropBehaviorGrammar", optional=True), ), )
class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. DELETE FROM <table name> [ WHERE <search condition> ] """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar: Matchable = Sequence( "DELETE", Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), )
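# Illustrative sketch (hypothetical statements, not from the test suite) of
# what the DELETE grammar above and the UPDATE grammar below are intended
# to match:
#
#     DELETE FROM my_table WHERE col > 5
#     UPDATE my_table AS t SET col = 1 WHERE t.other_col IS NULL
#
# Note that the UPDATE grammar below explicitly excludes `SET` from acting
# as an implicit alias, so `UPDATE my_table SET ...` never parses `SET` as
# an alias name.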
class UpdateStatementSegment(BaseSegment): """An `Update` statement. UPDATE <table name> SET <set clause list> [ WHERE <search condition> ] """ type = "update_statement" match_grammar: Matchable = Sequence( "UPDATE", Ref("TableReferenceSegment"), # SET is not a reserved word in all dialects (e.g. RedShift) # So specifically exclude as an allowed implicit alias to avoid parsing errors Ref("AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True), Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), )
class SetClauseListSegment(BaseSegment): """SQL 1992 set clause list. <set clause list> ::= <set clause> [ { <comma> <set clause> }... ] <set clause> ::= <object column> <equals operator> <update source> <update source> ::= <value expression> | <null specification> | DEFAULT <object column> ::= <column name> """ type = "set_clause_list" match_grammar: Matchable = Sequence( "SET", Indent, Ref("SetClauseSegment"), # set clause AnyNumberOf( Ref("CommaSegment"), Ref("SetClauseSegment"), ), Dedent, )
class SetClauseSegment(BaseSegment): """SQL 1992 set clause. <set clause> ::= <object column> <equals operator> <update source> <update source> ::= <value expression> | <null specification> | DEFAULT <object column> ::= <column name> """ type = "set_clause" match_grammar: Matchable = Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Ref("ValuesClauseSegment"), "DEFAULT", ), )
class CreateCastStatementSegment(BaseSegment): """A `CREATE CAST` statement. https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_63_user_defined_cast_definition """ type = "create_cast_statement" match_grammar: Matchable = Sequence( "CREATE", "CAST", Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), "WITH", Ref.keyword("SPECIFIC", optional=True), OneOf( "ROUTINE", "FUNCTION", "PROCEDURE", Sequence( OneOf("INSTANCE", "STATIC", "CONSTRUCTOR", optional=True), "METHOD", ), ), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), Sequence("FOR", Ref("ObjectReferenceSegment"), optional=True), Sequence("AS", "ASSIGNMENT", optional=True), )
class DropCastStatementSegment(BaseSegment): """A `DROP CAST` statement. https://jakewheat.github.io/sql-overview/sql-2016-foundation-grammar.html#_11_64_drop_user_defined_cast_statement """ type = "drop_cast_statement" match_grammar: Matchable = Sequence( "DROP", "CAST", Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), Ref("DropBehaviorGrammar", optional=True), )
class FunctionDefinitionGrammar(BaseSegment): """This is the body of a `CREATE FUNCTION AS` statement.""" type = "function_definition" match_grammar: Matchable = Sequence( "AS", Ref("QuotedLiteralSegment"), Sequence( "LANGUAGE", Ref("NakedIdentifierSegment"), optional=True, ), )
class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. This version in the ANSI dialect should be a "common subset" of the structure of the code for those dialects. postgres: https://www.postgresql.org/docs/9.1/sql-createfunction.html snowflake: https://docs.snowflake.com/en/sql-reference/sql/create-function.html bigquery: https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions """ type = "create_function_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", Ref("DatatypeSegment"), optional=True, ), Ref("FunctionDefinitionGrammar"), )
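# Illustrative sketch (hypothetical function, dialect-neutral) of a statement
# the grammar above is intended to match, with the body supplied via the
# `AS <quoted literal> [LANGUAGE <name>]` form of FunctionDefinitionGrammar:
#
#     CREATE OR REPLACE FUNCTION add_one (x int)
#     RETURNS int
#     AS 'SELECT x + 1' LANGUAGE SQL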
`(string, number)`.""" type = "function_parameter_list" # Function parameter list match_grammar: Matchable = Bracketed( Delimited( Ref("FunctionParameterGrammar"), optional=True, ), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement.""" type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class CreateModelStatementSegment(BaseSegment): """A BigQuery `CREATE MODEL` statement.""" type = "create_model_statement" # https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "MODEL", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence( "OPTIONS", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( # This covers many but not all the extensive list of # possible 'CREATE MODEL' options. Ref("LiteralGrammar"), # Single value Bracketed( # E.g. input_label_cols: list of column names Delimited(Ref("QuotedLiteralSegment")), bracket_type="square", optional=True, ), ), ), ) ), optional=True, ), "AS", Ref("SelectableGrammar"), ) class CreateUserStatementSegment(BaseSegment): """A `CREATE USER` statement. A very simple create user syntax which can be extended by other dialects. """ type = "create_user_statement" match_grammar: Matchable = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), ) class CreateRoleStatementSegment(BaseSegment): """A `CREATE ROLE` statement. A very simple create role syntax which can be extended by other dialects. """ type = "create_role_statement" match_grammar: Matchable = Sequence( "CREATE", "ROLE", Ref("RoleReferenceSegment"), ) class DropRoleStatementSegment(BaseSegment): """A `DROP ROLE` statement with CASCADE option.""" type = "drop_role_statement" match_grammar = Sequence( "DROP", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ) class DropModelStatementSegment(BaseSegment): """A `DROP MODEL` statement.""" type = "drop_MODELstatement" # DROP MODEL [IF EXISTS} # https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model match_grammar: Matchable = Sequence( "DROP", "MODEL", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ) class MLTableExpressionSegment(BaseSegment): """An ML table expression.""" type = "ml_table_expression" # E.g. 
class MLTableExpressionSegment(BaseSegment): """An ML table expression.""" type = "ml_table_expression" # E.g. ML.WEIGHTS(MODEL `project.dataset.model`) match_grammar: Matchable = Sequence( "ML", Ref("DotSegment"), Ref("SingleIdentifierGrammar"), Bracketed( Sequence("MODEL", Ref("ObjectReferenceSegment")), Sequence( Ref("CommaSegment"), Bracketed( Ref("SelectableGrammar"), ), optional=True, ), ), )
class StatementSegment(BaseSegment): """A generic statement segment, deferring to any of its child subsegments.""" type = "statement" match_grammar: Matchable = OneOf( Ref("SelectableGrammar"), Ref("MergeStatementSegment"), Ref("InsertStatementSegment"), Ref("TransactionStatementSegment"), Ref("DropTableStatementSegment"), Ref("DropViewStatementSegment"), Ref("CreateUserStatementSegment"), Ref("DropUserStatementSegment"), Ref("TruncateStatementSegment"), Ref("AccessStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateRoleStatementSegment"), Ref("DropRoleStatementSegment"), Ref("AlterTableStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("DropSchemaStatementSegment"), Ref("DropTypeStatementSegment"), Ref("CreateDatabaseStatementSegment"), Ref("DropDatabaseStatementSegment"), Ref("CreateIndexStatementSegment"), Ref("DropIndexStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DeleteStatementSegment"), Ref("UpdateStatementSegment"), Ref("CreateCastStatementSegment"), Ref("DropCastStatementSegment"), Ref("CreateFunctionStatementSegment"), Ref("DropFunctionStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), Ref("DescribeStatementSegment"), Ref("UseStatementSegment"), Ref("ExplainStatementSegment"), Ref("CreateSequenceStatementSegment"), Ref("AlterSequenceStatementSegment"), Ref("DropSequenceStatementSegment"), Ref("CreateTriggerStatementSegment"), Ref("DropTriggerStatementSegment"), terminators=[Ref("DelimiterGrammar")], ) def get_table_references(self) -> Set[str]: """Use parsed tree to extract table references.""" table_refs = { tbl_ref.raw for tbl_ref in self.recursive_crawl("table_reference") } cte_refs = { cast(CTEDefinitionSegment, cte_def).get_identifier().raw for cte_def in self.recursive_crawl("common_table_expression") } # External references are any table references which aren't # also cte aliases. return table_refs - cte_refs
class WithNoSchemaBindingClauseSegment(BaseSegment): """WITH NO SCHEMA BINDING clause for Redshift's Late Binding Views. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_VIEW.html """ type = "with_no_schema_binding_clause" match_grammar: Matchable = Sequence( "WITH", "NO", "SCHEMA", "BINDING", )
class WithDataClauseSegment(BaseSegment): """WITH [NO] DATA clause for Postgres' MATERIALIZED VIEWS. https://www.postgresql.org/docs/9.3/sql-creatematerializedview.html """ type = "with_data_clause" match_grammar: Matchable = Sequence("WITH", Sequence("NO", optional=True), "DATA")
class DescribeStatementSegment(BaseSegment): """A `Describe` statement. DESCRIBE <object type> <object name> """ type = "describe_statement" match_grammar: Matchable = Sequence( "DESCRIBE", Ref("NakedIdentifierSegment"), Ref("ObjectReferenceSegment"), )
class UseStatementSegment(BaseSegment): """A `USE` statement.""" type = "use_statement" match_grammar: Matchable = Sequence( "USE", Ref("DatabaseReferenceSegment"), )
class ExplainStatementSegment(BaseSegment): """An `Explain` statement.
EXPLAIN explainable_stmt """ type = "explain_statement" explainable_stmt: Matchable = OneOf( Ref("SelectableGrammar"), Ref("InsertStatementSegment"), Ref("UpdateStatementSegment"), Ref("DeleteStatementSegment"), ) match_grammar: Matchable = Sequence( "EXPLAIN", explainable_stmt, ) class CreateSequenceOptionsSegment(BaseSegment): """Options for Create Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm """ type = "create_sequence_options_segment" match_grammar: Matchable = OneOf( Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment") ), OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), ), OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), ), OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"), OneOf("CYCLE", "NOCYCLE"), OneOf("ORDER", "NOORDER"), ) class CreateSequenceStatementSegment(BaseSegment): """Create Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm """ type = "create_sequence_statement" match_grammar: Matchable = Sequence( "CREATE", "SEQUENCE", Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("CreateSequenceOptionsSegment"), optional=True), ) class AlterSequenceOptionsSegment(BaseSegment): """Options for Alter Sequence statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm """ type = "alter_sequence_options_segment" match_grammar: Matchable = OneOf( Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), ), OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), ), OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"), OneOf("CYCLE", "NOCYCLE"), OneOf("ORDER", "NOORDER"), ) class AlterSequenceStatementSegment(BaseSegment): """Alter Sequence Statement. https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm """ type = "alter_sequence_statement" match_grammar: Matchable = Sequence( "ALTER", "SEQUENCE", Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("AlterSequenceOptionsSegment")), ) class DropSequenceStatementSegment(BaseSegment): """Drop Sequence Statement. https://docs.oracle.com/cd/E11882_01/server.112/e41084/statements_9001.htm """ type = "drop_sequence_statement" match_grammar: Matchable = Sequence( "DROP", "SEQUENCE", Ref("SequenceReferenceSegment") ) class DatePartFunctionNameSegment(BaseSegment): """DATEADD function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar: Matchable = Ref("DatePartFunctionName") class CreateTriggerStatementSegment(BaseSegment): """Create Trigger Statement. 
https://www.postgresql.org/docs/14/sql-createtrigger.html Edited as per the notes in the above link, removing what doesn't match ANSI. """ type = "create_trigger" match_grammar: Matchable = Sequence( "CREATE", "TRIGGER", Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF"), optional=True), Delimited( "INSERT", "DELETE", Sequence( "UPDATE", "OF", Delimited( Ref("ColumnReferenceSegment"), terminators=["OR", "ON"], ), ), delimiter="OR", terminators=["ON"], ), "ON", Ref("TableReferenceSegment"), AnyNumberOf( Sequence( "REFERENCING", "OLD", "ROW", "AS", Ref("ParameterNameSegment"), "NEW", "ROW", "AS", Ref("ParameterNameSegment"), ), Sequence("FROM", Ref("TableReferenceSegment")), OneOf( Sequence("NOT", "DEFERRABLE"), Sequence( Ref.keyword("DEFERRABLE", optional=True), OneOf( Sequence("INITIALLY", "IMMEDIATE"), Sequence("INITIALLY", "DEFERRED"), ), ), ), Sequence( "FOR", Ref.keyword("EACH", optional=True), OneOf("ROW", "STATEMENT") ), Sequence("WHEN", Bracketed(Ref("ExpressionSegment"))), ), Sequence( "EXECUTE", "PROCEDURE", Ref("FunctionNameIdentifierSegment"), Bracketed(Ref("FunctionContentsGrammar", optional=True)), optional=True, ), )
class DropTriggerStatementSegment(BaseSegment): """Drop Trigger Statement. Taken from specification in https://www.postgresql.org/docs/14/sql-droptrigger.html Edited as per the notes in the above link, removing what doesn't match ANSI. """ type = "drop_trigger" match_grammar: Matchable = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), )
class SamplingExpressionSegment(BaseSegment): """A sampling expression.""" type = "sample_expression" match_grammar: Matchable = Sequence( "TABLESAMPLE", OneOf("BERNOULLI", "SYSTEM"), Bracketed(Ref("NumericLiteralSegment")), Sequence( OneOf("REPEATABLE"), Bracketed(Ref("NumericLiteralSegment")), optional=True, ), )
class LocalAliasSegment(BaseSegment): """The `LOCAL.ALIAS` syntax allows using an alias name of a column within clauses. A hookpoint for other dialects e.g. Exasol.
""" type = "local_alias_segment" match_grammar: Matchable = Nothing() class PathSegment(BaseSegment): """A reference to a path.""" type = "path_segment" match_grammar: Matchable = OneOf( Sequence( Ref("SlashSegment"), Delimited( TypedParser("word", WordSegment, type="path_segment"), delimiter=Ref("SlashSegment"), allow_gaps=False, ), ), Ref("QuotedLiteralSegment"), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_ansi_keywords.py000066400000000000000000000172311451700765000244750ustar00rootroot00000000000000"""A list of all SQL key words.""" ansi_reserved_keywords = """SELECT JOIN ON USING CROSS INNER LEFT RIGHT OUTER INTERVAL CASE FULL NOT NULL UNION IGNORE RESPECT PARTITION ORDER ROWS SET NATURAL """ ansi_unreserved_keywords = """ABORT ABS ABSOLUTE ACCESS ACCOUNT ACCOUNTS ACTION ADA ADD ADMIN AFTER AGGREGATE ALIAS ALL ALLOCATE ALSO ALTER ALWAYS ANALYSE ANALYZE AND ANY APPLY ARE ARRAY AS ASC ASENSITIVE ASSERTION ASSIGNMENT ASYMMETRIC AT ATOMIC ATTRIBUTE ATTRIBUTES AUDIT AUTHORIZATION AUTO_INCREMENT AVG AVG_ROW_LENGTH BACKUP BACKWARD BEFORE BEGIN BERNOULLI BETWEEN BIGINT BINARY BINDING BIT BIT_LENGTH BITVAR BLOB BOOL BOOLEAN BOTH BREADTH BREAK BROWSE BULK BY CACHE CALL CALLED CARDINALITY CASCADE CASCADED CAST CATALOG CATALOG_NAME CEIL CEILING CHAIN CHANGE CHAR CHARACTER CHARACTERISTICS CHARACTER_LENGTH CHARACTERS CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA CHAR_LENGTH CHECK CHECKED CHECKPOINT CHECKSUM CLASS CLASS_ORIGIN CLOB CLOSE CLUSTER CLUSTERED COALESCE COBOL COLLATE COLLATION COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA COLLECT COLUMN COLUMN_NAME COLUMNS COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT COMMIT COMMITTED COMPLETION COMPRESS COMPUTE CONDITION CONDITION_NUMBER CONNECT CONNECTION CONNECTION_NAME CONSTRAINT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINTS CONSTRAINT_SCHEMA CONSTRUCTOR CONTAINS CONTAINSTABLE CONTINUE CONVERSION CONVERT COPY CORR CORRESPONDING COUNT COVAR_POP COVAR_SAMP CREATE CREATEDB CREATEROLE CREATEUSER CSV CUBE CUME_DIST CURRENT CURRENT_DATE CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER CURSOR CURSOR_NAME CYCLE DATA DATABASE DATABASES DATE DATETIME DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY DAYS DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAYOFMONTH DAYOFWEEK DAYOFYEAR DAY_SECOND DBCC DEALLOCATE DEC DECIMAL DECLARE DEFAULT DEFAULTS DEFERRABLE DEFERRED DEFINED DEFINER DEGREE DELAYED DELAY_KEY_WRITE DELETE DELIMITER DELIMITERS DENSE_RANK DENY DEPTH DEREF DERIVED DESC DESCRIBE DESCRIPTOR DESTROY DESTRUCTOR DETERMINISTIC DIAGNOSTICS DICTIONARY DISABLE DISCONNECT DISK DISPATCH DISTINCT DISTINCTROW DISTRIBUTED DIV DO DOMAIN DOUBLE DROP DUMMY DUMP DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH ELEMENT ELSE ELSEIF ENABLE ENCLOSED ENCODING ENCRYPTED END END-EXEC ENUM EQUALS ERRLVL ESCAPE ESCAPED EVERY EXCEPT EXCEPTION EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXECUTION EXISTING EXISTS EXIT EXP EXPLAIN EXTENSION EXTERNAL EXTRACT FALSE FETCH FIELDS FILE FILLFACTOR FILTER FINAL FIRST FLOAT FLOAT4 FLOAT8 FLOOR FLUSH FOLLOWING FOR FORCE FOREIGN FORMAT FORTRAN FORWARD FOUND FREE FREETEXT FREETEXTTABLE FREEZE FROM FULLTEXT FUNCTION FUNCTIONS FUSION FUTURE G GENERAL GENERATED GET GLOBAL GO GOTO GRANT GRANTED GRANTS GREATEST GROUP GROUPING HANDLER HAVING HEADER HEAP HIERARCHY HIGH_PRIORITY HOLD HOLDLOCK HOST HOSTS HOUR HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IDENTIFIED IDENTITY IDENTITYCOL IDENTITY_INSERT IF ILIKE IMMEDIATE IMMUTABLE IMPLEMENTATION IMPLICIT 
IMPORTED IN INCLUDE INCLUDING INCREMENT INDEX INDICATOR INFILE INFIX INHERIT INHERITS INITIAL INITIALIZE INITIALLY INOUT INPUT INSENSITIVE INSERT INSERT_ID INSTANCE INSTANTIABLE INSTEAD INT INT1 INT2 INT3 INT4 INT8 INTEGER INTEGRATION INTEGRATIONS INTERSECT INTERSECTION INTO INVOKER IS ISAM ISNULL ISOLATION ITERATE JSON K KEY KEY_MEMBER KEYS KEY_TYPE KILL LANCOMPILER LANGUAGE LARGE LAST LAST_INSERT_ID LATERAL LEADING LEAST LEAVE LENGTH LESS LEVEL LIKE LIMIT LINENO LINES LISTEN LN LOAD LOCAL LOCALTIME LOCALTIMESTAMP LOCATION LOCATOR LOCK LOCKS LOGIN LOGS LONG LONGBLOB LONGTEXT LOOP LOWER LOW_PRIORITY M MANAGE MAP MASKING MATCH MATCHED MATERIALIZED MAX MAXEXTENTS MAX_ROWS MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MEMBER MERGE MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METHOD MIDDLEINT MILLISECOND MIN MIN_ROWS MINUS MINUTE MINUTE_MICROSECOND MINUTE_SECOND MINVALUE ML MLSLABEL MOD MODE MODEL MODIFIES MODIFY MODULE MONITOR MONTH MONTHNAME MORE MOVE MULTISET MUMPS MYISAM NAME NAMES NAN NATIONAL NCHAR NCLOB NESTING NEW NEXT NO NOAUDIT NOCACHE NOCHECK NOCOMPRESS NOCREATEDB NOCREATEROLE NOCREATEUSER NOCYCLE NOINHERIT NOLOGIN NONCLUSTERED NONE NOORDER NORMALIZE NORMALIZED NOSUPERUSER NOTHING NOTIFY NOTNULL NOWAIT NO_WRITE_TO_BINLOG NULLABLE NULLIF NULLS NUMBER NUMERIC OBJECT OBJECTS OCTET_LENGTH OCTETS OF OFF OFFLINE OFFSET OFFSETS OIDS OLD ONLINE ONLY OPEN OPENDATASOURCE OPENQUERY OPENROWSET OPENXML OPERATE OPERATION OPERATOR OPTIMIZE OPTION OPTIONALLY OPTIONS OR ORDERING ORDINALITY OTHERS OUT OUTFILE OUTPUT OVER OVERLAPS OVERLAY OVERRIDING OVERWRITE OWNER OWNERSHIP PACK_KEYS PAD PARAMETER PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION PARAMETERS PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PARTIAL PASCAL PASSWORD PATH PCTFREE PERCENT PERCENTILE_CONT PERCENTILE_DISC PERCENT_RANK PIPE PLACING PLAN PLI POLICY POSITION POSTFIX POWER PRECEDING PRECISION PREFIX PREORDER PREPARE PREPARED PRESERVE PRIMARY PRINT PRIOR PRIVILEGES PROC PROCEDURAL PROCEDURE PROCEDURES PROCESS PROCESSLIST PUBLIC PURGE QUALIFY QUARTER QUOTE RAID0 RAISERROR RANGE RANK RAW READ READS READTEXT REAL RECHECK RECONFIGURE RECURSIVE REF REFERENCE_USAGE REFERENCES REFERENCING REGEXP REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY REINDEX RELATIVE RELEASE RELOAD RENAME REPEAT REPEATABLE REPLACE REPLICATION REQUIRE RESET RESIGNAL RESOURCE RESTART RESTORE RESTRICT RESULT RETURN RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNS REVOKE RLIKE ROLE ROLES ROLLBACK ROLLUP ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA ROUTINES ROW ROWCOUNT ROW_COUNT ROWGUIDCOL ROWID ROWNUM ROW_NUMBER RULE SAVE SAVEPOINT SCALE SCHEMA SCHEMA_NAME SCHEMAS SCOPE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA SCROLL SEARCH SECOND SECOND_MICROSECOND SECTION SECURITY SELF SENSITIVE SEPARATOR SEQUENCE SEQUENCES SERIALIZABLE SERVER SERVER_NAME SESSION SESSION_USER SETOF SETS SETUSER SHARE SHARES SHOW SHUTDOWN SIGNAL SIMILAR SIMPLE SIZE SMALLINT SOME SONAME SOURCE SPACE SPATIAL SPECIFIC SPECIFIC_NAME SPECIFICTYPE SQL SQL_BIG_RESULT SQL_BIG_SELECTS SQL_BIG_TABLES SQLCA SQL_CALC_FOUND_ROWS SQLCODE SQLERROR SQLEXCEPTION SQL_LOG_OFF SQL_LOG_UPDATE SQL_LOW_PRIORITY_UPDATES SQL_SELECT_LIMIT SQL_SMALL_RESULT SQLSTATE SQLWARNING SQL_WARNINGS SQRT SSL STABLE STAGE STAGES START STARTING STARTS STATE STATEMENT STATIC STATISTICS STDDEV_POP STDDEV_SAMP STDIN STDOUT STORAGE STRAIGHT_JOIN STREAM STREAMS STRICT STRING STRUCTURE STYLE SUBCLASS_ORIGIN SUBLIST 
SUBMULTISET SUBSTRING SUCCESSFUL SUM SUPERUSER SYMMETRIC SYNONYM SYSDATE SYSID SYSTEM SYSTEM_USER TABLE TABLE_NAME TABLES TABLESAMPLE TABLESPACE TASK TASKS TEMP TEMPLATE TEMPORARY TERMINATE TERMINATED TEXT TEXTSIZE THAN THEN TIES TIME TIMESTAMP TIMEZONE_HOUR TIMEZONE_MINUTE TINYBLOB TINYINT TINYTEXT TO TOAST TOP TOP_LEVEL_COUNT TRAILING TRAN TRANSACTION TRANSACTION_ACTIVE TRANSACTIONS TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSFORM TRANSFORMS TRANSIENT TRANSLATE TRANSLATION TREAT TRIGGER TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA TRIM TRUE TRUNCATE TRUSTED TSEQUAL TYPE UESCAPE UID UNBOUNDED UNCOMMITTED UNDER UNDO UNENCRYPTED UNIQUE UNKNOWN UNLISTEN UNLOCK UNNAMED UNNEST UNSIGNED UNTIL UPDATE UPDATETEXT UPPER USAGE USE USE_ANY_ROLE USER USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA USERS UTC_DATE UTC_TIME UTC_TIMESTAMP VACUUM VALID VALIDATE VALIDATOR VALUE VALUES VARBINARY VARCHAR VARCHAR2 VARCHARACTER VARIABLE VARIABLES VAR_POP VAR_SAMP VARYING VERBOSE VERSION VIEW VIEWS VOLATILE WAITFOR WAREHOUSE WAREHOUSES WEEK WEEKDAY WHEN WHENEVER WHERE WHILE WIDTH_BUCKET WINDOW WITH WITHIN WITHOUT WORK WRAPPER WRITE WRITETEXT X509 XML XOR YAML YEAR YEAR_MONTH ZEROFILL ZONE""" sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_athena.py000066400000000000000000000541431451700765000230570ustar00rootroot00000000000000"""The AWS Athena dialect. https://docs.aws.amazon.com/athena/latest/ug/what-is.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, Ref, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_athena_keywords import ( athena_reserved_keywords, athena_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") athena_dialect = ansi_dialect.copy_as("athena") athena_dialect.sets("unreserved_keywords").update(athena_unreserved_keywords) athena_dialect.sets("reserved_keywords").update(athena_reserved_keywords) athena_dialect.insert_lexer_matchers( # Array Operations: https://prestodb.io/docs/0.217/functions/array.html [ StringLexer("right_arrow", "->", CodeSegment), ], before="like_operator", ) athena_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) athena_dialect.add( StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), RightArrowOperator=StringParser("->", SymbolSegment, type="binary_operator"), JsonfileKeywordSegment=StringParser("JSONFILE", KeywordSegment, type="file_format"), RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), OrcKeywordSegment=StringParser("ORCFILE", KeywordSegment, type="file_format"), ParquetKeywordSegment=StringParser( "PARQUETFILE", KeywordSegment, type="file_format" ), AvroKeywordSegment=StringParser("AVROFILE", KeywordSegment, type="file_format"), IonKeywordSegment=StringParser("IONFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), PropertyGrammar=Sequence( 
Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), LocationGrammar=Sequence("LOCATION", Ref("QuotedLiteralSegment")), BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))), CTASPropertyGrammar=Sequence( OneOf( "external_location", "format", "partitioned_by", "bucketed_by", "bucket_count", "write_compression", "orc_compression", "parquet_compression", "field_delimiter", "location", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), CTASIcebergPropertyGrammar=Sequence( OneOf( "external_location", "format", "partitioned_by", "bucketed_by", "bucket_count", "write_compression", "orc_compression", "parquet_compression", "field_delimiter", "location", "is_external", "table_type", "partitioning", "vacuum_max_snapshot_age_ms", "vacuum_min_snapshots_to_keep", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), BracketedCTASPropertyGrammar=Bracketed( OneOf( Delimited( Ref("CTASPropertyGrammar"), ), Delimited( Ref("CTASIcebergPropertyGrammar"), ), ), ), UnloadPropertyGrammar=Sequence( OneOf( "format", "partitioned_by", "compression", "field_delimiter", ), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), BracketedUnloadPropertyGrammar=Bracketed(Delimited(Ref("UnloadPropertyGrammar"))), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), SerdePropertiesGrammar=Sequence( "WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar") ), TerminatedByGrammar=Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment")), FileFormatGrammar=OneOf( "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", "ION", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), StoredAsGrammar=Sequence("STORED", "AS", Ref("FileFormatGrammar")), StoredByGrammar=Sequence( "STORED", "BY", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), StorageFormatGrammar=OneOf( Sequence( Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), ), Ref("StoredByGrammar"), ), CommentGrammar=Sequence("COMMENT", Ref("QuotedLiteralSegment")), PartitionSpecGrammar=Sequence( "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), BackQuotedIdentifierSegment=TypedParser( "back_quote", LiteralSegment, type="quoted_identifier", ), DatetimeWithTZSegment=Sequence(OneOf("TIMESTAMP", "TIME"), "WITH", "TIME", "ZONE"), ) athena_dialect.replace( LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ParameterSegment"), ] ), AccessorGrammar=Sequence( AnyNumberOf( Ref("ArrayAccessorSegment"), optional=True, ), AnyNumberOf( Sequence( Ref("ObjectReferenceDelimiterGrammar"), Ref("ObjectReferenceSegment"), ), optional=True, ), ), QuotedLiteralSegment=OneOf( TypedParser("single_quote", LiteralSegment, type="quoted_literal"), TypedParser("double_quote", LiteralSegment, type="quoted_literal"), TypedParser("back_quote", LiteralSegment, type="quoted_literal"), ), TrimParametersGrammar=Nothing(), NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[ Ref("BackQuotedIdentifierSegment"), ] ), BinaryOperatorGrammar=OneOf( 
Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add arrow operators for functions (e.g. filter) Ref("RightArrowOperator"), ), PostFunctionGrammar=ansi_dialect.get_grammar("PostFunctionGrammar").copy( # UNNEST can optionally have a WITH ORDINALITY clause insert=[ Sequence("WITH", "ORDINALITY", optional=True), ] ), ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Ref("ArrayTypeSchemaSegment", optional=True), ) class ArrayTypeSchemaSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type_schema" match_grammar = Bracketed( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class MapTypeSegment(BaseSegment): """Expression to construct a MAP datatype.""" type = "map_type" match_grammar = Sequence( "MAP", Ref("MapTypeSchemaSegment", optional=True), ) class MapTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a MAP datatype.""" type = "map_type_schema" match_grammar = Bracketed( Sequence( Ref("PrimitiveTypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("NakedIdentifierSegment"), Ref("ColonSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), bracket_pairs_set="angle_bracket_pairs", ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class PrimitiveTypeSegment(BaseSegment): """Support Athena subset of Hive types. Primary Source: https://docs.aws.amazon.com/athena/latest/ug/data-types.html Additional Details: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types """ type = "primitive_type" match_grammar = OneOf( "BOOLEAN", "TINYINT", "SMALLINT", "INTEGER", # used in DML queries "INT", # used in DDL queries "BIGINT", "DOUBLE", "FLOAT", # used in DDL "REAL", # used "in SQL functions like SELECT CAST" Sequence( OneOf("DECIMAL", "CHAR", "VARCHAR"), Ref("BracketedArguments", optional=True), ), "STRING", "BINARY", "DATE", "TIMESTAMP", "VARBINARY", "JSON", "TIME", "IPADDRESS", "HyperLogLog", "P4HyperLogLog", ) class DatatypeSegment(BaseSegment): """Support complex Athena data types. Complex data types are typically used in either DDL statements or as the target type in casts. 
""" type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("StructTypeSegment"), Ref("ArrayTypeSegment"), Ref("MapTypeSegment"), Sequence( "ROW", Bracketed( Delimited( AnyNumberOf( Sequence( Ref("NakedIdentifierSegment"), Ref("DatatypeSegment"), ), Ref("LiteralGrammar"), ) ) ), ), Ref("DatetimeWithTZSegment"), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("MsckRepairTableStatementSegment"), Ref("UnloadStatementSegment"), Ref("PrepareStatementSegment"), Ref("ExecuteStatementSegment"), Ref("ShowStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. Inspired on Hive Dialect with adjustments based on: https://docs.aws.amazon.com/pt_br/athena/latest/ug/create-table.html """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment", optional=True), Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Ref("CommentGrammar", optional=True), # `STORED AS` can be called before or after the additional table # properties below Ref("StoredAsGrammar", optional=True), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), # Second call of `STORED AS` to match when appears after Ref("StoredAsGrammar", optional=True), Ref("StorageFormatGrammar", optional=True), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Sequence( Sequence("WITH", Ref("BracketedCTASPropertyGrammar"), optional=True), "AS", OptionallyBracketed( Ref("SelectableGrammar"), ), Sequence("WITH", "NO", "DATA", optional=True), ), ), ) class MsckRepairTableStatementSegment(BaseSegment): """An `MSCK REPAIR TABLE` statement. The `MSCK REPAIR TABLE` command scans a file system such as Amazon S3 for Hive compatible partitions that were added to the file system after the table was created. 
https://docs.aws.amazon.com/athena/latest/ug/msck-repair-table.html """ type = "msck_repair_table_statement" match_grammar = Sequence( "MSCK", "REPAIR", "TABLE", Ref("TableReferenceSegment"), ) class RowFormatClauseSegment(BaseSegment): """`ROW FORMAT` clause in a CREATE statement.""" type = "row_format_clause" match_grammar = Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Sequence( "FIELDS", Ref("TerminatedByGrammar"), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "COLLECTION", "ITEMS", Ref("TerminatedByGrammar"), optional=True ), Sequence("MAP", "KEYS", Ref("TerminatedByGrammar"), optional=True), Sequence("LINES", Ref("TerminatedByGrammar"), optional=True), Sequence( "NULL", "DEFINED", "AS", Ref("QuotedLiteralSegment"), optional=True ), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ) class InsertStatementSegment(BaseSegment): """`INSERT INTO` statement. https://docs.aws.amazon.com/athena/latest/ug/insert-into.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Sequence("DEFAULT", "VALUES"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectableGrammar")), ), ), ), ) class UnloadStatementSegment(BaseSegment): """An `UNLOAD` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html """ type = "unload_statement" match_grammar = Sequence( "UNLOAD", Bracketed(Ref("SelectableGrammar")), "TO", Ref("QuotedLiteralSegment"), Sequence("WITH", Ref("BracketedUnloadPropertyGrammar"), optional=True), ) class PrepareStatementSegment(BaseSegment): """A `prepare` statement. https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html """ type = "prepare_statement" match_grammar = Sequence( "PREPARE", Ref("TableReferenceSegment"), "FROM", OptionallyBracketed( OneOf( Ref("SelectableGrammar"), Ref("UnloadStatementSegment"), Ref("InsertStatementSegment"), ), ), ) class ExecuteStatementSegment(BaseSegment): """An `execute` statement. https://docs.aws.amazon.com/athena/latest/ug/querying-with-prepared-statements.html """ type = "execute_statement" match_grammar = Sequence( "EXECUTE", Ref("TableReferenceSegment"), OneOf( Sequence( "USING", Delimited( Ref("LiteralGrammar"), ), ), optional=True, ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. Full Apache Hive `INTERVAL` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals """ type = "interval_expression" match_grammar = Sequence( Ref.keyword("INTERVAL", optional=True), OneOf( Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("DatetimeUnitSegment"), Sequence("TO", Ref("DatetimeUnitSegment"), optional=True), ), ), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """A `GROUP BY` clause like in `SELECT`. 
https://docs.aws.amazon.com/athena/latest/ug/select.html#:~:text=%5B-,GROUP,-BY%20%5B%20ALL%20%7C%20DISTINCT%20%5D%20grouping_expressions """ match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), # Can `GROUP BY 1` Ref("ExpressionSegment"), # Can `GROUP BY coalesce(col, 1)` ), terminators=[ Sequence("ORDER", "BY"), "LIMIT", "OFFSET", "HAVING", Ref("SetOperatorSegment"), ], ), Dedent, ) class ShowStatementSegment(BaseSegment): """A `show` execute statement. Full Apache Hive `SHOW` reference: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Show Athena supported subset: https://docs.aws.amazon.com/athena/latest/ug/ddl-reference.html """ type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( Sequence( "COLUMNS", OneOf("FROM", "IN"), OneOf( Sequence( Ref("DatabaseReferenceSegment"), Ref("TableReferenceSegment") ), Sequence( Ref("TableReferenceSegment"), Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), ), ), ), Sequence( "CREATE", OneOf("TABLE", "VIEW"), Ref("TableReferenceSegment"), ), Sequence( OneOf("DATABASES", "SCHEMAS"), Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "PARTITIONS", Ref("TableReferenceSegment"), ), Sequence( "TABLES", Sequence("IN", Ref("DatabaseReferenceSegment"), optional=True), Ref("QuotedLiteralSegment", optional=True), ), Sequence( "TBLPROPERTIES", Ref("TableReferenceSegment"), Bracketed(Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "VIEWS", Sequence("IN", Ref("DatabaseReferenceSegment"), optional=True), Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True), ), ), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_athena_keywords.py000066400000000000000000000120661451700765000250040ustar00rootroot00000000000000"""A list of all Athena keywords. 
Presto List (for Athena v2): https://prestodb.io/docs/0.217/language/reserved.html Trino List (for Athena v3): https://trino.io/docs/current/language/reserved.html Hive List: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL """ athena_reserved_keywords = [ "ALL", "ALTER", "AND", "ARRAY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH", "BY", "CACHE", "CASE", "CAST", "CHAR", "COLUMN", "COMMIT", "CONF", "CONSTRAINT", "CREATE", "CROSS", "CUBE", "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURRENT", "CURSOR", "DATABASE", "DECIMAL", "DELETE", "DESCRIBE", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "EXCHANGE", "EXISTS", "EXTENDED", "EXTERNAL", "EXTRACT", "FALSE", "FETCH", "FLOAT", "FLOOR", "FOLLOWING", "FOR", "FOREIGN", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP", "GROUPING", "HAVING", "IF", "IMPORT", "IN", "INNER", "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LATERAL", "LEFT", "LESS", "LIKE", "LOCAL", "MACRO", "MAP", "MORE", "NONE", "NOT", "NULL", "NUMERIC", "OF", "ON", "ONLY", "OR", "ORDER", "OUT", "OUTER", "OVER", "PARTIALSCAN", "PARTITION", "PERCENT", "PRECEDING", "PRECISION", "PREPARE", "PRESERVE", "PRIMARY", "PROCEDURE", "RANGE", "READS", "REDUCE", "REFERENCES", "REGEXP", "REVOKE", "RIGHT", "RLIKE", "ROLLBACK", "ROLLUP", "SELECT", "SET", "SMALLINT", "START", "SYNC", "TABLE", "TABLESAMPLE", "THEN", "TO", "TRANSFORM", "TRIGGER", "TRUE", "TRUNCATE", "UNBOUNDED", "UNION", "UNIQUEJOIN", "UPDATE", "USING", "UTC_TMESTAMP", "VALUES", "VARCHAR", "WHEN", "WHERE", "WITH", ] athena_unreserved_keywords = [ "ABORT", "ADD", "ADMIN", "AFTER", "ANALYZE", "ARCHIVE", "ASC", "AUTOCOMMIT", "BEFORE", "BUCKET_COUNT", "BUCKET", "BUCKETED_BY", "BUCKETS", "CASCADE", "CHANGE", "CLUSTER", "CLUSTERED", "CLUSTERSTATUS", "COLLECTION", "COLUMNS", "COMMENT", "COMPACT", "COMPACTIONS", "COMPRESSION", "COMPUTE", "CONCATENATE", "CONTINUE", "DATA", "DATABASES", "DATE", "DATETIME", "DAY", "DAYOFWEEK", "DBPROPERTIES", "DEFERRED", "DEFINED", "DELIMITED", "DEPENDENCY", "DESC", "DIRECTORIES", "DIRECTORY", "DISABLE", "DISTRIBUTE", "ELEM_TYPE", "ENABLE", "ESCAPED", "EXCLUSIVE", "EXPLAIN", "EXPORT", "EXTERNAL_LOCATION", "FIELD_DELIMITER", "FIELDS", "FILE", "FILEFORMAT", "FIRST", "FORMAT", "FORMATTED", "FUNCTIONS", "HOLD_DDLTIME", "HOUR", "HYPERLOGLOG", "IDXPROPERTIES", "IGNORE", "INDEX", "INDEXES", "INPATH", "INPUTDRIVER", "INPUTFORMAT", "IPADDRESS", "IS_EXTERNAL", "ISOLATION", "ITEMS", "JAR", "KEY_TYPE", "KEY", "KEYS", "LAST", "LEVEL", "LIMIT", "LINES", "LOAD", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "LONG", "MAPJOIN", "MATERIALIZED", "METADATA", "MINUS", "MINUTE", "MONTH", "MSCK", "NO_DROP", "NORELY", "NOSCAN", "NOVALIDATE", "NULLS", "OFFLINE", "OFFSET", "OPTION", "ORC_COMPRESSION", "OUTPUTDRIVER", "OUTPUTFORMAT", "OVERWRITE", "OWNER", "P4HYPERLOGLOG", "PARQUET_COMPRESSION", "PARTITIONED_BY", "PARTITIONED", "PARTITIONING", "PARTITIONS", "PLUS", "PRETTY", "PRINCIPALS", "PROTECTION", "PURGE", "QDIGEST", "READ", "READONLY", "REBUILD", "RECORDREADER", "RECORDWRITER", "REGEXP", "RELOAD", "RELY", "RENAME", "REPAIR", "REPLACE", "REPLICATION", "RESTRICT", "REWRITE", "RLIKE", "ROLE", "ROLES", "ROW", "ROWS", "SCHEMA", "SCHEMAS", "SECOND", "SEMI", "SERDE", "SERDEPROPERTIES", "SERVER", "SETS", "SHARED", "SHOW_DATABASE", "SHOW", "SKEWED", "SNAPSHOT", "SORT", "SORTED", "SSL", "STATISTICS", "STORED", "STREAMTABLE", "STRING", "STRUCT", "TABLE_TYPE", "TABLES", "TBLPROPERTIES", "TEMPORARY", "TERMINATED", "TIME", "TIMESTAMP", "TIMESTAMPTZ", "TINYINT", "TOUCH", "TRANSACTION", 
"TRANSACTIONS", "UNARCHIVE", "UNDO", "UNIONTYPE", "UNLOAD", "UNLOCK", "UNSET", "UNSIGNED", "URI", "USE", "USER", "UTC", "UTCTIMESTAMP", "VACUUM_MAX_SNAPSHOT_AGE_MS", "VACUUM_MIN_SNAPSHOTS_TO_KEEP", "VALIDATE", "VALUE_TYPE", "VIEW", "VIEWS", "WINDOW", "WHILE", "WRITE_COMPRESSION", "YEAR", "ZONE", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_bigquery.py000066400000000000000000002110251451700765000234400ustar00rootroot00000000000000"""The BigQuery dialect. This inherits from the ansi dialect, with changes as specified by https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax and https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseFileSegment, BaseSegment, Bracketed, BracketedSegment, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, LiteralSegment, Matchable, MultiStringParser, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_bigquery_keywords import ( bigquery_reserved_keywords, bigquery_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") bigquery_dialect = ansi_dialect.copy_as("bigquery") bigquery_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ StringLexer("right_arrow", "=>", CodeSegment), StringLexer("question_mark", "?", CodeSegment), RegexLexer( "at_sign_literal", r"@[a-zA-Z_][\w]*", LiteralSegment, segment_kwargs={"trim_chars": ("@",)}, ), RegexLexer( "double_at_sign_literal", r"@@[a-zA-Z_][\w]*", LiteralSegment, segment_kwargs={"trim_chars": ("@@",)}, ), ], before="equals", ) bigquery_dialect.patch_lexer_matchers( [ # Quoted literals can have r or b (case insensitive) prefixes, in any order, to # indicate a raw/regex string or byte sequence, respectively. Allow escaped # quote characters inside strings by allowing \" with an optional even multiple # of backslashes in front of it. # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals # Triple quoted variant first, then single quoted RegexLexer( "single_quote", r"([rR]?[bB]?|[bB]?[rR]?)?('''((?", SymbolSegment, type="end_angle_bracket"), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), DashSegment=StringParser("-", SymbolSegment, type="dash"), SelectClauseElementListGrammar=Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), QuestionMarkSegment=StringParser("?", SymbolSegment, type="question_mark"), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", ), DoubleAtSignLiteralSegment=TypedParser( "double_at_sign_literal", LiteralSegment, type="double_at_sign_literal", ), # Add a Full equivalent which also allow keywords NakedIdentifierFullSegment=RegexParser( r"[A-Z_][A-Z0-9_]*", IdentifierSegment, type="naked_identifier_all", ), NakedIdentifierPart=RegexParser( # The part of a an identifier after a hyphen. # NOTE: This one can match an "all numbers" variant. 
        # https://cloud.google.com/resource-manager/docs/creating-managing-projects
        # (e.g. in `my-project-1234.dataset.tbl`, the `project` and `1234`
        # parts after each hyphen are matched here; our illustration.)
        r"[A-Z0-9_]+",
        IdentifierSegment,
        type="naked_identifier",
    ),
    SingleIdentifierFullGrammar=OneOf(
        Ref("NakedIdentifierSegment"),
        Ref("QuotedIdentifierSegment"),
        Ref("NakedIdentifierFullSegment"),
    ),
    DefaultDeclareOptionsGrammar=Sequence(
        "DEFAULT",
        OneOf(
            Ref("LiteralGrammar"),
            Bracketed(Ref("SelectStatementSegment")),
            Ref("BareFunctionSegment"),
            Ref("FunctionSegment"),
            Ref("ArrayLiteralSegment"),
            Ref("TupleSegment"),
            Ref("BaseExpressionElementGrammar"),
            terminators=[
                Ref("SemicolonSegment"),
            ],
        ),
    ),
    ExtendedDatetimeUnitSegment=SegmentGenerator(
        lambda dialect: MultiStringParser(
            dialect.sets("extended_datetime_units"),
            CodeSegment,
            type="date_part",
        )
    ),
    ProcedureNameIdentifierSegment=OneOf(
        # In BigQuery struct() has a special syntax, so we don't treat it as a function
        RegexParser(
            r"[A-Z_][A-Z0-9_]*",
            CodeSegment,
            type="procedure_name_identifier",
            anti_template=r"STRUCT",
        ),
        RegexParser(
            r"`[^`]*`",
            CodeSegment,
            type="procedure_name_identifier",
        ),
    ),
    ProcedureParameterGrammar=OneOf(
        Sequence(
            OneOf("IN", "OUT", "INOUT", optional=True),
            Ref("ParameterNameSegment", optional=True),
            OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")),
        ),
        OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")),
    ),
)

bigquery_dialect.replace(
    # Override to allow _01 type identifiers which are valid in BigQuery
    # The strange regex here is to make sure we don't accidentally match numeric
    # literals. We also use a regex to explicitly exclude disallowed keywords.
    NakedIdentifierSegment=SegmentGenerator(
        # Generate the anti template from the set of reserved keywords
        lambda dialect: RegexParser(
            r"[A-Z_][A-Z0-9_]*",
            IdentifierSegment,
            type="naked_identifier",
            anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
        )
    ),
    FunctionContentsExpressionGrammar=OneOf(
        Ref("DatetimeUnitSegment"),
        Ref("DatePartWeekSegment"),
        Sequence(
            Ref("ExpressionSegment"),
            Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True),
        ),
        Sequence(Ref("ExpressionSegment"), "HAVING", OneOf("MIN", "MAX")),
        Ref("NamedArgumentSegment"),
    ),
    TrimParametersGrammar=Nothing(),
    # BigQuery allows underscore in parameter names, and also anything if quoted in
    # backticks
    ParameterNameSegment=OneOf(
        RegexParser(r"[A-Z_][A-Z0-9_]*", CodeSegment, type="parameter"),
        RegexParser(r"`[^`]*`", CodeSegment, type="parameter"),
    ),
    DateTimeLiteralGrammar=Sequence(
        OneOf("DATE", "DATETIME", "TIME", "TIMESTAMP"),
        TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"),
    ),
    JoinLikeClauseGrammar=Sequence(
        AnyNumberOf(
            Ref("FromPivotExpressionSegment"),
            Ref("FromUnpivotExpressionSegment"),
            min_times=1,
        ),
        Ref("AliasExpressionSegment", optional=True),
    ),
    NaturalJoinKeywordsGrammar=Nothing(),
    MergeIntoLiteralGrammar=Sequence("MERGE", Ref.keyword("INTO", optional=True)),
    AccessorGrammar=AnyNumberOf(
        Ref("ArrayAccessorSegment"),
        # Add in semi structured expressions
        Ref("SemiStructuredAccessorSegment"),
    ),
    PrimaryKeyGrammar=Nothing(),
    ForeignKeyGrammar=Nothing(),
)

# Set Keywords
bigquery_dialect.sets("unreserved_keywords").clear()
bigquery_dialect.update_keywords_set_from_multiline_string(
    "unreserved_keywords", bigquery_unreserved_keywords
)

bigquery_dialect.sets("reserved_keywords").clear()
bigquery_dialect.update_keywords_set_from_multiline_string(
    "reserved_keywords", bigquery_reserved_keywords
)

# Add additional datetime units
# https://cloud.google.com/bigquery/docs/reference/standard-sql/timestamp_functions#extract
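# (These let e.g. `EXTRACT(ISOWEEK FROM ts)` treat the unit as a date part
# rather than as a column reference; illustrative example, ours.)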
bigquery_dialect.sets("datetime_units").update( [ "MICROSECOND", "MILLISECOND", "SECOND", "MINUTE", "HOUR", "DAY", "DAYOFWEEK", "DAYOFYEAR", "WEEK", "ISOWEEK", "MONTH", "QUARTER", "YEAR", "ISOYEAR", ] ) # Add additional datetime units only recognised in some functions (e.g. extract) bigquery_dialect.sets("extended_datetime_units").update(["DATE", "DATETIME", "TIME"]) bigquery_dialect.sets("date_part_function_name").clear() bigquery_dialect.sets("date_part_function_name").update( [ "DATE_DIFF", "DATE_TRUNC", "DATETIME_DIFF", "DATETIME_TRUNC", "EXTRACT", "LAST_DAY", "TIME_DIFF", "TIME_TRUNC", "TIMESTAMP_DIFF", "TIMESTAMP_TRUNC", ] ) # In BigQuery, UNNEST() returns a "value table". # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#value_tables bigquery_dialect.sets("value_table_functions").update(["UNNEST"]) # Bracket pairs (a set of tuples). Note that BigQuery inherits the default # "bracket_pairs" set from ANSI. Here, we're adding a different set of bracket # pairs that are only available in specific contexts where they are # applicable. This limits the scope where BigQuery allows angle brackets, # eliminating many potential parsing errors with the "<" and ">" operators. bigquery_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ), ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", Indent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class SetOperatorSegment(BaseSegment): """A set operator UNION, INTERSECT or EXCEPT. 
    https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#set_operators
    """

    type = "set_operator"

    match_grammar = OneOf(
        Sequence("UNION", OneOf("DISTINCT", "ALL")),
        Sequence("INTERSECT", "DISTINCT"),
        Sequence("EXCEPT", "DISTINCT"),
    )


class SetExpressionSegment(ansi.SetExpressionSegment):
    """A set expression with either Union, Minus, Except or Intersect."""

    match_grammar: Matchable = Sequence(
        OneOf(
            Ref("NonSetSelectableGrammar"),
            Bracketed(Ref("SetExpressionSegment")),
        ),
        AnyNumberOf(
            Sequence(
                Ref("SetOperatorSegment"),
                OneOf(
                    Ref("NonSetSelectableGrammar"),
                    Bracketed(Ref("SetExpressionSegment")),
                ),
            ),
            min_times=1,
        ),
        Ref("OrderByClauseSegment", optional=True),
        Ref("LimitClauseSegment", optional=True),
        Ref("NamedWindowSegment", optional=True),
    )


class SelectStatementSegment(ansi.SelectStatementSegment):
    """Enhance `SELECT` statement to include QUALIFY."""

    match_grammar = ansi.SelectStatementSegment.match_grammar.copy(
        insert=[Ref("QualifyClauseSegment", optional=True)],
        before=Ref("OrderByClauseSegment", optional=True),
    )


class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment):
    """Enhance unordered `SELECT` statement to include QUALIFY."""

    match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy(
        insert=[Ref("QualifyClauseSegment", optional=True)],
        before=Ref("OverlapsClauseSegment", optional=True),
    )


class MultiStatementSegment(BaseSegment):
    """Overriding StatementSegment to allow for additional segment parsing."""

    type = "multi_statement_segment"

    match_grammar: Matchable = OneOf(
        Ref("ForInStatementSegment"),
        Ref("RepeatStatementSegment"),
        Ref("WhileStatementSegment"),
        Ref("LoopStatementSegment"),
        Ref("IfStatementSegment"),
        Ref("CreateProcedureStatementSegment"),
        Ref("BeginStatementSegment"),
    )


class FileSegment(BaseFileSegment):
    """A segment representing a whole file or script.

    This is also the default "root" segment of the dialect, and so is
    usually instantiated directly. Unlike its ANSI counterpart, it defines
    an explicit match_grammar here so that BigQuery's multi-statement
    constructs (IF, LOOP, procedures, etc.) can appear at the top level
    of a script alongside ordinary statements.
    """

    match_grammar = Sequence(
        Sequence(
            OneOf(
                Ref("MultiStatementSegment"),
                Ref("StatementSegment"),
            ),
        ),
        AnyNumberOf(
            Ref("DelimiterGrammar"),
            OneOf(
                Ref("MultiStatementSegment"),
                Ref("StatementSegment"),
            ),
        ),
        Ref("DelimiterGrammar", optional=True),
    )


class StatementSegment(ansi.StatementSegment):
    """Overriding StatementSegment to allow for additional segment parsing."""

    match_grammar = ansi.StatementSegment.match_grammar.copy(
        insert=[
            Ref("DeclareStatementSegment"),
            Ref("SetStatementSegment"),
            Ref("ExportStatementSegment"),
            Ref("CreateExternalTableStatementSegment"),
            Ref("AssertStatementSegment"),
            Ref("CallStatementSegment"),
            Ref("ReturnStatementSegment"),
            Ref("BreakStatementSegment"),
            Ref("LeaveStatementSegment"),
            Ref("ContinueStatementSegment"),
            Ref("RaiseStatementSegment"),
            Ref("AlterViewStatementSegment"),
            Ref("CreateMaterializedViewStatementSegment"),
            Ref("AlterMaterializedViewStatementSegment"),
            Ref("DropMaterializedViewStatementSegment"),
        ],
    )


class AssertStatementSegment(BaseSegment):
    """ASSERT segment.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/debugging-statements
    """

    type = "assert_statement"

    match_grammar: Matchable = Sequence(
        "ASSERT",
        Ref("ExpressionSegment"),
        Sequence(
            "AS",
            Ref("QuotedLiteralSegment"),
            optional=True,
        ),
    )


class ForInStatementsSegment(BaseSegment):
    """Statements within a FOR..IN...DO...END FOR statement.
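
    Used by ForInStatementSegment below for the statement list between DO
    and END FOR, e.g. (our example):

        FOR record IN (SELECT word FROM corpus) DO
            SELECT record.word;
        END FOR;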
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#for-in """ type = "for_in_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "FOR")], parse_mode=ParseMode.GREEDY, ) class ForInStatementSegment(BaseSegment): """FOR..IN...DO...END FOR statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#for-in """ type = "for_in_statement" match_grammar = Sequence( "FOR", Ref("SingleIdentifierGrammar"), "IN", Indent, Ref("SelectableGrammar"), Dedent, "DO", Indent, Ref("ForInStatementsSegment"), Dedent, "END", "FOR", ) class RepeatStatementsSegment(BaseSegment): """Statements within a REPEAT...UNTIL... END REPEAT statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#repeat """ type = "repeat_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=["UNTIL"], parse_mode=ParseMode.GREEDY, ) class RepeatStatementSegment(BaseSegment): """REPEAT...END REPEAT statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#repeat """ type = "repeat_statement" match_grammar = Sequence( "REPEAT", Indent, Ref("RepeatStatementsSegment"), "UNTIL", Ref("ExpressionSegment"), Dedent, "END", "REPEAT", ) class IfStatementsSegment(BaseSegment): """Statements within a IF... END IF statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#if """ type = "if_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[ "ELSE", "ELSEIF", Sequence("END", "IF"), ], parse_mode=ParseMode.GREEDY, ) class IfStatementSegment(BaseSegment): """IF...END IF statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#if """ type = "if_statement" match_grammar = Sequence( "IF", Ref("ExpressionSegment"), "THEN", Indent, Ref("IfStatementsSegment"), Dedent, AnyNumberOf( Sequence( "ELSEIF", Ref("ExpressionSegment"), "THEN", Indent, Ref("IfStatementsSegment"), Dedent, ), ), Sequence( "ELSE", Indent, Ref("IfStatementsSegment"), Dedent, optional=True, ), "END", "IF", ) class LoopStatementsSegment(BaseSegment): """Statements within a LOOP... END LOOP statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#loop """ type = "loop_statements" match_grammar = AnyNumberOf( Sequence( OneOf( Ref("StatementSegment"), Ref("MultiStatementSegment"), ), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "LOOP")], parse_mode=ParseMode.GREEDY, ) class LoopStatementSegment(BaseSegment): """LOOP...END LOOP statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#loop """ type = "loop_statement" match_grammar = Sequence( "LOOP", Indent, Ref("LoopStatementsSegment"), Dedent, "END", "LOOP", ) class WhileStatementsSegment(BaseSegment): """Statements within a WHILE... END WHILE statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#while """ type = "while_statements" match_grammar = AnyNumberOf( Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar"), ), terminators=[Sequence("END", "WHILE")], parse_mode=ParseMode.GREEDY, ) class WhileStatementSegment(BaseSegment): """WHILE...END WHILE statement. 
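
    Parses loops of the form (our example):

        WHILE i < 10 DO
            SET i = i + 1;
        END WHILE;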
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#while """ type = "while_statement" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), "DO", Indent, Ref("WhileStatementsSegment"), Dedent, "END", "WHILE", ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = Sequence( # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax OneOf("DISTINCT", "ALL", optional=True), Sequence("AS", OneOf("STRUCT", "VALUE"), optional=True), ) # BigQuery allows functions in INTERVAL class IntervalExpressionSegment(ansi.IntervalExpressionSegment): """An interval with a function as value segment.""" match_grammar = Sequence( "INTERVAL", Ref("ExpressionSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment"), Sequence( Ref("DatetimeUnitSegment"), "TO", Ref("DatetimeUnitSegment"), ), ), ) bigquery_dialect.replace( QuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), ), # Add ParameterizedSegment to the ansi NumericLiteralSegment NumericLiteralSegment=OneOf( TypedParser("numeric_literal", LiteralSegment, type="numeric_literal"), Ref("ParameterizedSegment"), ), QuotedLiteralSegment=OneOf( Ref("SingleQuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment"), ), # Add elements to the ansi LiteralGrammar LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ParameterizedSegment"), Ref("SystemVariableSegment"), ] ), PostTableExpressionGrammar=Sequence( Sequence( "FOR", OneOf("SYSTEM_TIME", Sequence("SYSTEM", "TIME")), "AS", "OF", Ref("ExpressionSegment"), optional=True, ), Sequence( "WITH", "OFFSET", Sequence("AS", Ref("SingleIdentifierGrammar"), optional=True), optional=True, ), ), FunctionNameIdentifierSegment=OneOf( # In BigQuery struct() and array() have a special syntax, # so we don't treat them as functions RegexParser( r"[A-Z_][A-Z0-9_]*", CodeSegment, type="function_name_identifier", anti_template=r"^(STRUCT|ARRAY)$", ), RegexParser( r"`[^`]*`", CodeSegment, type="function_name_identifier", ), ), ) class ExtractFunctionNameSegment(BaseSegment): """EXTRACT function name segment. Need to be able to specify this as type `function_name_identifier` within a `function_name` so that linting rules identify it properly. """ type = "function_name" match_grammar: Matchable = StringParser( "EXTRACT", CodeSegment, type="function_name_identifier", ) class ArrayFunctionNameSegment(BaseSegment): """ARRAY function name segment. Need to be able to specify this as type `function_name_identifier` within a `function_name` so that linting rules identify it properly. """ type = "function_name" match_grammar: Matchable = StringParser( "ARRAY", CodeSegment, type="function_name_identifier", ) class DatePartWeekSegment(BaseSegment): """WEEK() in EXTRACT, DATE_DIFF, DATE_TRUNC, LAST_DAY. https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#extract https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_diff https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_trunc https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#last_day """ type = "date_part_week" match_grammar: Matchable = Sequence( "WEEK", Bracketed( OneOf( "SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", ), ), ) class NormalizeFunctionNameSegment(BaseSegment): """NORMALIZE function name segment. 
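
    Covers both NORMALIZE and NORMALIZE_AND_CASEFOLD, e.g.
    `NORMALIZE('a', NFKC)` (our example).
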
    Need to be able to specify this as type `function_name_identifier`
    within a `function_name` so that linting rules identify it properly.
    """

    type = "function_name"

    match_grammar: Matchable = OneOf(
        StringParser(
            "NORMALIZE",
            CodeSegment,
            type="function_name_identifier",
        ),
        StringParser(
            "NORMALIZE_AND_CASEFOLD",
            CodeSegment,
            type="function_name_identifier",
        ),
    )


class FunctionNameSegment(ansi.FunctionNameSegment):
    """Describes the name of a function.

    This includes any prefix bits, e.g. project, schema or the SAFE keyword.
    """

    match_grammar: Matchable = Sequence(
        # Project name, schema identifier, etc.
        AnyNumberOf(
            Sequence(
                # BigQuery Function names can be prefixed by the keyword SAFE to
                # return NULL instead of error.
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#safe_prefix
                OneOf("SAFE", Ref("SingleIdentifierGrammar")),
                Ref("DotSegment"),
            ),
            terminators=[Ref("BracketedSegment")],
        ),
        # Base function name
        OneOf(
            Ref("FunctionNameIdentifierSegment"),
            Ref("QuotedIdentifierSegment"),
            terminators=[Ref("BracketedSegment")],
        ),
        # BigQuery allows whitespace between the `.` of a function reference or
        # SAFE prefix. Keeping the explicit `allow_gaps=True` here to
        # make the distinction from `ansi.FunctionNameSegment` clear.
        allow_gaps=True,
    )


class FunctionSegment(ansi.FunctionSegment):
    """A scalar or aggregate function.

    Maybe in the future we should distinguish between aggregate functions
    and other functions. For now we treat them the same because they look
    the same for our purposes.
    """

    match_grammar = Sequence(
        OneOf(
            Sequence(
                # BigQuery EXTRACT allows optional TimeZone
                Ref("ExtractFunctionNameSegment"),
                Bracketed(
                    OneOf(
                        Ref("DatetimeUnitSegment"),
                        Ref("DatePartWeekSegment"),
                        Ref("ExtendedDatetimeUnitSegment"),
                    ),
                    "FROM",
                    Ref("ExpressionSegment"),
                ),
            ),
            Sequence(
                # BigQuery NORMALIZE allows optional normalization_mode
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#normalize
                Ref("NormalizeFunctionNameSegment"),
                Bracketed(
                    Ref("ExpressionSegment"),
                    Sequence(
                        Ref("CommaSegment"),
                        OneOf("NFC", "NFKC", "NFD", "NFKD"),
                        optional=True,
                    ),
                ),
            ),
            Sequence(
                # Treat functions which take date parts separately
                # So those functions parse date parts as DatetimeUnitSegment
                # rather than identifiers.
                Ref(
                    "DatePartFunctionNameSegment",
                    exclude=Ref("ExtractFunctionNameSegment"),
                ),
                Bracketed(
                    Delimited(
                        Ref("DatetimeUnitSegment"),
                        Ref("DatePartWeekSegment"),
                        Ref(
                            "FunctionContentsGrammar",
                        ),
                    ),
                    parse_mode=ParseMode.GREEDY,
                ),
            ),
            Sequence(
                Sequence(
                    Ref(
                        "FunctionNameSegment",
                        exclude=OneOf(
                            Ref("DatePartFunctionNameSegment"),
                            Ref("NormalizeFunctionNameSegment"),
                            Ref("ValuesClauseSegment"),
                        ),
                    ),
                    Bracketed(
                        Ref(
                            "FunctionContentsGrammar",
                            # The brackets might be empty for some functions...
                            optional=True,
                        ),
                        parse_mode=ParseMode.GREEDY,
                    ),
                ),
                # Functions returning ARRAYS in BigQuery can have optional
                # Array Accessor clauses
                Ref("ArrayAccessorSegment", optional=True),
                # Functions returning STRUCTs in BigQuery can have the fields
                # elements referenced (e.g. ".a"), including wildcards (e.g. ".*")
                # or multiple nested fields (e.g.
".a.b", or ".a.b.c") Ref("SemiStructuredAccessorSegment", optional=True), Ref("PostFunctionGrammar", optional=True), ), ), allow_gaps=False, ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement.""" match_grammar = Sequence( AnyNumberOf( Sequence( OneOf("DETERMINISTIC", Sequence("NOT", "DETERMINISTIC")), optional=True, ), Sequence( "LANGUAGE", Ref("NakedIdentifierSegment"), Sequence( "OPTIONS", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Anything(), ), ) ), optional=True, ), ), # There is some syntax not implemented here, Sequence( "AS", OneOf( Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Bracketed( OneOf(Ref("ExpressionSegment"), Ref("SelectStatementSegment")) ), ), ), ) ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Bigquery.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional EXCEPT or REPLACE clause # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#select_replace Ref("ExceptClauseSegment", optional=True), Ref("ReplaceClauseSegment", optional=True), ] ) class ExceptClauseSegment(BaseSegment): """SELECT EXCEPT clause.""" type = "select_except_clause" match_grammar = Sequence( "EXCEPT", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ) class BeginStatementSegment(BaseSegment): """A `BEGIN...EXCEPTION...END` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#beginexceptionend """ type = "begin_statement" match_grammar = Sequence( "BEGIN", Indent, AnyNumberOf( Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar"), ), min_times=1, terminators=["END", "EXCEPTION"], parse_mode=ParseMode.GREEDY, ), Dedent, Sequence( "EXCEPTION", "WHEN", "ERROR", "THEN", Indent, AnyNumberOf( Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar"), ), min_times=1, terminators=["END"], parse_mode=ParseMode.GREEDY, ), Dedent, optional=True, ), "END", ) class ReplaceClauseSegment(BaseSegment): """SELECT REPLACE clause.""" type = "select_replace_clause" match_grammar = Sequence( "REPLACE", Bracketed( Delimited( # Not *really* a select target element. It behaves exactly # the same way however. Ref("SelectClauseElementSegment"), ) ), ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. In particular here, this enabled the support for the STRUCT datatypes. """ match_grammar = OneOf( # Parameter type Sequence( Ref("DatatypeIdentifierSegment"), # Simple type # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#parameterized_data_types Ref("BracketedArguments", optional=True), ), Sequence("ANY", "TYPE"), # SQL UDFs can specify this "type" Ref("ArrayTypeSegment"), Ref("StructTypeSegment"), ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( # Comma-separated list of field names/types Sequence( OneOf( # ParameterNames can look like Datatypes so can't use # Optional=True here and instead do a OneOf in order # with DataType only first, followed by both. 
Ref("DatatypeSegment"), Sequence( Ref("ParameterNameSegment"), Ref("DatatypeSegment"), ), ), AnyNumberOf(Ref("ColumnConstraintSegment")), Ref("OptionsSegment", optional=True), ), ), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", ) class ArrayExpressionSegment(ansi.ArrayExpressionSegment): """Expression to construct a ARRAY from a subquery. https://cloud.google.com/bigquery/docs/reference/standard-sql/array_functions#array """ match_grammar = Sequence( Ref("ArrayFunctionNameSegment"), Bracketed( Ref("SelectableGrammar"), ), ) class TupleSegment(BaseSegment): """Expression to construct a TUPLE. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#tuple_syntax """ type = "tuple" match_grammar = Bracketed(Delimited(Ref("BaseExpressionElementGrammar"))) class NamedArgumentSegment(BaseSegment): """Named argument to a function. https://cloud.google.com/bigquery/docs/reference/standard-sql/geography_functions#st_geogfromgeojson """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment.""" type = "semi_structured_expression" match_grammar = Sequence( AnyNumberOf( Sequence( Ref("DotSegment"), OneOf( Ref("SingleIdentifierGrammar"), Ref("StarSegment"), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, min_times=1, ), allow_gaps=True, ) class ColumnReferenceSegment(ansi.ObjectReferenceSegment): """A reference to column, field or alias. We override this for BigQuery to allow keywords in structures (using Full segments) and to properly return references for objects. Ref: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical "A reserved keyword must be a quoted identifier if it is a standalone keyword or the first component of a path expression. It may be unquoted as the second or later component of a path expression." """ type = "column_reference" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), Sequence( Ref("ObjectReferenceDelimiterGrammar"), Delimited( Ref("SingleIdentifierFullGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), BracketedSegment, ], allow_gaps=False, ), allow_gaps=False, optional=True, ), allow_gaps=False, ) def extract_possible_references(self, level): """Extract possible references of a given level. Overrides the parent-class function. BigQuery's support for things like the following: - Functions that take a table as a parameter (e.g. TO_JSON_STRING) https://cloud.google.com/bigquery/docs/reference/standard-sql/ json_functions#to_json_string - STRUCT means that, without schema information (which SQLFluff does not have), references to data are often ambiguous. """ level = self._level_to_int(level) refs = list(self.iter_raw_references()) if level == self.ObjectReferenceLevel.SCHEMA.value and len(refs) >= 3: return [refs[0]] # pragma: no cover if level == self.ObjectReferenceLevel.TABLE.value: # One part: Could be a table, e.g. TO_JSON_STRING(t) # Two parts: Could be dataset.table or table.column. # Three parts: Could be table.column.struct or dataset.table.column. # Four parts: dataset.table.column.struct # Five parts: project.dataset.table.column.struct # So... 
return the first 3 parts. return refs[:3] if ( level == self.ObjectReferenceLevel.OBJECT.value and len(refs) >= 3 ): # pragma: no cover # Ambiguous case: The object (i.e. column) could be the first or # second part, so return both. return [refs[1], refs[2]] return super().extract_possible_references(level) # pragma: no cover def extract_possible_multipart_references(self, levels): """Extract possible multipart references, e.g. schema.table.""" levels_tmp = [self._level_to_int(level) for level in levels] min_level = min(levels_tmp) max_level = max(levels_tmp) refs = list(self.iter_raw_references()) if max_level == self.ObjectReferenceLevel.SCHEMA.value and len(refs) >= 3: return [tuple(refs[0 : max_level - min_level + 1])] # Note we aren't handling other possible cases. We'll add these as # needed. return super().extract_possible_multipart_references(levels) class TableReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object that may contain embedded hyphens.""" type = "table_reference" match_grammar: Matchable = Delimited( Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DashSegment"), Ref("NakedIdentifierPart"), allow_gaps=False, ), optional=True, ), allow_gaps=False, ), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ], allow_gaps=False, ) def iter_raw_references(self): """Generate a list of reference strings and elements. Each reference is an ObjectReferencePart. Overrides the base class because hyphens (DashSegment) causes one logical part of the name to be split across multiple elements, e.g. "table-a" is parsed as three segments. """ # For each descendant element, group them, using "dot" elements as a # delimiter. parts = [] elems_for_parts = [] def flush(): nonlocal parts, elems_for_parts result = self.ObjectReferencePart("".join(parts), elems_for_parts) parts = [] elems_for_parts = [] return result for elem in self.recursive_crawl( "identifier", "literal", "dash", "dot", "star" ): if not elem.is_type("dot"): if elem.is_type("identifier"): # Found an identifier (potentially with embedded dots). elem_subparts = elem.raw_trimmed().split(".") for idx, part in enumerate(elem_subparts): # Save each part of the segment. parts.append(part) elems_for_parts.append(elem) if idx != len(elem_subparts) - 1: # For each part except the last, flush. yield flush() else: # For non-identifier segments, save the whole segment. parts.append(elem.raw_trimmed()) elems_for_parts.append(elem) else: yield flush() # Flush any leftovers. if parts: yield flush() class SystemVariableSegment(BaseSegment): """BigQuery supports usage of system-level variables, which are prefixed with @@. These are also used in exception blocks in the @@error object. https://cloud.google.com/bigquery/docs/reference/system-variables https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#beginexceptionend """ type = "system_variable" match_grammar = Ref("DoubleAtSignLiteralSegment") class DeclareStatementSegment(BaseSegment): """Declaration of a variable. 
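
    Covers both typed and untyped declarations, e.g. (our examples):

        DECLARE x INT64 DEFAULT 0;
        DECLARE y DEFAULT (SELECT 1);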
https://cloud.google.com/bigquery/docs/reference/standard-sql/scripting#declare """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Delimited(Ref("SingleIdentifierFullGrammar")), OneOf( Ref("DefaultDeclareOptionsGrammar"), Sequence( Ref("DatatypeSegment"), Ref("DefaultDeclareOptionsGrammar", optional=True), ), ), ) class SetStatementSegment(BaseSegment): """Setting an already declared variable. https://cloud.google.com/bigquery/docs/reference/standard-sql/scripting#set """ type = "set_segment" match_grammar = Sequence( "SET", OneOf( Ref("NakedIdentifierSegment"), Bracketed(Delimited(Ref("NakedIdentifierSegment"))), ), Ref("EqualsSegment"), Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Ref("SelectStatementSegment")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Ref("SelectStatementSegment")), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ) ) ), Ref("ArrayLiteralSegment"), Ref("ExpressionSegment"), ), ), ) class PartitionBySegment(BaseSegment): """PARTITION BY partition_expression.""" type = "partition_by_segment" match_grammar = Sequence( "PARTITION", "BY", Ref("ExpressionSegment"), ) class ClusterBySegment(BaseSegment): """CLUSTER BY clustering_column_list.""" type = "cluster_by_segment" match_grammar = Sequence( "CLUSTER", "BY", Delimited(Ref("ExpressionSegment")), ) class OptionsSegment(BaseSegment): """OPTIONS clause for a table.""" type = "options_segment" match_grammar = Sequence( "OPTIONS", Bracketed( Delimited( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("BaseExpressionElementGrammar"), ) ) ), ) class ColumnDefinitionSegment(ansi.ColumnDefinitionSegment): """A column definition, e.g. for CREATE TABLE or ALTER TABLE. 
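
    e.g. `col1 INT64 NOT NULL OPTIONS(description='id')` (our example).
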
Override ANSI support to allow passing of column options """ match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type AnyNumberOf(Ref("ColumnConstraintSegment")), Ref("OptionsSegment", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement.""" # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_table_statement match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Sequence( OneOf("COPY", "LIKE", "CLONE"), Ref("TableReferenceSegment"), optional=True, ), # Column list Sequence( Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, ) ), optional=True, ), Ref("PartitionBySegment", optional=True), Ref("ClusterBySegment", optional=True), Ref("OptionsSegment", optional=True), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement.""" match_grammar = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_set_options_statement Sequence( "SET", Ref("OptionsSegment"), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_add_column_statement Delimited( Sequence( "ADD", "COLUMN", Ref("IfNotExistsGrammar", optional=True), Ref("ColumnDefinitionSegment"), ), allow_trailing=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_to_statement Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_rename_column_statement Delimited( Sequence( "RENAME", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name "TO", Ref("SingleIdentifierGrammar"), # Column name ), allow_trailing=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_table_drop_column_statement Delimited( Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name ), ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_column_set_options_statement Delimited( Sequence( "ALTER", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), # Column name OneOf( Sequence( "SET", OneOf( Ref("OptionsSegment"), Sequence( "DATA", "TYPE", Ref("DatatypeSegment"), ), Sequence( "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), ), ), ), ), Sequence("DROP", OneOf("DEFAULT", Sequence("NOT", "NULL"))), ), ), ), ), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement. 
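
    e.g. (our example, modelled on the patterns in the doc below):

        CREATE EXTERNAL TABLE dataset.ext_table
        WITH PARTITION COLUMNS (dt STRING)
        OPTIONS (format = 'PARQUET', uris = ['gs://bucket/path/*']);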
https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_external_table_statement """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "EXTERNAL", "TABLE", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, ), optional=True, ), # Although not specified in the BigQuery documentation optional arguments for # CREATE EXTERNAL TABLE statements can be ordered arbitrarily. AnyNumberOf( # connection names have the same rules as table names in BigQuery Sequence("WITH", "CONNECTION", Ref("TableReferenceSegment"), optional=True), Sequence( "WITH", "PARTITION", "COLUMNS", Bracketed( Delimited( Ref("ColumnDefinitionSegment"), allow_trailing=True, ), optional=True, ), optional=True, ), Ref("OptionsSegment", optional=True), ), ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#view_option_list """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("OptionsSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_view_set_options_statement """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_materialized_view_statement """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("PartitionBySegment", optional=True), Ref("ClusterBySegment", optional=True), Ref("OptionsSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class AlterMaterializedViewStatementSegment(BaseSegment): """A `ALTER MATERIALIZED VIEW SET OPTIONS` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#alter_materialized_view_set_options_statement """ type = "alter_materialized_view_set_options_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), "SET", Ref("OptionsSegment"), ) class DropMaterializedViewStatementSegment(BaseSegment): """A `DROP MATERIALIZED VIEW` statement. https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#drop_materialized_view_statement """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class ParameterizedSegment(BaseSegment): """BigQuery allows named and argument based parameters to prevent SQL Injection. 
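
    Matches named `@param` and positional `?` placeholders, e.g.
    `SELECT * FROM t WHERE id = @target_id` (our example).
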
    https://cloud.google.com/bigquery/docs/parameterized-queries
    """

    type = "parameterized_expression"

    match_grammar = OneOf(Ref("AtSignLiteralSegment"), Ref("QuestionMarkSegment"))


class PivotForClauseSegment(BaseSegment):
    """The FOR part of a PIVOT expression.

    Needed to avoid BaseExpressionElementGrammar swallowing up the IN part
    """

    type = "pivot_for_clause"

    match_grammar = Sequence(
        Ref("BaseExpressionElementGrammar"),
        terminators=["IN"],
        parse_mode=ParseMode.GREEDY,
    )


class FromPivotExpressionSegment(BaseSegment):
    """A PIVOT expression.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#pivot_operator
    """

    type = "from_pivot_expression"

    match_grammar = Sequence(
        "PIVOT",
        Bracketed(
            Delimited(
                Sequence(
                    Ref("FunctionSegment"),
                    Ref("AliasExpressionSegment", optional=True),
                ),
            ),
            "FOR",
            Ref("PivotForClauseSegment"),
            "IN",
            Bracketed(
                Delimited(
                    Sequence(
                        Ref("LiteralGrammar"),
                        Ref("AliasExpressionSegment", optional=True),
                    ),
                )
            ),
        ),
    )


class UnpivotAliasExpressionSegment(BaseSegment):
    """In BigQuery, UNPIVOT aliases can be single or double quoted, or numeric."""

    type = "alias_expression"

    match_grammar = Sequence(
        Indent,
        Ref.keyword("AS", optional=True),
        OneOf(
            Ref("QuotedLiteralSegment"),
            Ref("NumericLiteralSegment"),
        ),
        Dedent,
    )


class FromUnpivotExpressionSegment(BaseSegment):
    """An UNPIVOT expression.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#unpivot_operator
    """

    type = "from_unpivot_expression"

    match_grammar = Sequence(
        "UNPIVOT",
        Sequence(
            OneOf("INCLUDE", "EXCLUDE"),
            "NULLS",
            optional=True,
        ),
        OneOf(
            # single column unpivot
            Bracketed(
                Ref("SingleIdentifierGrammar"),
                "FOR",
                Ref("SingleIdentifierGrammar"),
                "IN",
                Bracketed(
                    Delimited(
                        Sequence(
                            Delimited(Ref("SingleIdentifierGrammar")),
                            Ref("UnpivotAliasExpressionSegment", optional=True),
                        ),
                    ),
                ),
            ),
            # multi column unpivot
            Bracketed(
                Bracketed(
                    Delimited(
                        Ref("SingleIdentifierGrammar"),
                        min_delimiters=1,
                    ),
                ),
                "FOR",
                Ref("SingleIdentifierGrammar"),
                "IN",
                Bracketed(
                    Delimited(
                        Sequence(
                            Bracketed(
                                Delimited(
                                    Ref("SingleIdentifierGrammar"),
                                    min_delimiters=1,
                                ),
                            ),
                            Ref("UnpivotAliasExpressionSegment", optional=True),
                        ),
                    ),
                ),
            ),
        ),
    )


class InsertStatementSegment(ansi.InsertStatementSegment):
    """An `INSERT` statement.

    N.B. not a complete implementation.
    """

    match_grammar = Sequence(
        "INSERT",
        Ref.keyword("INTO", optional=True),
        Ref("TableReferenceSegment"),
        Ref("BracketedColumnReferenceListGrammar", optional=True),
        Ref("SelectableGrammar"),
    )


class SamplingExpressionSegment(ansi.SamplingExpressionSegment):
    """A sampling expression.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#tablesample_operator
    """

    match_grammar = Sequence(
        "TABLESAMPLE", "SYSTEM", Bracketed(Ref("NumericLiteralSegment"), "PERCENT")
    )


class MergeMatchSegment(ansi.MergeMatchSegment):
    """Contains BigQuery specific merge operations.

    Overriding ANSI to allow `NOT MATCHED BY SOURCE` statements
    """

    type = "merge_match"

    match_grammar: Matchable = AnyNumberOf(
        Ref("MergeMatchedClauseSegment"),
        Ref("MergeNotMatchedByTargetClauseSegment"),
        Ref("MergeNotMatchedBySourceClauseSegment"),
        min_times=1,
    )


class MergeNotMatchedByTargetClauseSegment(ansi.MergeNotMatchedClauseSegment):
    """The `WHEN NOT MATCHED [BY TARGET]` clause within a `MERGE` statement.
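
    e.g. `WHEN NOT MATCHED BY TARGET THEN INSERT ROW` (our example).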
    Overriding ANSI to allow the optional `BY TARGET` qualifier.
    """

    type = "not_matched_by_target_clause"

    match_grammar: Matchable = Sequence(
        "WHEN",
        "NOT",
        "MATCHED",
        Sequence("BY", "TARGET", optional=True),
        Sequence("AND", Ref("ExpressionSegment"), optional=True),
        "THEN",
        Indent,
        Ref("MergeInsertClauseSegment"),
        Dedent,
    )


class MergeNotMatchedBySourceClauseSegment(ansi.MergeMatchedClauseSegment):
    """The `WHEN NOT MATCHED BY SOURCE` clause within a `MERGE` statement.

    It inherits from `ansi.MergeMatchedClauseSegment` because a
    NotMatchedBySource clause is conceptually closer to a Matched clause
    than to a NotMatched clause, i.e. it gets combined with an UPDATE or
    DELETE, not with an INSERT.
    """

    type = "merge_when_matched_clause"

    match_grammar: Matchable = Sequence(
        "WHEN",
        "NOT",
        "MATCHED",
        "BY",
        "SOURCE",
        Sequence("AND", Ref("ExpressionSegment"), optional=True),
        "THEN",
        Indent,
        OneOf(
            Ref("MergeUpdateClauseSegment"),
            Ref("MergeDeleteClauseSegment"),
        ),
        Dedent,
    )


class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment):
    """`INSERT` clause within the `MERGE` statement.

    Overriding ANSI to allow `INSERT ROW` statements
    """

    match_grammar: Matchable = OneOf(
        Sequence(
            "INSERT",
            Indent,
            Ref("BracketedColumnReferenceListGrammar", optional=True),
            Dedent,
            Ref("ValuesClauseSegment", optional=True),
        ),
        Sequence("INSERT", "ROW"),
    )


class DeleteStatementSegment(BaseSegment):
    """A `DELETE` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#delete_statement
    """

    type = "delete_statement"

    # match grammar. This one makes sense in the context of knowing that it's
    # definitely a statement, we just don't know what type yet.
    match_grammar: Matchable = Sequence(
        "DELETE",
        Ref.keyword("FROM", optional=True),
        Ref("TableReferenceSegment"),
        Ref("AliasExpressionSegment", optional=True),
        Ref("WhereClauseSegment", optional=True),
    )


class ExportStatementSegment(BaseSegment):
    """`EXPORT` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements#export_data_statement
    """

    type = "export_statement"

    match_grammar: Matchable = Sequence(
        "EXPORT",
        "DATA",
        Sequence("WITH", "CONNECTION", Ref("ObjectReferenceSegment"), optional=True),
        "OPTIONS",
        Bracketed(
            Delimited(
                # String options
                # Note: these are added as their own type rather than as keywords,
                # since BigQuery's convention (per the docs) is to put keywords in
                # uppercase and these options in lowercase.
                Sequence(
                    OneOf(
                        StringParser(
                            "compression",
                            CodeSegment,
                            type="export_option",
                        ),
                        StringParser(
                            "field_delimiter",
                            CodeSegment,
                            type="export_option",
                        ),
                        StringParser(
                            "format",
                            CodeSegment,
                            type="export_option",
                        ),
                        StringParser(
                            "uri",
                            CodeSegment,
                            type="export_option",
                        ),
                    ),
                    Ref("EqualsSegment"),
                    Ref("QuotedLiteralSegment"),
                ),
                # Bool options
                # Note: same convention as above; options stay lowercase rather
                # than being treated as keywords.
                Sequence(
                    OneOf(
                        StringParser(
                            "header",
                            CodeSegment,
                            type="export_option",
                        ),
                        StringParser(
                            "overwrite",
                            CodeSegment,
                            type="export_option",
                        ),
                        StringParser(
                            "use_avro_logical_types",
                            CodeSegment,
                            type="export_option",
                        ),
                    ),
                    Ref("EqualsSegment"),
                    OneOf("TRUE", "FALSE"),
                ),
            ),
        ),
        "AS",
        Ref("SelectableGrammar"),
    )


class ProcedureNameSegment(BaseSegment):
    """Procedure name, including any prefix bits, e.g. project or schema."""

    type = "procedure_name"

    match_grammar: Matchable = Sequence(
        # Project name, schema identifier, etc.
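        # e.g. `my_project.my_dataset.my_proc` parses as two dotted
        # prefixes plus the base name (our illustration).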
        AnyNumberOf(
            Sequence(
                Ref("SingleIdentifierGrammar"),
                Ref("DotSegment"),
            ),
        ),
        # Base procedure name
        OneOf(
            Ref("ProcedureNameIdentifierSegment"),
            Ref("QuotedIdentifierSegment"),
        ),
        allow_gaps=False,
    )


class ProcedureParameterListSegment(BaseSegment):
    """The parameters for a procedure, i.e. `(string, number)`."""

    # Procedure parameter list (based on FunctionsParameterListGrammar)
    type = "procedure_parameter_list"

    match_grammar = Bracketed(
        Delimited(
            Ref("ProcedureParameterGrammar"),
            optional=True,
        )
    )


class ProcedureStatements(BaseSegment):
    """Statements within a CREATE PROCEDURE statement.

    https://cloud.google.com/bigquery/docs/procedures
    """

    type = "procedure_statements"

    match_grammar = AnyNumberOf(
        Sequence(
            Ref("StatementSegment"),
            Ref("DelimiterGrammar"),
        ),
        terminators=["END"],
        parse_mode=ParseMode.GREEDY,
    )


class CreateProcedureStatementSegment(BaseSegment):
    """A `CREATE PROCEDURE` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/data-definition-language#create_procedure
    """

    type = "create_procedure_statement"

    match_grammar: Matchable = Sequence(
        "CREATE",
        Ref("OrReplaceGrammar", optional=True),
        "PROCEDURE",
        Ref("IfNotExistsGrammar", optional=True),
        Ref("ProcedureNameSegment"),
        Ref("ProcedureParameterListSegment"),
        Sequence(
            "OPTIONS",
            "strict_mode",
            StringParser("strict_mode", CodeSegment, type="procedure_option"),
            Ref("EqualsSegment"),
            Ref("BooleanLiteralGrammar"),
            optional=True,
        ),
        "BEGIN",
        Indent,
        Ref("ProcedureStatements"),
        Dedent,
        "END",
    )


class CallStatementSegment(BaseSegment):
    """A `CALL` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#call
    """

    type = "call_statement"

    match_grammar: Matchable = Sequence(
        "CALL",
        Ref("ProcedureNameSegment"),
        Bracketed(
            Delimited(
                Ref("ExpressionSegment"),
                optional=True,
            ),
        ),
    )


class ReturnStatementSegment(BaseSegment):
    """A `RETURN` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#return
    """

    type = "return_statement"

    match_grammar: Matchable = Sequence(
        "RETURN",
    )


class BreakStatementSegment(BaseSegment):
    """A `BREAK` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#break
    """

    type = "break_statement"

    match_grammar: Matchable = Sequence(
        "BREAK",
    )


class LeaveStatementSegment(BaseSegment):
    """A `LEAVE` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#leave
    """

    type = "leave_statement"

    match_grammar: Matchable = Sequence(
        "LEAVE",
    )


class ContinueStatementSegment(BaseSegment):
    """A `CONTINUE` statement.

    https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#continue
    """

    type = "continue_statement"

    match_grammar: Matchable = OneOf(
        "CONTINUE",
        "ITERATE",
    )


class RaiseStatementSegment(BaseSegment):
    """A `RAISE` statement.
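
    e.g. `RAISE USING MESSAGE = 'something went wrong'` (our example).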
https://cloud.google.com/bigquery/docs/reference/standard-sql/procedural-language#raise """ type = "raise_statement" match_grammar: Matchable = Sequence( "RAISE", Sequence( "USING", "MESSAGE", Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True, ), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_bigquery_keywords.py000066400000000000000000000045301451700765000253700ustar00rootroot00000000000000"""A list of all BigQuery SQL key words.""" # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords bigquery_reserved_keywords = """ALL AND ANY ARRAY AS ASC ASSERT_ROWS_MODIFIED AT BETWEEN BY CASE CAST COLLATE CONTAINS CREATE CROSS CUBE CURRENT DEFAULT DEFINE DESC DISTINCT ELSE END ENUM ESCAPE EXCEPT EXCLUDE EXISTS FALSE FETCH FOLLOWING FOR FROM FULL GROUP GROUPING GROUPS HASH HAVING IF IGNORE IN INCLUDE INNER INTERSECT INTERVAL INTO IS JOIN LATERAL LEFT LIKE LIMIT LOOKUP MERGE NEW NO NOT NULL NULLS OF ON OR ORDER OUTER OVER PARTITION PIVOT PRECEDING PROTO RANGE RECURSIVE RESPECT RIGHT ROLLUP ROWS SELECT SET SOME STRUCT TABLESAMPLE THEN TO TREAT TRUE UNBOUNDED UNION UNNEST UNPIVOT USING WHEN WHERE WINDOW WITH WITHIN""" # Note BigQuery doesn't have a list of Unreserved Keywords # so these are just ones we need to allow parsing to work bigquery_unreserved_keywords = """ACCOUNT ADD ADMIN AFTER ALTER APPLY ASSERT AUTO_INCREMENT BEGIN BERNOULLI BINARY BINDING BREAK CACHE CALL CASCADE CHAIN CHARACTER CHECK CLONE CLUSTER COLUMN COLUMNS COMMENT COMMIT CONCURRENTLY CONTINUE CONNECT CONNECTION CONSTRAINT COPY CURRENT_USER CYCLE DATA DATABASE DATE DATETIME DECLARE DELETE DESCRIBE DETERMINISTIC DO DOMAIN DOUBLE DROP ELSEIF ERROR EXCEPTION EXECUTE EXECUTION EXPLAIN EXPORT EXTENSION EXTERNAL FILE FILTER FIRST FOREIGN FORMAT FRIDAY FUNCTION FUTURE GRANT GRANTED GRANTS HOUR ILIKE IMPORTED IN INCREMENT INDEX INOUT INSERT INTEGRATION ITERATE LANGUAGE LARGE LAST LEAVE LOOP MANAGE MASKING MATCHED MATERIALIZED MAX MAXVALUE MESSAGE MIN MINUS MINVALUE ML MODEL MODIFY MONDAY MONITOR NAME NAN NFC NFKC NFD NFKD NOCACHE NOCYCLE NOORDER OBJECT OFFSET OPERATE OPTION OPTIONS ORDINAL OUT OVERLAPS OVERWRITE OWNERSHIP PERCENT PIPE POLICY PRECISION PRIMARY PRIOR PRIVILEGES PROCEDURE PUBLIC QUALIFY QUARTER RAISE READ REFERENCE_USAGE REFERENCES RENAME REPEAT REPEATABLE REPLACE RESOURCE RESTRICT RETURN RETURNS REVOKE RLIKE ROLE ROLLBACK ROW ROUTINE SAFE SATURDAY SCHEMA SCHEMAS SECOND SEPARATOR SERVER SEQUENCE SESSION_USER SHARE SOURCE STAGE START STREAM SUNDAY SYSTEM SYSTEM_TIME TABLE TABLESPACE TARGET TASK TEMP TEMPORARY THURSDAY TIME TIMESTAMP TRANSACTION TRANSIENT TRIGGER TRUNCATE TUESDAY TYPE UNIQUE UNSIGNED UNTIL UPDATE USAGE USE USE_ANY_ROLE USER VALUE VALUES VARYING VERSION VIEW WAREHOUSE WEDNESDAY WEEK WHILE WITHOUT WORK WRAPPER WRITE ZONE""" sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_clickhouse.py000066400000000000000000001016571451700765000237530ustar00rootroot00000000000000"""The clickhouse dialect. 
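
Extends the ANSI dialect with, among other things, ClickHouse's extended
join types (ANY / ALL / ASOF and friends), `ARRAY JOIN`, the table `FINAL`
modifier and lambda (`->`) expressions, as defined below.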
https://clickhouse.com/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseSegment, Bracketed, Conditional, Dedent, Delimited, Indent, LiteralSegment, Matchable, OneOf, OptionallyBracketed, ParseMode, Ref, Sequence, StringLexer, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_clickhouse_keywords import UNRESERVED_KEYWORDS ansi_dialect = load_raw_dialect("ansi") clickhouse_dialect = ansi_dialect.copy_as("clickhouse") clickhouse_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) clickhouse_dialect.replace( SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), ), QuotedLiteralSegment=OneOf( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), ) clickhouse_dialect.insert_lexer_matchers( # https://clickhouse.com/docs/en/sql-reference/functions#higher-order-functions---operator-and-lambdaparams-expr-function [StringLexer("lambda", r"->", SymbolSegment)], before="newline", ) clickhouse_dialect.add( JoinTypeKeywords=OneOf( # This case INNER [ANY,ALL] JOIN Sequence("INNER", OneOf("ALL", "ANY", optional=True)), # This case [ANY,ALL] INNER JOIN Sequence(OneOf("ALL", "ANY", optional=True), "INNER"), # This case FULL ALL OUTER JOIN Sequence( "FULL", Ref.keyword("ALL", optional=True), Ref.keyword("OUTER", optional=True), ), # This case ALL FULL OUTER JOIN Sequence( Ref.keyword("ALL", optional=True), "FULL", Ref.keyword("OUTER", optional=True), ), # This case LEFT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN Sequence( "LEFT", OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), "ASOF", optional=True, ), Ref.keyword("OUTER", optional=True), ), # This case [ANTI,SEMI,ANY,ASOF] LEFT JOIN Sequence( OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), "ASOF", ), "LEFT", ), # This case RIGHT [OUTER,ANTI,SEMI,ANY,ASOF] JOIN Sequence( "RIGHT", OneOf( "OUTER", "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), optional=True, ), Ref.keyword("OUTER", optional=True), ), # This case [OUTER,ANTI,SEMI,ANY] RIGHT JOIN Sequence( OneOf( "ANTI", "SEMI", OneOf("ANY", "ALL", optional=True), optional=True, ), "RIGHT", ), # This case CROSS JOIN "CROSS", # This case ANY JOIN "ANY", # This case ALL JOIN "ALL", ), LambdaFunctionSegment=TypedParser("lambda", SymbolSegment, type="lambda"), ) clickhouse_dialect.replace( BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add Lambda Function Ref("LambdaFunctionSegment"), ), ) clickhouse_dialect.replace( JoinLikeClauseGrammar=Sequence( AnyNumberOf( Ref("ArrayJoinClauseSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( Delimited( OneOf( # Datatypes like Nullable allow optional datatypes here. Ref("DatatypeIdentifierSegment"), Ref("NumericLiteralSegment"), ), # The brackets might be empty for some cases... optional=True, ), ) class JoinClauseSegment(ansi.JoinClauseSegment): """Any number of join clauses, including the `JOIN` keyword. 
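    For illustration (a sketch; table names are placeholders):
        SELECT * FROM t1 ANY LEFT JOIN t2 USING (id)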
https://clickhouse.com/docs/en/sql-reference/statements/select/join/#supported-types-of-join """ match_grammar = OneOf( Sequence( Ref("JoinTypeKeywords", optional=True), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, Conditional(Indent, indented_using_on=True), OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Sequence( "USING", Conditional(Indent, indented_using_on=False), Delimited( OneOf( Bracketed( Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Delimited(Ref("SingleIdentifierGrammar")), ), ), Conditional(Dedent, indented_using_on=False), ), # Requires True for CROSS JOIN optional=True, ), Conditional(Dedent, indented_using_on=True), ), ) class ArrayJoinClauseSegment(BaseSegment): """[LEFT] ARRAY JOIN does not support join conditions and doesn't work as a real JOIN. https://clickhouse.com/docs/en/sql-reference/statements/select/array-join """ type = "array_join_clause" match_grammar: Matchable = Sequence( Ref.keyword("LEFT", optional=True), "ARRAY", Ref("JoinKeywordsGrammar"), Indent, Delimited( Ref("SelectClauseElementSegment"), ), Dedent, ) class CTEDefinitionSegment(ansi.CTEDefinitionSegment): """A CTE Definition from a WITH statement. Overridden from ANSI to allow expression CTEs. https://clickhouse.com/docs/en/sql-reference/statements/select/with/ """ type = "common_table_expression" match_grammar: Matchable = OneOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("CTEColumnList", optional=True), "AS", Bracketed( # Ephemeral here to subdivide the query. Ref("SelectableGrammar"), parse_mode=ParseMode.GREEDY, ), ), Sequence( Ref("ExpressionSegment"), "AS", Ref("SingleIdentifierGrammar"), ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause.""" type = "alias_expression" match_grammar: Matchable = Sequence( Indent, Ref.keyword("AS", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), # Column alias in VALUES clause Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Ref("SingleQuotedIdentifierSegment"), exclude=OneOf( "LATERAL", "WINDOW", "KEYS", ), ), Dedent, ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. Overridden from ANSI to allow FINAL modifier. https://clickhouse.com/docs/en/sql-reference/statements/select/from#final-modifier """ type = "from_expression_element" match_grammar: Matchable = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), Ref("JoinLikeClauseGrammar"), "FINAL", Ref("JoinClauseSegment"), ), optional=True, ), Ref.keyword("FINAL", optional=True), # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True), Ref("SamplingExpressionSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) class TableEngineFunctionSegment(BaseSegment): """A ClickHouse `ENGINE` clause function. With this segment we attempt to match all possible engines. """ type = "table_engine_function" match_grammar: Matchable = Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... 
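# Illustrative (assumed examples): this accepts `ENGINE = MergeTree()` with
# empty brackets, and `ENGINE = Memory` with the brackets omitted entirely.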
optional=True, ), # Engine functions may omit brackets. optional=True, parse_mode=ParseMode.GREEDY, ), ), ) class OnClusterClauseSegment(BaseSegment): """An `ON CLUSTER` clause.""" type = "on_cluster_clause" match_grammar = Sequence( "ON", "CLUSTER", Ref("SingleIdentifierGrammar"), ) class TableEngineSegment(BaseSegment): """An `ENGINE` used in `CREATE TABLE`.""" type = "engine" match_grammar = Sequence( "ENGINE", Ref("EqualsSegment"), Sequence( Ref("TableEngineFunctionSegment"), AnySetOf( Sequence( "ORDER", "BY", OneOf( Ref("BracketedColumnReferenceListGrammar"), Ref("ColumnReferenceSegment"), ), ), Sequence( "PARTITION", "BY", Ref("ExpressionSegment"), ), Sequence( "PRIMARY", "KEY", Ref("ExpressionSegment"), ), Sequence( "SAMPLE", "BY", Ref("ExpressionSegment"), ), Sequence( "SETTINGS", Delimited( Sequence( Ref("NakedIdentifierSegment"), Ref("EqualsSegment"), OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), ), ), ), ) class DatabaseEngineFunctionSegment(BaseSegment): """A ClickHouse `ENGINE` clause function. With this segment we attempt to match all possible engines. """ type = "engine_function" match_grammar: Matchable = Sequence( Sequence( OneOf( "ATOMIC", "MYSQL", "MATERIALIZEDMYSQL", "LAZY", "POSTGRESQL", "MATERIALIZEDPOSTGRESQL", "REPLICATED", "SQLITE", ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), # Engine functions may omit brackets. optional=True, parse_mode=ParseMode.GREEDY, ), ), ) class DatabaseEngineSegment(BaseSegment): """An `ENGINE` used in `CREATE DATABASE`.""" type = "database_engine" match_grammar = Sequence( "ENGINE", Ref("EqualsSegment"), Sequence( Ref("DatabaseEngineFunctionSegment"), AnySetOf( Sequence( "ORDER", "BY", OneOf( Ref("BracketedColumnReferenceListGrammar"), Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence( "PARTITION", "BY", Ref("ExpressionSegment"), optional=True, ), Sequence( "PRIMARY", "KEY", Ref("ExpressionSegment"), optional=True, ), Sequence( "SAMPLE", "BY", Ref("ExpressionSegment"), optional=True, ), Sequence( "SETTINGS", Delimited( AnyNumberOf( Sequence( Ref("NakedIdentifierSegment"), Ref("EqualsSegment"), OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ) ), optional=True, ), ), ), ) class ColumnTTLSegment(BaseSegment): """A TTL clause for columns as used in CREATE TABLE. Specified in https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-column-ttl """ type = "column_ttl_segment" match_grammar = Sequence( "TTL", Ref("ExpressionSegment"), ) class TableTTLSegment(BaseSegment): """A TTL clause for tables as used in CREATE TABLE. Specified in https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl """ type = "table_ttl_segment" match_grammar = Sequence( "TTL", Delimited( Sequence( Ref("ExpressionSegment"), OneOf( "DELETE", Sequence( "TO", "VOLUME", Ref("QuotedLiteralSegment"), ), Sequence( "TO", "DISK", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), ) ), ) class ColumnConstraintSegment(BaseSegment): """ClickHouse-specific column constraints. 
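    e.g. within a column definition (illustrative; names are placeholders):
        age UInt8 DEFAULT 0, CONSTRAINT c_age CHECK (age < 200)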
As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/table#constraints """ type = "column_constraint_segment" match_grammar = AnySetOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( OneOf( "DEFAULT", "MATERIALIZED", "ALIAS", ), OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ), Sequence( "EPHEMERAL", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), optional=True, ), ), Ref("PrimaryKeyGrammar"), Sequence( "CODEC", Ref("FunctionContentsGrammar"), optional=True, ), Ref("ColumnTTLSegment"), ), ) ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/database """ type = "create_database_statement" match_grammar = Sequence( "CREATE", "DATABASE", Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), AnySetOf( Ref("OnClusterClauseSegment", optional=True), Ref("DatabaseEngineSegment", optional=True), Sequence( "COMMENT", Ref("SingleIdentifierGrammar"), optional=True, ), Sequence( "SETTINGS", Delimited( Sequence( Ref("NakedIdentifierSegment"), Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), Ref("BooleanLiteralGrammar"), ), optional=True, ), ), optional=True, ), ), AnyNumberOf( "TABLE", "OVERRIDE", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("ColumnConstraintSegment"), ), optional=True, ), optional=True, ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/create/table/ """ type = "create_table_statement" match_grammar: Matchable = Sequence( "CREATE", OneOf( Ref("OrReplaceGrammar"), Ref.keyword("TEMPORARY"), optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), OneOf( # CREATE TABLE (...): Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("ColumnConstraintSegment"), ), ), # Column definition may be missing if using AS SELECT optional=True, ), Ref("TableEngineSegment"), # CREATE TABLE (...) AS SELECT: Sequence( "AS", Ref("SelectableGrammar"), optional=True, ), ), # CREATE TABLE AS other_table: Sequence( "AS", Ref("TableReferenceSegment"), Ref("TableEngineSegment", optional=True), ), # CREATE TABLE AS table_function(): Sequence( "AS", Ref("FunctionSegment"), ), ), AnySetOf( Sequence( "COMMENT", OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), ), Ref("TableTTLSegment"), optional=True, ), Ref("TableEndClauseSegment", optional=True), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. 
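    For illustration (a sketch; object names are placeholders):
        CREATE MATERIALIZED VIEW mv1 TO dest_table AS SELECT a, b FROM src_table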
https://clickhouse.com/docs/en/sql-reference/statements/create/table/ """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), OneOf( Sequence( "TO", Ref("TableReferenceSegment"), Ref("TableEngineSegment", optional=True), ), Sequence( Ref("TableEngineSegment", optional=True), Sequence("POPULATE", optional=True), ), ), "AS", Ref("SelectableGrammar"), Ref("TableEndClauseSegment", optional=True), ) class DropTableStatementSegment(ansi.DropTableStatementSegment): """A `DROP TABLE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_table_statement" match_grammar = Sequence( "DROP", Ref.keyword("TEMPORARY", optional=True), "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), Ref.keyword("SYNC", optional=True), ) class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment): """A `DROP DATABASE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_database_statement" match_grammar = Sequence( "DROP", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), Ref.keyword("SYNC", optional=True), ) class DropDictionaryStatementSegment(BaseSegment): """A `DROP DICTIONARY` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_dictionary_statement" match_grammar = Sequence( "DROP", "DICTIONARY", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref.keyword("SYNC", optional=True), ) class DropUserStatementSegment(ansi.DropUserStatementSegment): """A `DROP USER` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_user_statement" match_grammar = Sequence( "DROP", "USER", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref("OnClusterClauseSegment", optional=True), ) class DropRoleStatementSegment(ansi.DropRoleStatementSegment): """A `DROP ROLE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_role_statement" match_grammar = Sequence( "DROP", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref("OnClusterClauseSegment", optional=True), ) class DropQuotaStatementSegment(BaseSegment): """A `DROP QUOTA` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_quota_statement" match_grammar = Sequence( "DROP", "QUOTA", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref("OnClusterClauseSegment", optional=True), ) class DropSettingProfileStatementSegment(BaseSegment): """A `DROP setting PROFILE` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_setting_profile_statement" match_grammar = Sequence( "DROP", Delimited( Ref("NakedIdentifierSegment"), min_delimiters=0, ), "PROFILE", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref("OnClusterClauseSegment", optional=True), ) class DropViewStatementSegment(ansi.DropViewStatementSegment): """A `DROP VIEW` statement. 
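    For illustration (a sketch; object names are placeholders):
        DROP VIEW IF EXISTS db1.v1 ON CLUSTER cluster1 SYNC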
As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_view_statement" match_grammar = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), Ref.keyword("SYNC", optional=True), ) class DropFunctionStatementSegment(ansi.DropFunctionStatementSegment): """A `DROP FUNCTION` statement. As specified in https://clickhouse.com/docs/en/sql-reference/statements/drop/ """ type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Ref("OnClusterClauseSegment", optional=True), ) class SystemMergesSegment(BaseSegment): """A `SYSTEM ... MERGES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_merges_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "MERGES", OneOf( Sequence( "ON", "VOLUME", Ref("ObjectReferenceSegment"), ), Ref("TableReferenceSegment"), ), ) class SystemTTLMergesSegment(BaseSegment): """A `SYSTEM ... TTL MERGES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_ttl_merges_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "TTL", "MERGES", Ref("TableReferenceSegment", optional=True), ) class SystemMovesSegment(BaseSegment): """A `SYSTEM ... MOVES` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_moves_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "MOVES", Ref("TableReferenceSegment", optional=True), ) class SystemReplicaSegment(BaseSegment): """A `SYSTEM ... REPLICA` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replica_segment" match_grammar = OneOf( Sequence( "SYNC", "REPLICA", Ref("OnClusterClauseSegment", optional=True), Ref("TableReferenceSegment"), Sequence("STRICT", optional=True), ), Sequence( "DROP", "REPLICA", Ref("SingleIdentifierGrammar"), Sequence( "FROM", OneOf( Sequence( "DATABASE", Ref("ObjectReferenceSegment"), ), Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "ZKPATH", Ref("PathSegment"), ), ), optional=True, ), ), Sequence( "RESTART", "REPLICA", Ref("TableReferenceSegment"), ), Sequence( "RESTORE", "REPLICA", Ref("TableReferenceSegment"), Ref("OnClusterClauseSegment", optional=True), ), ) class SystemFilesystemSegment(BaseSegment): """A `SYSTEM ... FILESYSTEM` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_filesystem_segment" match_grammar = Sequence( "DROP", "FILESYSTEM", "CACHE", ) class SystemReplicatedSegment(BaseSegment): """A `SYSTEM ... REPLICATED` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replicated_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "REPLICATED", "SENDS", Ref("TableReferenceSegment", optional=True), ) class SystemReplicationSegment(BaseSegment): """A `SYSTEM ... REPLICATION` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_replication_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "REPLICATION", "QUEUES", Ref("TableReferenceSegment", optional=True), ) class SystemFetchesSegment(BaseSegment): """A `SYSTEM ... FETCHES` statement. 
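    For illustration (a sketch; the table name is a placeholder):
        SYSTEM STOP FETCHES db1.t1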
https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_fetches_segment" match_grammar = Sequence( OneOf( "START", "STOP", ), "FETCHES", Ref("TableReferenceSegment", optional=True), ) class SystemDistributedSegment(BaseSegment): """A `SYSTEM ... DISTRIBUTED` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_distributed_segment" match_grammar = Sequence( OneOf( Sequence( OneOf( "START", "STOP", ), "DISTRIBUTED", "SENDS", Ref("TableReferenceSegment"), ), Sequence( "FLUSH", "DISTRIBUTED", Ref("TableReferenceSegment"), ), ), # Ref("TableReferenceSegment"), ) class SystemModelSegment(BaseSegment): """A `SYSTEM ... MODEL` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_model_segment" match_grammar = Sequence( "RELOAD", OneOf( Sequence( "MODELS", Ref("OnClusterClauseSegment", optional=True), ), Sequence( "MODEL", AnySetOf( Ref("OnClusterClauseSegment", optional=True), Ref("PathSegment"), ), ), ), ) class SystemFileSegment(BaseSegment): """A `SYSTEM ... FILE` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_file_segment" match_grammar = Sequence( "SYNC", "FILE", "CACHE", ) class SystemUnfreezeSegment(BaseSegment): """A `SYSTEM ... UNFREEZE` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_unfreeze_segment" match_grammar = Sequence( "UNFREEZE", "WITH", "NAME", Ref("ObjectReferenceSegment"), ) class SystemStatementSegment(BaseSegment): """A `SYSTEM ...` statement. https://clickhouse.com/docs/en/sql-reference/statements/system """ type = "system_statement" match_grammar: Matchable = Sequence( "SYSTEM", OneOf( Ref("SystemMergesSegment"), Ref("SystemTTLMergesSegment"), Ref("SystemMovesSegment"), Ref("SystemReplicaSegment"), Ref("SystemReplicatedSegment"), Ref("SystemReplicationSegment"), Ref("SystemFetchesSegment"), Ref("SystemDistributedSegment"), Ref("SystemFileSegment"), Ref("SystemFilesystemSegment"), Ref("SystemUnfreezeSegment"), Ref("SystemModelSegment"), ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CreateMaterializedViewStatementSegment"), Ref("DropDictionaryStatementSegment"), Ref("DropQuotaStatementSegment"), Ref("DropSettingProfileStatementSegment"), Ref("SystemStatementSegment"), ] ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_clickhouse_keywords.py000066400000000000000000000055621451700765000257000ustar00rootroot00000000000000"""A list of ClickHouse keywords.""" UNRESERVED_KEYWORDS = [ # All keywords are unreserved. They are only treated as reserved according to # context. # See: https://clickhouse.com/docs/en/sql-reference/syntax/#keywords # This means that, for example, using `join` or `select` as table identifiers # without quotes is allowed. 
"ADD", "AFTER", "ALIAS", "ALL", "ALTER", "AND", "ANTI", "ANY", "ARRAY", "AS", "ASCENDING", "ASOF", "AST", "ASYNC", "ATOMIC", "ATTACH", "BETWEEN", "BOTH", "BY", "CHECK", "CLEAR", "CLUSTER", "CODEC", "COLLATE", "COLUMN", "COMMENT", "CONSTRAINT", "CREATE", "CUBE", "DATABASE", "DATABASES", "DATE", "DAY", "DEDUPLICATE", "DEFAULT", "DELAY", "DELETE", "DESC", "DESCENDING", "DESCRIBE", "DETACH", "DICTIONARIES", "DICTIONARY", "DISK", "DISTINCT", "DISTRIBUTED", "DROP", "ELSE", "END", "ENGINE", "EPHEMERAL", "EVENTS", "EXISTS", "EXPLAIN", "EXPRESSION", "EXTRACT", "FETCHES", "FILE", "FILESYSTEM", "FINAL", "FIRST", "FLUSH", "FOR", "FORMAT", "FREEZE", "FROM", "FUNCTION", "GLOBAL", "GRANULARITY", "GROUP", "HAVING", "HIERARCHICAL", "HOUR", "ID", "IF", "ILIKE", "IN", "INDEX", "INF", "INJECTIVE", "INSERT", "INTO", "IS", "IS_OBJECT_ID", "KEY", "KILL", "LAST", "LAYOUT", "LAZY", "LEADING", "LIFETIME", "LIKE", "LIMIT", "LIVE", "LOCAL", "LOGS", "MATERIALIZE", "MATERIALIZED", "MATERIALIZEDMYSQL", "MATERIALIZEDPOSTGRESQL", "MAX", "MERGES", "MIN", "MINUTE", "MODEL", "MODELS", "MODIFY", "MONTH", "MOVE", "MOVES", "MUTATION", "MYSQL", "NAN_SQL", "NO", "NOT", "NULLS", "NULL_SQL", "OFFSET", "OPTIMIZE", "OR", "OUTFILE", "OVERRIDE", "POPULATE", "POSTGRESQL", "PREWHERE", "PRIMARY", "PROFILE", "PROJECTION", "QUARTER", "QUOTA", "QUEUES", "RANGE", "RELOAD", "REMOVE", "RENAME", "REPLACE", "REPLICA", "REPLICATED", "REPLICATION", "ROLLUP", "SAMPLE", "SECOND", "SEMI", "SEND", "SENDS", "SETTINGS", "SHOW", "SOURCE", "SQLITE", "START", "STOP", "SUBSTRING", "SYNC", "SYNTAX", "SYSTEM", "TABLE", "TABLES", "TEMPORARY", "TEST", "THEN", "TIES", "TIMEOUT", "TIMESTAMP", "TO", "TOP", "TOTALS", "TRAILING", "TRIM", "TRUNCATE", "TTL", "TYPE", "UNFREEZE", "UPDATE", "USE", "UUID", "VALUES", "VIEW", "VOLUME", "WATCH", "WEEK", "WHEN", "WHERE", "WITH", "YEAR", "ZKPATH", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_databricks.py000066400000000000000000000104021451700765000237140ustar00rootroot00000000000000"""The Databricks Dialect. Functionally, it is quite similar to SparkSQL, however it's much less strict on keywords. It also has some extensions. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import BaseSegment, OneOf, Ref, Sequence from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_sparksql as sparksql from sqlfluff.dialects.dialect_databricks_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) sparksql_dialect = load_raw_dialect("sparksql") databricks_dialect = sparksql_dialect.copy_as("databricks") databricks_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) databricks_dialect.sets("unreserved_keywords").update( sparksql_dialect.sets("reserved_keywords") ) databricks_dialect.sets("unreserved_keywords").difference_update(RESERVED_KEYWORDS) databricks_dialect.sets("reserved_keywords").clear() databricks_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) # Object References class CatalogReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a catalog. https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html """ type = "catalog_reference" # Data Definition Statements # https://docs.databricks.com/sql/language-manual/index.html#ddl-statements class AlterCatalogStatementSegment(BaseSegment): """An `ALTER CATALOG` statement. 
https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-alter-catalog.html """ type = "alter_catalog_statement" match_grammar = Sequence( "ALTER", "CATALOG", Ref("CatalogReferenceSegment"), Ref.keyword("SET", optional=True), Sequence( "OWNER", "TO", Ref("SingleIdentifierGrammar"), ), ) class CreateCatalogStatementSegment(BaseSegment): """A `CREATE CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-catalog.html """ type = "create_catalog_statement" match_grammar = Sequence( "CREATE", "CATALOG", Ref("IfNotExistsGrammar", optional=True), Ref("CatalogReferenceSegment"), Ref("CommentGrammar", optional=True), ) class DropCatalogStatementSegment(BaseSegment): """A `DROP CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-drop-catalog.html """ type = "drop_catalog_statement" match_grammar = Sequence( "DROP", "CATALOG", Ref("IfExistsGrammar", optional=True), Ref("CatalogReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class UseCatalogStatementSegment(BaseSegment): """A `USE CATALOG` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html """ type = "use_catalog_statement" match_grammar = Sequence( "USE", "CATALOG", Ref("CatalogReferenceSegment"), ) class UseDatabaseStatementSegment(sparksql.UseDatabaseStatementSegment): """A `USE DATABASE` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-usedb.html """ type = "use_database_statement" match_grammar = Sequence( "USE", OneOf("DATABASE", "SCHEMA", optional=True), Ref("DatabaseReferenceSegment"), ) class SetTimeZoneStatementSegment(BaseSegment): """A `SET TIME ZONE` statement. https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html """ type = "set_timezone_statement" match_grammar = Sequence( "SET", "TIME", "ZONE", OneOf("LOCAL", Ref("QuotedLiteralSegment"), Ref("IntervalExpressionSegment")), ) class StatementSegment(sparksql.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = sparksql.StatementSegment.match_grammar.copy( # Segments defined in Databricks SQL dialect insert=[ # Unity Catalog Ref("AlterCatalogStatementSegment"), Ref("CreateCatalogStatementSegment"), Ref("DropCatalogStatementSegment"), Ref("UseCatalogStatementSegment"), Ref("SetTimeZoneStatementSegment"), ] ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_databricks_keywords.py000066400000000000000000000006431451700765000256510ustar00rootroot00000000000000"""A list of databricks reserved keywords. https://docs.databricks.com/sql/language-manual/sql-ref-reserved-words.html """ RESERVED_KEYWORDS = [ "ANTI", "CROSS", "EXCEPT", "FULL", "INNER", "INTERSECT", "JOIN", "LATERAL", "LEFT", "MINUS", "NATURAL", "ON", "RIGHT", "SEMI", "UNION", "USING", ] UNRESERVED_KEYWORDS = [ # Unity Catalog "CATALOG" ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_db2.py000066400000000000000000000053641451700765000222670ustar00rootroot00000000000000"""The Db2 dialect. 
https://www.ibm.com/docs/en/i/7.4?topic=overview-db2-i """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CodeSegment, CommentSegment, IdentifierSegment, OneOf, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, WordSegment, ) from sqlfluff.dialects.dialect_db2_keywords import UNRESERVED_KEYWORDS ansi_dialect = load_raw_dialect("ansi") db2_dialect = ansi_dialect.copy_as("db2") db2_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) db2_dialect.replace( # Db2 allows # in field names, and doesn't use it as a comment NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z0-9_#]*[A-Z#][A-Z0-9_#]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), PostFunctionGrammar=OneOf( Ref("OverClauseSegment"), Ref("WithinGroupClauseSegment"), ), Expression_C_Grammar=OneOf( Sequence("EXISTS", Bracketed(Ref("SelectableGrammar"))), # should be first priority, otherwise EXISTS() would be matched as a function Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf(Ref("TimeZoneGrammar")), ), Ref("ShorthandCastSegment"), Sequence(Ref("NumericLiteralSegment"), OneOf("DAYS", "DAY")), ), ) db2_dialect.patch_lexer_matchers( [ # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # In Db2, the only escape character is ' for single quote strings RegexLexer( "single_quote", r"(?s)('')+?(?!')|('.*?(?10` This is supported by: `SELECT`, `WHERE`, `GROUP BY`, `ORDER BY`, `HAVING`, `QUALIFY` Note: it's not necessary to use `LOCAL` within `ORDER BY` and `QUALIFY` because the alias could be accessed directly (...but we can). """ type = "local_alias_segment" match_grammar = Sequence("LOCAL", Ref("DotSegment"), Ref("SingleIdentifierGrammar")) ############################ # SCHEMA ############################ class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement. https://docs.exasol.com/sql/create_schema.htm """ type = "create_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), ) class CreateVirtualSchemaStatementSegment(BaseSegment): """A `CREATE VIRTUAL SCHEMA` statement. https://docs.exasol.com/sql/create_schema.htm """ type = "create_virtual_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", "VIRTUAL", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), "USING", Ref("ObjectReferenceSegment"), Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ) ), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. 
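    For illustration (a sketch; object names are placeholders):
        ALTER SCHEMA my_schema CHANGE OWNER user_1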
https://docs.exasol.com/sql/alter_schema.htm """ type = "alter_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "SET", "RAW_SIZE_LIMIT", Ref("EqualsSegment"), AnyNumberOf(Ref("NumericLiteralSegment"), Ref("StarSegment")), ), Sequence("CHANGE", "OWNER", Ref("SingleIdentifierGrammar")), ), ) class AlterVirtualSchemaStatementSegment(BaseSegment): """An `ALTER VIRTUAL SCHEMA` statement. https://docs.exasol.com/sql/alter_schema.htm """ type = "alter_virtual_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "VIRTUAL", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "SET", AnyNumberOf( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ) ), ), Sequence( "REFRESH", Sequence( "TABLES", Delimited(Ref("TableReferenceSegment")), optional=True, ), ), Sequence("CHANGE", "OWNER", Ref("SingleIdentifierGrammar")), ), ) class DropSchemaStatementSegment(BaseSegment): """A `DROP SCHEMA` statement for EXASOL schema. https://docs.exasol.com/sql/drop_schema.htm """ type = "drop_schema_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", Ref.keyword("FORCE", optional=True), Ref.keyword("VIRTUAL", optional=True), "SCHEMA", Ref("IfExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # VIEW ############################ class ViewReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a view.""" type = "view_reference" class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. https://docs.exasol.com/sql/create_view.htm """ type = "create_view_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("FORCE", optional=True), "VIEW", Ref("ViewReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CommentClauseSegment", optional=True), ), ), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("CommentClauseSegment", optional=True), ) class DropViewStatementSegment(BaseSegment): """A `DROP VIEW` statement with CASCADE and RESTRICT option. https://docs.exasol.com/sql/drop_view.htm """ type = "drop_view_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("ViewReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # TABLE ############################ class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. 
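    For illustration (a sketch; names are placeholders):
        CREATE TABLE t1 (id DECIMAL(18,0), name VARCHAR(100))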
https://docs.exasol.com/sql/create_table.htm """ type = "create_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Bracketed( Sequence( Delimited( Ref("TableContentDefinitionSegment"), ), Sequence( Ref("CommaSegment"), Ref("TableDistributionPartitionClause"), optional=True, ), ), ), # Create AS syntax: Sequence( "AS", Ref("SelectableGrammar"), Ref("WithDataClauseSegment", optional=True), ), # Create like syntax Ref("CreateTableLikeClauseSegment"), ), Ref("CommentClauseSegment", optional=True), ) class TableContentDefinitionSegment(BaseSegment): """The table content definition.""" type = "table_content_definition" match_grammar = OneOf( Ref("ColumnDefinitionSegment"), Ref("TableOutOfLineConstraintSegment"), Ref("CreateTableLikeClauseSegment"), ) class ColumnDatatypeSegment(BaseSegment): """Sequence of a column and its datatype definition.""" type = "column_datatype_definition" match_grammar = Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( # The brackets might be empty for some cases... Delimited(Ref("NumericLiteralSegment"), optional=True), # In Exasol, some types offer an optional MAX # qualifier of BIT, BYTE or CHAR OneOf("BIT", "BYTE", "CHAR", optional=True), ) class DatatypeSegment(BaseSegment): """A data type segment. Supports all Exasol datatypes and their aliases. https://docs.exasol.com/sql_references/data_types/datatypedetails.htm https://docs.exasol.com/sql_references/data_types/datatypealiases.htm """ type = "data_type" match_grammar = OneOf( # Numeric Data Types Sequence( OneOf("DECIMAL", "DEC", "NUMBER", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "BIGINT", Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)), "FLOAT", "INT", "INTEGER", "REAL", "SHORTINT", "TINYINT", "SMALLINT", OneOf("BOOLEAN", "BOOL"), OneOf( "DATE", Sequence( "TIMESTAMP", Sequence("WITH", "LOCAL", "TIME", "ZONE", optional=True) ), ), Sequence( "INTERVAL", "YEAR", Ref("BracketedArguments", optional=True), "TO", "MONTH", ), Sequence( "INTERVAL", "DAY", Ref("BracketedArguments", optional=True), "TO", "SECOND", Ref("BracketedArguments", optional=True), ), Sequence( "GEOMETRY", Ref("BracketedArguments", optional=True), ), Sequence( "HASHTYPE", Ref("BracketedArguments", optional=True), ), Sequence( OneOf( Sequence( OneOf( Sequence("CHAR", Ref.keyword("VARYING", optional=True)), "VARCHAR", "VARCHAR2", "NCHAR", "NVARCHAR", "NVARCHAR2", ), Ref("BracketedArguments", optional=True), ), Sequence("LONG", "VARCHAR"), Sequence( "CHARACTER", Sequence( OneOf(Sequence("LARGE", "OBJECT"), "VARYING", optional=True), Ref("BracketedArguments", optional=True), ), ), Sequence( "CLOB", Ref("BracketedArguments", optional=True), ), ), Ref("CharCharacterSetGrammar", optional=True), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. 
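    For illustration (a sketch; the literal is a placeholder):
        INTERVAL '2 23:10:59' DAY(2) TO SECOND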
https://docs.exasol.com/db/latest/sql_references/literals.htm """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", Ref("QuotedLiteralSegment"), OneOf( # INTERVAL '5' MONTH # INTERVAL '130' MONTH (3) Sequence( "MONTH", Bracketed(Ref("NumericLiteralSegment"), optional=True), ), # INTERVAL '27' YEAR # INTERVAL '100-1' YEAR(3) TO MONTH Sequence( "YEAR", Bracketed(Ref("NumericLiteralSegment"), optional=True), Sequence("TO", "MONTH", optional=True), ), # INTERVAL '5' DAY # INTERVAL '100' HOUR(3) # INTERVAL '1.99999' SECOND(2,2) # INTERVAL '23:10:59.123' HOUR(2) TO SECOND(3) Sequence( OneOf( Sequence( OneOf("DAY", "HOUR", "MINUTE"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), Sequence( "SECOND", Bracketed( Delimited(Ref("NumericLiteralSegment")), optional=True, ), ), ), Sequence( "TO", OneOf( "HOUR", "MINUTE", Sequence( "SECOND", Bracketed(Ref("NumericLiteralSegment"), optional=True), ), ), optional=True, ), ), ), ) class ColumnDefinitionSegment(BaseSegment): """Column definition within a `CREATE / ALTER TABLE` statement.""" type = "column_definition" match_grammar = Sequence( Ref("ColumnDatatypeSegment"), Ref("ColumnConstraintSegment", optional=True), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar = Sequence( OneOf( Sequence( "DEFAULT", OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment")) ), Sequence( # IDENTITY(1000) or IDENTITY 1000 or IDENTITY "IDENTITY", OptionallyBracketed(Ref("NumericLiteralSegment"), optional=True), ), optional=True, ), Ref("TableInlineConstraintSegment", optional=True), Ref("CommentClauseSegment", optional=True), ) class TableInlineConstraintSegment(BaseSegment): """Inline table constraint for CREATE / ALTER TABLE.""" type = "table_constraint_definition" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref( "SingleIdentifierGrammar", # exclude UNRESERVED_KEYWORDS which could be used as NakedIdentifier # to make e.g. `id NUMBER CONSTRAINT PRIMARY KEY` work (which is equal # to just `id NUMBER PRIMARY KEY`) exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"), optional=True, ), optional=True, ), OneOf( # (NOT) NULL Sequence(Ref.keyword("NOT", optional=True), "NULL"), # PRIMARY KEY Ref("PrimaryKeyGrammar"), # FOREIGN KEY Ref("ForeignKeyReferencesClauseGrammar"), ), Ref("TableConstraintEnableDisableGrammar", optional=True), ) class TableOutOfLineConstraintSegment(BaseSegment): """Out of line table constraint for CREATE / ALTER TABLE.""" type = "table_constraint_definition" match_grammar = Sequence( Sequence( "CONSTRAINT", Ref( "SingleIdentifierGrammar", # exclude UNRESERVED_KEYWORDS which could be used as NakedIdentifier # to make e.g. 
`id NUMBER, CONSTRAINT PRIMARY KEY(id)` work (which is # equal to just `id NUMBER, PRIMARY KEY(id)`) exclude=OneOf("NOT", "NULL", "PRIMARY", "FOREIGN"), optional=True, ), optional=True, ), OneOf( # PRIMARY KEY Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), ), # FOREIGN KEY Sequence( Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Ref("ForeignKeyReferencesClauseGrammar"), ), ), Ref("TableConstraintEnableDisableGrammar", optional=True), ) class CreateTableLikeClauseSegment(BaseSegment): """`CREATE TABLE` LIKE clause.""" type = "table_like_clause" match_grammar = Sequence( "LIKE", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), optional=True, ), Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS", optional=True), Sequence(OneOf("INCLUDING", "EXCLUDING"), "IDENTITY", optional=True), Sequence(OneOf("INCLUDING", "EXCLUDING"), "COMMENTS", optional=True), ) class TableDistributionPartitionClause(BaseSegment): """`CREATE / ALTER TABLE` distribution / partition clause. The DISTRIBUTE/PARTITION clause doesn't accept the identifiers in brackets """ type = "table_distribution_partition_clause" match_grammar = OneOf( Sequence( Ref("TableDistributeByGrammar"), Ref("CommaSegment", optional=True), Ref("TablePartitionByGrammar", optional=True), ), Sequence( Ref("TablePartitionByGrammar"), Ref("CommaSegment", optional=True), Ref("TableDistributeByGrammar", optional=True), ), ) class AlterTableStatementSegment(BaseSegment): """`ALTER TABLE` statement.""" type = "alter_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = OneOf( Ref("AlterTableColumnSegment"), Ref("AlterTableConstraintSegment"), Ref("AlterTableDistributePartitionSegment"), ) class AlterTableColumnSegment(BaseSegment): """An `ALTER TABLE` statement to add, modify, drop or rename columns. 
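    For illustration (a sketch; names are placeholders):
        ALTER TABLE t1 ADD COLUMN new_col VARCHAR(20)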
https://docs.exasol.com/sql/alter_table(column).htm """ type = "alter_table_column_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Ref("AlterTableAddColumnSegment"), Ref("AlterTableDropColumnSegment"), Ref("AlterTableModifyColumnSegment"), Ref("AlterTableRenameColumnSegment"), Ref("AlterTableAlterColumnSegment"), ), ) class AlterTableAddColumnSegment(BaseSegment): """ALTER TABLE ADD..""" type = "alter_table_add_column" match_grammar = Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), OptionallyBracketed(Ref("ColumnDefinitionSegment")), ) class AlterTableDropColumnSegment(BaseSegment): """ALTER TABLE DROP..""" type = "alter_table_drop_column" match_grammar = Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), Sequence("CASCADE", "CONSTRAINTS", optional=True), ) class AlterTableModifyColumnSegment(BaseSegment): """ALTER TABLE MODIFY..""" type = "alter_table_modify_column" match_grammar = Sequence( "MODIFY", Ref.keyword("COLUMN", optional=True), OptionallyBracketed( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment", optional=True), Ref("ColumnConstraintSegment", optional=True), ), ) class AlterTableRenameColumnSegment(BaseSegment): """ALTER TABLE RENAME..""" type = "alter_table_rename_column" match_grammar = Sequence( "RENAME", "COLUMN", Ref("SingleIdentifierGrammar"), "TO", Ref("SingleIdentifierGrammar"), ) class AlterTableAlterColumnSegment(BaseSegment): """ALTER TABLE ALTER..""" type = "alter_table_alter_column" match_grammar = Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("SingleIdentifierGrammar"), OneOf( Sequence( "SET", OneOf( Sequence( # IDENTITY(1000) or IDENTITY 1000 "IDENTITY", OptionallyBracketed(Ref("NumericLiteralSegment")), ), Sequence( "DEFAULT", OneOf(Ref("LiteralGrammar"), Ref("BareFunctionSegment")), ), ), ), Sequence("DROP", OneOf("IDENTITY", "DEFAULT")), ), ) class AlterTableConstraintSegment(BaseSegment): """An `ALTER TABLE` statement to add, modify, drop or rename constraints. https://docs.exasol.com/sql/alter_table(constraints).htm """ type = "alter_table_constraint_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Sequence("ADD", Ref("TableOutOfLineConstraintSegment")), Sequence( "MODIFY", OneOf( Sequence("CONSTRAINT", Ref("SingleIdentifierGrammar")), Ref("PrimaryKeyGrammar"), ), Ref("TableConstraintEnableDisableGrammar"), ), Sequence( "DROP", OneOf( Sequence( "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Ref("PrimaryKeyGrammar"), ), ), Sequence( "RENAME", "CONSTRAINT", Ref("SingleIdentifierGrammar"), "TO", Ref("SingleIdentifierGrammar"), ), ), ) class AlterTableDistributePartitionSegment(BaseSegment): """An `ALTER TABLE` statement to add or drop distribution / partition keys. 
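    For illustration (a sketch; names are placeholders):
        ALTER TABLE t1 DISTRIBUTE BY shop_id, PARTITION BY order_date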
https://docs.exasol.com/sql/alter_table(distribution_partitioning).htm """ type = "alter_table_distribute_partition_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( Ref("TableDistributionPartitionClause"), Sequence( "DROP", OneOf( Sequence( Ref.keyword("DISTRIBUTION"), Ref.keyword("AND", optional=True), Ref.keyword("PARTITION", optional=True), ), Sequence( Ref.keyword("PARTITION"), Ref.keyword("AND", optional=True), Ref.keyword("DISTRIBUTION", optional=True), ), ), "KEYS", ), ), ) class DropTableStatementSegment(BaseSegment): """A `DROP` table statement. https://docs.exasol.com/sql/drop_table.htm """ type = "drop_table_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), Sequence("CASCADE", "CONSTRAINTS", optional=True), ) class CommentClauseSegment(BaseSegment): """A comment clause within `CREATE TABLE` / `CREATE VIEW` statements. e.g. COMMENT IS 'view/table/column description' """ type = "comment_clause" match_grammar = Sequence("COMMENT", "IS", Ref("QuotedLiteralSegment")) ############################ # RENAME ############################ class RenameStatementSegment(BaseSegment): """`RENAME` statement. https://docs.exasol.com/sql/rename.htm """ type = "rename_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "RENAME", OneOf( "SCHEMA", "TABLE", "VIEW", "FUNCTION", "SCRIPT", "USER", "ROLE", "CONNECTION", Sequence("CONSUMER", "GROUP"), optional=True, ), Ref("ObjectReferenceSegment"), "TO", Ref("ObjectReferenceSegment"), ) ############################ # COMMENT ############################ class CommentStatementSegment(BaseSegment): """`COMMENT` statement. 
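    For illustration (a sketch; names are placeholders):
        COMMENT ON TABLE t1 IS 'Order data'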
https://docs.exasol.com/sql/comment.htm """ type = "comment_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "COMMENT", "ON", OneOf( Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Sequence("IS", Ref("QuotedLiteralSegment"), optional=True), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), "IS", Ref("QuotedLiteralSegment"), ), ), optional=True, ), ), Sequence( OneOf( "COLUMN", "SCHEMA", "FUNCTION", "SCRIPT", "USER", "ROLE", "CONNECTION", Sequence("CONSUMER", "GROUP"), ), Ref("ObjectReferenceSegment"), "IS", Ref("QuotedLiteralSegment"), ), ), ) ############################ # INSERT ############################ class InsertStatementSegment(BaseSegment): """An `INSERT` statement.""" type = "insert_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "INSERT", Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Ref("ValuesInsertClauseSegment"), Ref("ValuesRangeClauseSegment"), Sequence("DEFAULT", "VALUES"), Ref("SelectableGrammar"), Ref("BracketedColumnReferenceListGrammar", optional=True), ), ) class ValuesInsertClauseSegment(BaseSegment): """A `VALUES` clause like in `INSERT`.""" type = "values_insert_clause" match_grammar = Sequence( "VALUES", Delimited( Bracketed( Delimited( Ref("LiteralGrammar"), Ref("IntervalExpressionSegment"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), "DEFAULT", Ref("SelectableGrammar"), ), parse_mode=ParseMode.GREEDY, ), ), ) ############################ # UPDATE ############################ class UpdateStatementSegment(BaseSegment): """An `UPDATE` statement. UPDATE <table_reference>
SET <set_clause_list> [ WHERE <search_condition> ] https://docs.exasol.com/sql/update.htm """ type = "update_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "UPDATE", OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")), Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("PreferringClauseSegment", optional=True), ) class SetClauseListSegment(BaseSegment): """Overwritten from ANSI.""" type = "set_clause_list" match_grammar = Sequence( "SET", Indent, Delimited( Ref("SetClauseSegment"), terminators=["FROM"], ), Dedent, ) class SetClauseSegment(BaseSegment): """Overwritten from ANSI.""" type = "set_clause" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), OneOf( Ref("ExpressionSegment"), # Maybe add this to ANSI to match math x=x+1 Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), "NULL", "DEFAULT", ), ) ############################ # MERGE ############################ class MergeMatchSegment(BaseSegment): """Contains dialect-specific merge operations.""" type = "merge_match" match_grammar = OneOf( Sequence( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment", optional=True), ), Sequence( Ref("MergeNotMatchedClauseSegment"), Ref("MergeMatchedClauseSegment", optional=True), ), ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar = Sequence( "WHEN", "MATCHED", "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar = Sequence( "WHEN", "NOT", "MATCHED", "THEN", Ref("MergeInsertClauseSegment"), ) class MergeUpdateClauseSegment(BaseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar = Sequence( "UPDATE", Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class MergeDeleteClauseSegment(BaseSegment): """`DELETE` clause within the `MERGE` statement.""" type = "merge_delete_clause" match_grammar = Sequence( "DELETE", Ref("WhereClauseSegment", optional=True), ) class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), ) ############################ # DELETE ############################ class DeleteStatementSegment(BaseSegment): """`DELETE` statement. https://docs.exasol.com/sql/delete.htm """ type = "delete_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "DELETE", Ref("StarSegment", optional=True), "FROM", OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")), Ref("WhereClauseSegment", optional=True), Ref("PreferringClauseSegment", optional=True), ) ############################ # TRUNCATE ############################ class TruncateStatementSegment(BaseSegment): """`TRUNCATE TABLE` statement. 
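    For illustration (a sketch; the table name is a placeholder):
        TRUNCATE TABLE t1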
https://docs.exasol.com/sql/truncate.htm """ type = "truncate_table" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "TRUNCATE", "TABLE", Ref("TableReferenceSegment"), ) ############################ # IMPORT ############################ class ImportStatementSegment(BaseSegment): """`IMPORT` statement. https://docs.exasol.com/sql/import.htm """ type = "import_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "IMPORT", Sequence( "INTO", OneOf( Sequence( Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ), Bracketed( Delimited(Ref("ImportColumnsSegment")), ), ), optional=True, ), Ref("ImportFromClauseSegment"), ) class ExportStatementSegment(BaseSegment): """`EXPORT` statement. https://docs.exasol.com/sql/export.htm """ type = "export_statement" is_ddl = False is_dml = True is_dql = False is_dcl = False match_grammar = Sequence( "EXPORT", OneOf( Sequence( Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ), Bracketed( Ref("SelectableGrammar"), ), ), Ref("ExportIntoClauseSegment"), ) class ExportIntoClauseSegment(BaseSegment): """EXPORT INTO CLAUSE.""" type = "export_into_clause" match_grammar = Sequence( "INTO", OneOf( Sequence( OneOf( Ref("ImportFromExportIntoDbSrcSegment"), Ref("ImportFromExportIntoFileSegment"), ), Ref("RejectClauseSegment", optional=True), ), Ref("ImportFromExportIntoScriptSegment"), ), ) class ImportColumnsSegment(BaseSegment): """IMPORT COLUMNS.""" type = "import_columns" match_grammar = Sequence( OneOf( Ref("ColumnDatatypeSegment"), Ref("CreateTableLikeClauseSegment"), ) ) class ImportFromClauseSegment(BaseSegment): """IMPORT FROM CLAUSE.""" type = "import_from_clause" match_grammar = Sequence( "FROM", OneOf( Sequence( OneOf( Ref("ImportFromExportIntoDbSrcSegment"), Ref("ImportFromExportIntoFileSegment"), ), Ref("ImportErrorsClauseSegment", optional=True), ), Ref("ImportFromExportIntoScriptSegment"), ), ) class ImportFromExportIntoDbSrcSegment(BaseSegment): """`IMPORT` from or `EXPORT` to an external database source (EXA,ORA,JDBC).""" type = "import_export_dbsrc" match_grammar = Sequence( OneOf( "EXA", "ORA", Sequence( "JDBC", Sequence( "DRIVER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence("AT", Ref("ConnectionDefinition")), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), Sequence( # EXPORT only AnyNumberOf( OneOf("REPLACE", "TRUNCATE"), Sequence( "CREATED", "BY", Ref("QuotedLiteralSegment"), ), max_times=2, ), optional=True, ), ), AnyNumberOf( Sequence( "STATEMENT", Ref("QuotedLiteralSegment"), ), min_times=1, ), ), ) class ImportFromExportIntoFileSegment(BaseSegment): """`IMPORT` from or `EXPORT` to a file source (FBV,CSV).""" type = "import_file" match_grammar = Sequence( OneOf( Sequence( OneOf( "CSV", "FBV", ), AnyNumberOf( Sequence( "AT", Ref("ConnectionDefinition"), ), AnyNumberOf( "FILE", Ref("QuotedLiteralSegment"), min_times=1, ), min_times=1, ), ), Sequence( "LOCAL", Ref.keyword("SECURE", optional=True), OneOf( "CSV", "FBV", ), AnyNumberOf( "FILE", Ref("QuotedLiteralSegment"), min_times=1, ), ), ), OneOf( Ref("CSVColumnDefinitionSegment"), Ref("FBVColumnDefinitionSegment"), optional=True, ), Ref("FileOptionSegment", optional=True), ) class ImportFromExportIntoScriptSegment(BaseSegment): """`IMPORT` from / `EXPORT` to an executed database script.""" type = "import_script" match_grammar = 
Sequence( "SCRIPT", Ref("ObjectReferenceSegment"), Sequence("AT", Ref("ConnectionDefinition"), optional=True), Sequence( "WITH", AnyNumberOf( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), min_times=1, ), optional=True, ), ) class ImportErrorsClauseSegment(BaseSegment): """`ERRORS` clause.""" type = "import_errors_clause" match_grammar = Sequence( "ERRORS", "INTO", Ref("ImportErrorDestinationSegment"), Bracketed( Ref("ExpressionSegment"), # maybe wrong implementation? optional=True, ), OneOf( "REPLACE", "TRUNCATE", optional=True, ), Ref("RejectClauseSegment", optional=True), ) class ImportErrorDestinationSegment(BaseSegment): """Error destination (csv file or table).""" type = "import_error_destination" match_grammar = OneOf( Sequence( "CSV", Sequence("AT", Ref("ConnectionDefinition")), "FILE", Ref("QuotedLiteralSegment"), ), Sequence( "LOCAL", Ref.keyword("SECURE", optional=True), "CSV", "FILE", Ref("QuotedLiteralSegment"), ), Sequence( Ref("TableReferenceSegment"), ), ) class RejectClauseSegment(BaseSegment): """`REJECT` clause within an import / export statement.""" type = "reject_clause" match_grammar = Sequence( "REJECT", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), Ref.keyword("ERRORS", optional=True), ) class CSVColumnDefinitionSegment(BaseSegment): """Definition of csv columns within an `IMPORT` / `EXPORT` statement.""" type = "csv_cols" match_grammar = Bracketed( Delimited( Sequence( OneOf( Ref("NumericLiteralSegment"), Sequence( # Expression 1..3, for col 1, 2 and 3 Ref("NumericLiteralSegment"), Ref("RangeOperator"), Ref("NumericLiteralSegment"), ), ), Sequence( "FORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( # EXPORT only "DELIMIT", Ref("EqualsSegment"), OneOf("ALWAYS", "NEVER", "AUTO"), optional=True, ), ), ) ) class FBVColumnDefinitionSegment(BaseSegment): """Definition of fbv columns within an `IMPORT` / `EXPORT` statement.""" type = "fbv_cols" match_grammar = Bracketed( Delimited( AnyNumberOf( # IMPORT valid: SIZE ,START, FORMAT, PADDING, ALIGN # EXPORT valid: SIZE, FORMAT, ALIGN, PADDING Sequence( OneOf("SIZE", "START"), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( OneOf("FORMAT", "PADDING"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ALIGN", Ref("EqualsSegment"), OneOf("LEFT", "RIGHT"), ), ), ) ) class FileOptionSegment(BaseSegment): """File options.""" type = "file_opts" match_grammar = AnyNumberOf( OneOf( # IMPORT valid: ENCODING, NULL, ROW SEPARATOR, COLUMN SEPARATOR / DELIMITER # TRIM, LTRIM, RTRIM, SKIP, ROW SIZE # EXPORT valid: REPLACE, TRUNCATE, ENCODING, NULL, BOOLEAN, ROW SEPARATOR # COLUMN SEPARATOR / DELIMITER, DELIMIT, WITH COLUMN NAMES "ENCODING", "NULL", "BOOLEAN", Sequence("ROW", "SEPARATOR"), Sequence( "COLUMN", OneOf("SEPARATOR", "DELIMITER"), ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), OneOf("TRIM", "LTRIM", "RTRIM"), Sequence( OneOf( "SKIP", Sequence("ROW", "SIZE"), ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), "REPLACE", "TRUNCATE", Sequence( "WITH", "COLUMN", "NAMES", ), Sequence( # EXPORT only "DELIMIT", Ref("EqualsSegment"), OneOf("ALWAYS", "NEVER", "AUTO"), ), ) ############################ # USER ############################ class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. 
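    For illustration (a sketch based on the linked docs; name and password are placeholders):
        CREATE USER user_1 IDENTIFIED BY "h12_xhz"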
https://docs.exasol.com/sql/create_user.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), "IDENTIFIED", OneOf( Ref("UserPasswordAuthSegment"), Ref("UserKerberosAuthSegment"), Ref("UserLDAPAuthSegment"), Ref("UserOpenIDAuthSegment"), ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.exasol.com/sql/alter_user.htm """ type = "alter_user_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "USER", Ref("RoleReferenceSegment"), OneOf( Sequence( "IDENTIFIED", OneOf( Sequence( Ref("UserPasswordAuthSegment"), Sequence( "REPLACE", Ref("PasswordLiteralSegment"), optional=True, ), ), Ref("UserLDAPAuthSegment"), Ref("UserKerberosAuthSegment"), Ref("UserOpenIDAuthSegment"), ), ), Sequence( "PASSWORD_EXPIRY_POLICY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("PASSWORD", "EXPIRE"), Sequence("RESET", "FAILED", "LOGIN", "ATTEMPTS"), Sequence( "SET", "CONSUMER_GROUP", Ref("EqualsSegment"), OneOf(Ref("SingleIdentifierGrammar"), "NULL"), ), ), ) class UserPasswordAuthSegment(BaseSegment): """user password authentication.""" type = "password_auth" match_grammar = Sequence( # password "BY", Ref("PasswordLiteralSegment"), ) class UserKerberosAuthSegment(BaseSegment): """user kerberos authentication.""" type = "kerberos_auth" match_grammar = Sequence( "BY", "KERBEROS", "PRINCIPAL", Ref("QuotedLiteralSegment"), ) class UserLDAPAuthSegment(BaseSegment): """user ldap authentication.""" type = "ldap_auth" match_grammar = Sequence( "AT", "LDAP", "AS", Ref("QuotedLiteralSegment"), Ref.keyword("FORCE", optional=True), ) class UserOpenIDAuthSegment(BaseSegment): """User OpenID authentication.""" type = "openid_auth" match_grammar = Sequence( "BY", "OPENID", "SUBJECT", Ref("QuotedLiteralSegment"), ) class DropUserStatementSegment(ansi.DropUserStatementSegment): """A `DROP USER` statement with CASCADE option. https://docs.exasol.com/sql/drop_user.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "USER", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), Ref.keyword("CASCADE", optional=True), ) ############################ # CONSUMER GROUP ############################ class CreateConsumerGroupSegment(BaseSegment): """`CREATE CONSUMER GROUP` statement.""" type = "create_consumer_group_statement" match_grammar = Sequence( "CREATE", "CONSUMER", "GROUP", Ref("SingleIdentifierGrammar"), "WITH", Delimited(Ref("ConsumerGroupParameterSegment")), ) class AlterConsumerGroupSegment(BaseSegment): """`ALTER CONSUMER GROUP` statement.""" type = "alter_consumer_group_statement" match_grammar = Sequence( "ALTER", "CONSUMER", "GROUP", Ref("SingleIdentifierGrammar"), "SET", Delimited(Ref("ConsumerGroupParameterSegment")), ) class ConsumerGroupParameterSegment(BaseSegment): """Consumer Group Parameters.""" type = "consumer_group_parameter" match_grammar = Sequence( OneOf( "CPU_WEIGHT", "PRECEDENCE", "GROUP_TEMP_DB_RAM_LIMIT", "USER_TEMP_DB_RAM_LIMIT", "SESSION_TEMP_DB_RAM_LIMIT", "QUERY_TIMEOUT", "IDLE_TIMEOUT", ), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class DropConsumerGroupSegment(BaseSegment): """A `DROP CONSUMER GROUP` statement. 
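e.g. `DROP CONSUMER GROUP my_group` (illustrative; the group name is invented).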
https://docs.exasol.com/sql/consumer_group.htm """ type = "drop_consumer_group_statement" match_grammar = Sequence( "DROP", Sequence("CONSUMER", "GROUP"), Ref("SingleIdentifierGrammar") ) ############################ # ROLE ############################ class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """`CREATE ROLE` statement. https://docs.exasol.com/sql/create_role.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", "ROLE", Ref("RoleReferenceSegment"), ) class AlterRoleStatementSegment(BaseSegment): """`ALTER ROLE` statement. Only allowed to alter CONSUMER GROUPs """ type = "alter_role_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "ROLE", Ref("RoleReferenceSegment"), "SET", Sequence( "CONSUMER_GROUP", Ref("EqualsSegment"), OneOf(Ref("SingleIdentifierGrammar"), "NULL"), ), ) class DropRoleStatementSegment(ansi.DropRoleStatementSegment): """A `DROP ROLE` statement with CASCADE option. https://docs.exasol.com/sql/drop_role.htm """ is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), Ref.keyword("CASCADE", optional=True), ) ############################ # CONNECTION ############################ class CreateConnectionSegment(BaseSegment): """`CREATE CONNECTION` statement. https://docs.exasol.com/sql/create_connection.htm """ type = "create_connection" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "CONNECTION", Ref("NakedIdentifierSegment"), "TO", Ref("ConnectionDefinition"), ) class AlterConnectionSegment(BaseSegment): """`ALTER CONNECTION` statement. https://docs.exasol.com/sql/alter_connection.htm """ type = "alter_connection" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "ALTER", "CONNECTION", Ref("NakedIdentifierSegment"), "TO", Ref("ConnectionDefinition"), ) class ConnectionDefinition(BaseSegment): """Definition of a connection.""" type = "connection_definition" match_grammar = Sequence( OneOf( # string or identifier Ref("SingleIdentifierGrammar"), Ref("QuotedLiteralSegment"), ), Sequence( "USER", Ref("QuotedLiteralSegment"), "IDENTIFIED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), ) class DropConnectionStatementSegment(BaseSegment): """A `DROP CONNECTION` statement. https://docs.exasol.com/sql/drop_connection.htm """ type = "drop_connection_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( "DROP", "CONNECTION", Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ) ############################ # GRANT / REVOKE ############################ class AccessStatementSegment(BaseSegment): """`GRANT` / `REVOKE` statement. 
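e.g. `GRANT SELECT ON SCHEMA my_schema TO user_1` or `REVOKE ALL PRIVILEGES FROM role_1` (illustrative sketches; all identifiers are invented).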
https://docs.exasol.com/sql/grant.htm https://docs.exasol.com/sql/revoke.htm """ type = "access_statement" is_ddl = False is_dml = False is_dql = False is_dcl = True match_grammar = Sequence( OneOf("GRANT", "REVOKE"), OneOf( Ref("GrantRevokeSystemPrivilegesSegment"), Ref("GrantRevokeObjectPrivilegesSegment"), Ref("GrantRevokeRolesSegment"), Ref("GrantRevokeImpersonationSegment"), Ref("GrantRevokeConnectionSegment"), Ref("GrantRevokeConnectionRestrictedSegment"), ), ) class GrantRevokeSystemPrivilegesSegment(BaseSegment): """`GRANT` / `REVOKE` system privileges.""" type = "grant_revoke_system_privileges" match_grammar = Sequence( OneOf( Sequence( "ALL", Ref.keyword( "PRIVILEGES", optional=True, ), ), Delimited( Ref("SystemPrivilegesSegment"), terminators=["TO", "FROM"], ), ), OneOf("TO", "FROM"), Delimited( Ref("NakedIdentifierSegment"), ), Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only ) class GrantRevokeObjectPrivilegesSegment(BaseSegment): """`GRANT` / `REVOKE` object privileges.""" type = "grant_revoke_object_privileges" match_grammar = Sequence( OneOf( Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), Delimited(Ref("ObjectPrivilegesSegment"), terminators=["ON"]), ), "ON", OneOf( OneOf("SCHEMA", "TABLE", "VIEW", "FUNCTION", "SCRIPT"), Sequence("ALL", Ref.keyword("OBJECTS", optional=True)), # Revoke only optional=True, ), Ref("ObjectReferenceSegment"), OneOf( Sequence( # Grant only "TO", Delimited(Ref("SingleIdentifierGrammar")), ), Sequence( # Revoke only "FROM", Delimited(Ref("SingleIdentifierGrammar")), Sequence("CASCADE", "CONSTRAINTS", optional=True), ), ), ) class GrantRevokeRolesSegment(BaseSegment): """`GRANT` / `REVOKE` roles.""" type = "grant_revoke_roles" match_grammar = Sequence( OneOf( Sequence("ALL", "ROLES"), # Revoke only Delimited(Ref("RoleReferenceSegment"), terminators=["TO", "FROM"]), ), OneOf("TO", "FROM"), Delimited(Ref("RoleReferenceSegment")), Sequence("WITH", "ADMIN", "OPTION", optional=True), # Grant only ) class GrantRevokeImpersonationSegment(BaseSegment): """`GRANT` / `REVOKE` impersonation.""" type = "grant_revoke_impersonation" match_grammar = Sequence( "IMPERSONATION", "ON", Delimited( Ref("SingleIdentifierGrammar"), terminators=["TO", "FROM"], ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), ) class GrantRevokeConnectionSegment(BaseSegment): """`GRANT` / `REVOKE` connection.""" type = "grant_revoke_connection" match_grammar = Sequence( "CONNECTION", Delimited( Ref("SingleIdentifierGrammar"), terminators=["TO", "FROM"], ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), Sequence("WITH", "ADMIN", "OPTION", optional=True), ) class GrantRevokeConnectionRestrictedSegment(BaseSegment): """`GRANT` / `REVOKE` connection restricted.""" type = "grant_revoke_connection_restricted" match_grammar = Sequence( "ACCESS", "ON", "CONNECTION", Ref("SingleIdentifierGrammar"), Sequence( "FOR", OneOf("SCRIPT", "SCHEMA", optional=True), Ref("SingleIdentifierGrammar"), ), OneOf("TO", "FROM"), Delimited(Ref("SingleIdentifierGrammar")), ) class SystemPrivilegesSegment(BaseSegment): """System privileges. 
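e.g. `KILL ANY SESSION` or `CREATE ANY TABLE`, as they appear inside the `GRANT` / `REVOKE` statements above (an illustrative subset).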
https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges """ type = "system_privilege" match_grammar = OneOf( Sequence("GRANT", "ANY", "OBJECT", "PRIVILEGE"), Sequence("GRANT", "ANY", "PRIVILEGE"), Sequence("SET", "ANY", "CONSUMER", "GROUP"), Sequence("MANAGE", "CONSUMER", "GROUPS"), Sequence("KILL", "ANY", "SESSION"), Sequence("ALTER", "SYSTEM"), Sequence(OneOf("CREATE", "ALTER", "DROP"), "USER"), Sequence("IMPERSONATE", "ANY", "USER"), Sequence(OneOf("DROP", "GRANT"), "ANY", "ROLE"), Sequence(OneOf("ALTER", "DROP", "GRANT", "USE", "ACCESS"), "ANY", "CONNECTION"), Sequence("CREATE", Ref.keyword("VIRTUAL", optional=True), "SCHEMA"), Sequence( OneOf("ALTER", "DROP", "USE"), "ANY", Ref.keyword("VIRTUAL", optional=True), "SCHEMA", Ref.keyword("REFRESH", optional=True), ), Sequence( "CREATE", OneOf( "TABLE", "VIEW", "CONNECTION", "ROLE", "SESSION", "FUNCTION", "SCRIPT" ), ), Sequence( OneOf("CREATE", "ALTER", "DELETE", "DROP", "INSERT", "SELECT", "UPDATE"), "ANY", "TABLE", ), Sequence("SELECT", "ANY", "DICTIONARY"), Sequence(OneOf("CREATE", "DROP"), "ANY", "VIEW"), Sequence( OneOf("CREATE", "DROP", "EXECUTE"), "ANY", OneOf("SCRIPT", "FUNCTION") ), "IMPORT", "EXPORT", ) class ObjectPrivilegesSegment(BaseSegment): """Object privileges. https://docs.exasol.com/database_concepts/privileges/details_rights_management.htm#System_Privileges """ type = "object_privilege" match_grammar = OneOf( "ALTER", "SELECT", "INSERT", "UPDATE", "DELETE", "REFERENCES", "EXECUTE", # Revoke only "IMPORT", "EXPORT", ) ############################ # SKYLINE ############################ class PreferringClauseSegment(BaseSegment): """`PREFERRING` clause of the Exasol Skyline extension. https://docs.exasol.com/advanced_analytics/skyline.htm#preferring_clause """ type = "preferring_clause" match_grammar = Sequence( "PREFERRING", OptionallyBracketed(Ref("PreferringPreferenceTermSegment")), Ref("PartitionClauseSegment", optional=True), ) class PreferringPreferenceTermSegment(BaseSegment): """The preference term of a `PREFERRING` clause.""" type = "preference_term" match_grammar = Sequence( OneOf( Sequence( OneOf("HIGH", "LOW"), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("LocalAliasSegment"), ), ), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("LocalAliasSegment"), ), ), Ref("PreferringPlusPriorTermSegment", optional=True), ) class PreferringPlusPriorTermSegment(BaseSegment): """The preferring preference term expression.""" type = "plus_prior_inverse" match_grammar = OneOf( Sequence( Sequence( OneOf( "PLUS", Sequence("PRIOR", "TO"), ), Ref("PreferringPreferenceTermSegment"), optional=True, ), ), Sequence( "INVERSE", Ref("PreferringPreferenceTermSegment"), ), ) class MLTableExpressionSegment(ansi.MLTableExpressionSegment): """Not supported.""" match_grammar = Nothing() ############################ # SYSTEM ############################ class AlterSessionSegment(BaseSegment): """`ALTER SESSION` statement.""" type = "alter_session_statement" match_grammar = Sequence( "ALTER", "SESSION", "SET", Ref("SessionParameterSegment"), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class AlterSystemSegment(BaseSegment): """`ALTER SYSTEM` statement.""" type = "alter_system_statement" match_grammar = Sequence( "ALTER", "SYSTEM", "SET", Ref("SystemParameterSegment"), Ref("EqualsSegment"), 
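# Illustrative example (the parameter and value are invented):
#   ALTER SYSTEM SET QUERY_TIMEOUT = 120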
OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ) class OpenSchemaSegment(BaseSegment): """`OPEN SCHEMA` statement.""" type = "open_schema_statement" match_grammar = Sequence("OPEN", "SCHEMA", Ref("SchemaReferenceSegment")) class CloseSchemaSegment(BaseSegment): """`CLOSE SCHEMA` statement.""" type = "close_schema_statement" match_grammar = Sequence("CLOSE", "SCHEMA") class FlushStatisticsSegment(BaseSegment): """`FLUSH STATISTICS` statement.""" type = "flush_statistics_statement" match_grammar = Sequence("FLUSH", "STATISTICS") class RecompressReorganizeSegment(BaseSegment): """`RECOMPRESS` and `REOGRANIZE` statement.""" type = "recompress_reorganize_statement" match_grammar = Sequence( OneOf("RECOMPRESS", "REORGANIZE"), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar"), ), Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))), Sequence("SCHEMA", Ref("SchemaReferenceSegment")), Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))), "DATABASE", ), Ref.keyword("ENFORCE", optional=True), ) class PreloadSegment(BaseSegment): """`PRELOAD` statement.""" type = "preload_statement" match_grammar = Sequence( "PRELOAD", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar"), ), Sequence("TABLES", Delimited(Ref("TableReferenceSegment"))), Sequence("SCHEMA", Ref("SchemaReferenceSegment")), Sequence("SCHEMAS", Delimited(Ref("SchemaReferenceSegment"))), "DATABASE", ), ) class ImpersonateSegment(BaseSegment): """`IMPERSONATE` statement.""" type = "impersonate_statement" match_grammar = Sequence("IMPERSONATE", Ref("SingleIdentifierGrammar")) class KillSegment(BaseSegment): """`KILL` statement.""" type = "kill_statement" match_grammar = Sequence( "KILL", OneOf( Sequence("SESSION", OneOf("CURRENT_SESSION", Ref("NumericLiteralSegment"))), Sequence( "STATEMENT", Ref("NumericLiteralSegment", optional=True), "IN", "SESSION", Ref("NumericLiteralSegment"), Sequence("WITH", "MESSAGE", Ref("QuotedLiteralSegment"), optional=True), ), ), ) class TruncateAuditLogsSegment(BaseSegment): """`TRUNCATE AUDIT LOGS` statement.""" type = "truncate_audit_logs_statement" match_grammar = Sequence( "TRUNCATE", "AUDIT", "LOGS", Sequence( "KEEP", OneOf( Sequence("LAST", OneOf("DAY", "MONTH", "YEAR")), Sequence("FROM", Ref("QuotedLiteralSegment")), ), optional=True, ), ) ############################ # OTHERS ############################ class TransactionStatementSegment(BaseSegment): """A `COMMIT` or `ROLLBACK` statement.""" type = "transaction_statement" match_grammar = Sequence( OneOf("COMMIT", "ROLLBACK"), Ref.keyword("WORK", optional=True) ) class ExecuteScriptSegment(BaseSegment): """`EXECUTE SCRIPT` statement.""" type = "execute_script_statement" match_grammar = Sequence( "EXECUTE", "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Delimited(Ref.keyword("ARRAY", optional=True), Ref("ExpressionSegment")), optional=True, ), Sequence("WITH", "OUTPUT", optional=True), ) class ExplainVirtualSegment(BaseSegment): """`EXPLAIN VIRTUAL` statement.""" type = "explain_virtual_statement" match_grammar = Sequence("EXPLAIN", "VIRTUAL", Ref("SelectableGrammar")) ############################ # FUNCTION ############################ class FunctionReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a function.""" type = "function_reference" class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement.""" type = "create_function_statement" is_ddl = True is_dml = False is_dql = False 
is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "FUNCTION", Ref("FunctionReferenceSegment"), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref.keyword("IN", optional=True), Ref("DatatypeSegment"), # Column type ), optional=True, ), ), "RETURN", Ref("DatatypeSegment"), OneOf("IS", "AS", optional=True), Indent, AnyNumberOf( Sequence( Ref("VariableNameSegment"), Ref("DatatypeSegment"), Ref("DelimiterGrammar"), ), optional=True, ), Dedent, "BEGIN", Indent, AnyNumberOf(Ref("FunctionBodySegment")), "RETURN", Ref("FunctionContentsExpressionGrammar"), Ref("DelimiterGrammar"), Dedent, "END", Ref("FunctionReferenceSegment", optional=True), Ref("SemicolonSegment", optional=True), ) class FunctionBodySegment(BaseSegment): """The definition of the function body.""" type = "function_body" match_grammar = Sequence( OneOf( Ref("FunctionAssignmentSegment"), Ref("FunctionIfBranchSegment"), Ref("FunctionForLoopSegment"), Ref("FunctionWhileLoopSegment"), ), ) class FunctionAssignmentSegment(BaseSegment): """The definition of an assignment within a function body.""" type = "function_assignment" match_grammar = Sequence( # assignment Ref("VariableNameSegment"), Ref("WalrusOperatorSegment"), OneOf( Ref("FunctionSegment"), Ref("VariableNameSegment"), Ref("LiteralGrammar"), Ref("ExpressionSegment"), ), Ref("SemicolonSegment"), ) class FunctionIfBranchSegment(BaseSegment): """The definition of an if branch within a function body.""" type = "function_if_branch" match_grammar = Sequence( "IF", AnyNumberOf(Ref("ExpressionSegment")), "THEN", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, AnyNumberOf( Sequence( OneOf("ELSIF", "ELSEIF"), Ref("ExpressionSegment"), "THEN", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, ), optional=True, ), Sequence( "ELSE", Indent, AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), Dedent, optional=True, ), "END", "IF", Ref("SemicolonSegment"), ) class FunctionForLoopSegment(BaseSegment): """The definition of a for loop within a function body.""" type = "function_for_loop" match_grammar = Sequence( "FOR", Ref("NakedIdentifierSegment"), OneOf( # for x := 1 to 10 do... Sequence( Ref("WalrusOperatorSegment"), Ref("ExpressionSegment"), # could be a variable "TO", Ref("ExpressionSegment"), # could be a variable "DO", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "FOR", ), # for x IN 1..10... Sequence( "IN", Ref("ExpressionSegment"), # could be a variable Ref("RangeOperator"), Ref("ExpressionSegment"), # could be a variable "LOOP", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "LOOP", ), ), Ref("SemicolonSegment"), ) class FunctionWhileLoopSegment(BaseSegment): """The definition of a while loop within a function body.""" type = "function_while_loop" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), "DO", AnyNumberOf(Ref("FunctionBodySegment"), min_times=1), "END", "WHILE", Ref("SemicolonSegment"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement with CASCADE and RESTRICT options. 
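e.g. `DROP FUNCTION IF EXISTS my_schema.my_function CASCADE` (illustrative; the function name is invented).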
https://docs.exasol.com/sql/drop_function.htm """ type = "drop_function_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("DropBehaviorGrammar", optional=True), ) ############################ # SCRIPT ############################ class ScriptReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a script.""" type = "script_reference" class ScriptContentSegment(BaseSegment): """This represents the script content. Because the script content could be written in LUA, PYTHON, JAVA or R, there is no further verification. """ type = "script_content" match_grammar = Anything( terminators=[Ref("FunctionScriptTerminatorSegment")], # Within the script we should _only_ look for the script # terminator segment. reset_terminators=True, ) class CreateScriptingLuaScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create a Lua scripting script. https://docs.exasol.com/sql/create_script.htm """ type = "create_scripting_lua_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref.keyword("LUA", optional=True), "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Delimited( Sequence( Ref.keyword("ARRAY", optional=True), Ref("SingleIdentifierGrammar") ), optional=True, ), optional=True, ), Sequence(Ref.keyword("RETURNS"), OneOf("TABLE", "ROWCOUNT"), optional=True), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class CreateUDFScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create a UDF script. https://docs.exasol.com/sql/create_script.htm """ type = "create_udf_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf( "JAVA", "PYTHON", "LUA", "R", Ref("SingleIdentifierGrammar"), optional=True ), OneOf("SCALAR", "SET"), "SCRIPT", Ref("ScriptReferenceSegment"), Bracketed( Sequence( Ref("UDFParameterGrammar"), Ref("OrderByClauseSegment", optional=True), optional=True, ), ), OneOf(Sequence("RETURNS", Ref("DatatypeSegment")), Ref("EmitsSegment")), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class CreateAdapterScriptStatementSegment(BaseSegment): """`CREATE SCRIPT` statement to create an adapter script. https://docs.exasol.com/sql/create_script.htm """ type = "create_adapter_script" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf("JAVA", "PYTHON", "LUA", Ref("SingleIdentifierGrammar")), "ADAPTER", "SCRIPT", Ref("ScriptReferenceSegment"), "AS", Indent, Ref("ScriptContentSegment"), Dedent, ) class DropScriptStatementSegment(BaseSegment): """A `DROP SCRIPT` statement. 
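e.g. `DROP ADAPTER SCRIPT IF EXISTS my_schema.my_adapter` (illustrative; the script name is invented).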
https://docs.exasol.com/sql/drop_script.htm """ type = "drop_script_statement" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "DROP", Sequence( Ref.keyword("ADAPTER", optional=True), "SCRIPT", ), Ref("IfExistsGrammar", optional=True), Ref("ScriptReferenceSegment"), ) ############################ # DIALECT ############################ class FunctionScriptStatementSegment(BaseSegment): """A generic segment, matching any of its child subsegments.""" type = "statement" match_grammar = OneOf( Ref("CreateFunctionStatementSegment"), Ref("CreateScriptingLuaScriptStatementSegment"), Ref("CreateUDFScriptStatementSegment"), Ref("CreateAdapterScriptStatementSegment"), ) class StatementSegment(ansi.StatementSegment): """A generic segment, matching any of its child subsegments.""" type = "statement" match_grammar = OneOf( # Data Query Language (DQL) Ref("SelectableGrammar"), # Data Modifying Language (DML) Ref("DeleteStatementSegment"), Ref("ExportStatementSegment"), Ref("ImportStatementSegment"), Ref("InsertStatementSegment"), Ref("MergeStatementSegment"), Ref("TruncateStatementSegment"), Ref("UpdateStatementSegment"), # Data Definition Language (DDL) Ref("AlterTableStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("AlterVirtualSchemaStatementSegment"), Ref("CommentStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateViewStatementSegment"), Ref("CreateVirtualSchemaStatementSegment"), Ref("DropViewStatementSegment"), Ref("DropFunctionStatementSegment"), Ref("DropScriptStatementSegment"), Ref("DropSchemaStatementSegment"), Ref("DropTableStatementSegment"), Ref("RenameStatementSegment"), # Access Control Language (DCL) Ref("AccessStatementSegment"), Ref("AlterConnectionSegment"), Ref("AlterUserStatementSegment"), Ref("CreateConnectionSegment"), Ref("CreateRoleStatementSegment"), Ref("CreateUserStatementSegment"), Ref("DropRoleStatementSegment"), Ref("DropUserStatementSegment"), Ref("DropConnectionStatementSegment"), # System Ref("CreateConsumerGroupSegment"), Ref("AlterConsumerGroupSegment"), Ref("DropConsumerGroupSegment"), Ref("AlterRoleStatementSegment"), Ref("AlterSessionSegment"), Ref("AlterSystemSegment"), Ref("OpenSchemaSegment"), Ref("CloseSchemaSegment"), Ref("FlushStatisticsSegment"), Ref("ImpersonateSegment"), Ref("RecompressReorganizeSegment"), Ref("KillSegment"), Ref("PreloadSegment"), Ref("TruncateAuditLogsSegment"), Ref("ExplainVirtualSegment"), # Others Ref("TransactionStatementSegment"), Ref("ExecuteScriptSegment"), terminators=[Ref("DelimiterGrammar")], ) class FileSegment(BaseFileSegment): """This overrides the FileSegment from ANSI. The reason is that SCRIPT and FUNCTION statements are terminated by a trailing / at the end. A semicolon is the terminator of the statement within the function / script. """ match_grammar = Delimited( Ref("FunctionScriptStatementSegment"), Ref("StatementSegment"), delimiter=OneOf( Ref("DelimiterGrammar"), Ref("FunctionScriptTerminatorSegment"), ), allow_gaps=True, allow_trailing=True, ) class EmitsSegment(BaseSegment): """EMITS Segment for JSON_EXTRACT for example. In its own segment to give it a type to allow AL03 to find it easily. 
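e.g. `EMITS (col1 VARCHAR(2000), col2 INT)` (an illustrative sketch; column names and types are invented).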
""" type = "emits_segment" match_grammar = Sequence( "EMITS", Bracketed(Ref("UDFParameterGrammar")), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_exasol_keywords.py000066400000000000000000000364471451700765000250500ustar00rootroot00000000000000"""A list of all SQL key words.""" RESERVED_KEYWORDS = [ "ABSOLUTE", "ACTION", "ADD", "AFTER", "ALL", "ALLOCATE", "ALTER", "AND", "ANY", "APPEND", "ARE", "ARRAY", "AS", "ASC", "ASENSITIVE", "ASSERTION", "AT", "ATTRIBUTE", "AUTHID", "AUTHORIZATION", "BEFORE", "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BIT", "BLOB", "BLOCKED", "BOOL", "BOOLEAN", "BOTH", "BY", "BYTE", "CALL", "CALLED", "CARDINALITY", "CASCADE", "CASCADED", "CASE", "CASESPECIFIC", "CAST", "CATALOG", "CHAIN", "CHAR", "CHARACTER", "CHARACTERISTICS", "CHARACTER_SET_CATALOG", "CHARACTER_SET_NAME", "CHARACTER_SET_SCHEMA", "CHECK", "CHECKED", "CLOB", "CLOSE", "COALESCE", "COLLATE", "COLLATION", "COLLATION_CATALOG", "COLLATION_NAME", "COLLATION_SCHEMA", "COLUMN", "COMMIT", "CONDITION", "CONNECTION", "CONNECT_BY_ISCYCLE", "CONNECT_BY_ISLEAF", "CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS", "CONSTRAINT_STATE_DEFAULT", "CONSTRUCTOR", "CONTAINS", "CONTINUE", "CONTROL", "CONVERT", "CORRESPONDING", "CREATE", "CROSS", # a unreserved keyword but needed to be reserved to make join clause work "CS", "CSV", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "CYCLE", "DATA", "DATALINK", "DATE", "DATETIME_INTERVAL_CODE", "DATETIME_INTERVAL_PRECISION", "DAY", "DBTIMEZONE", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFAULT_LIKE_ESCAPE_CHARACTER", "DEFERRABLE", "DEFERRED", "DEFINED", "DEFINER", "DELETE", "DEREF", "DERIVED", "DESC", "DESCRIBE", "DESCRIPTOR", "DETERMINISTIC", "DISABLE", "DISABLED", "DISCONNECT", "DISPATCH", "DISTINCT", "DLURLCOMPLETE", "DLURLPATH", "DLURLPATHONLY", "DLURLSCHEME", "DLURLSERVER", "DLVALUE", "DO", "DOMAIN", "DOUBLE", "DROP", "DYNAMIC", "DYNAMIC_FUNCTION", "DYNAMIC_FUNCTION_CODE", "EACH", "ELSE", "ELSEIF", "ELSIF", "EMITS", "ENABLE", "ENABLED", "END", "END-EXEC", "ENDIF", "ENFORCE", "EQUALS", "ERRORS", "ESCAPE", "EXCEPT", "EXCEPTION", "EXEC", "EXECUTE", "EXISTS", "EXIT", "EXPORT", "EXTERNAL", "EXTRACT", "FALSE", "FBV", "FETCH", "FILE", "FINAL", "FIRST", "FLOAT", "FOLLOWING", "FOR", "FORALL", "FORCE", "FORMAT", "FOUND", "FREE", "FROM", "FS", "FULL", "FUNCTION", "GENERAL", "GENERATED", "GEOMETRY", "GET", "GLOBAL", "GO", "GOTO", "GRANT", "GRANTED", "GROUP", "GROUPING", "GROUPS", "GROUP_CONCAT", "HASHTYPE", "HASHTYPE_FORMAT", "HAVING", "HIGH", "HOLD", "HOUR", "IDENTITY", "IF", "IFNULL", "IMMEDIATE", "IMPERSONATE", "IMPLEMENTATION", "IMPORT", "IN", "INDEX", "INDICATOR", "INNER", "INOUT", "INPUT", "INSENSITIVE", "INSERT", "INSTANCE", "INSTANTIABLE", "INT", "INTEGER", "INTEGRITY", "INTERSECT", "INTERVAL", "INTO", "INVERSE", "INVOKER", "IS", "ITERATE", "JOIN", "KEY_MEMBER", "KEY_TYPE", "LARGE", "LAST", "LATERAL", "LDAP", "LEADING", "LEAVE", "LEFT", "LEVEL", "LIKE", "LIMIT", "LISTAGG", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "LOG", "LONGVARCHAR", "LOOP", "LOW", "MAP", "MATCH", "MATCHED", "MERGE", "METHOD", "MINUS", "MINUTE", "MOD", "MODIFIES", "MODIFY", "MODULE", "MONTH", "NAMES", "NATIONAL", "NATURAL", "NCHAR", "NCLOB", "NEW", "NEXT", "NLS_DATE_FORMAT", "NLS_DATE_LANGUAGE", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "NO", "NOCYCLE", "NOLOGGING", "NONE", "NOT", "NULL", "NULLIF", "NUMBER", 
"NUMERIC", "NVARCHAR", "NVARCHAR2", "OBJECT", "OF", "OFF", "OLD", "ON", "ONLY", "OPEN", "OPTION", "OPTIONS", "OR", "ORDER", "ORDERING", "ORDINALITY", "OTHERS", "OUT", "OUTER", "OUTPUT", "OVER", "OVERLAPS", "OVERLAY", "OVERRIDING", "PAD", "PARALLEL_ENABLE", "PARAMETER", "PARAMETER_SPECIFIC_CATALOG", "PARAMETER_SPECIFIC_NAME", "PARAMETER_SPECIFIC_SCHEMA", "PARTIAL", "PARTITION", # Should really be an unreserved keyword but need for Window clauses "PATH", "PERMISSION", "PLACING", "PLUS", "POSITION", "PRECEDING", "PREFERRING", "PREPARE", "PRESERVE", "PRIOR", "PRIVILEGES", "PROCEDURE", "PROFILE", "QUALIFY", "RANDOM", "RANGE", "READ", "READS", "REAL", "RECOVERY", "RECURSIVE", "REF", "REFERENCES", "REFERENCING", "REFRESH", "REGEXP_LIKE", "RELATIVE", "RELEASE", "RENAME", "REPEAT", "REPLACE", "RESTORE", "RESTRICT", "RESULT", "RETURN", "RETURNED_LENGTH", "RETURNED_OCTET_LENGTH", "RETURNS", "REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROUTINE", "ROW", "ROWS", "ROWTYPE", "SAVEPOINT", "SCHEMA", "SCOPE", "SCOPE_USER", "SCRIPT", "SCROLL", "SEARCH", "SECOND", "SECTION", "SECURITY", "SELECT", "SELECTIVE", "SELF", "SENSITIVE", "SEPARATOR", "SEQUENCE", "SESSION", "SESSIONTIMEZONE", "SESSION_USER", "SET", "SETS", "SHORTINT", "SIMILAR", "SMALLINT", "SOME", "SOURCE", "SPACE", "SPECIFIC", "SPECIFICTYPE", "SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "SQL_BIGINT", "SQL_BIT", "SQL_CHAR", "SQL_DATE", "SQL_DECIMAL", "SQL_DOUBLE", "SQL_FLOAT", "SQL_INTEGER", "SQL_LONGVARCHAR", "SQL_NUMERIC", "SQL_PREPROCESSOR_SCRIPT", "SQL_REAL", "SQL_SMALLINT", "SQL_TIMESTAMP", "SQL_TINYINT", "SQL_TYPE_DATE", "SQL_TYPE_TIMESTAMP", "SQL_VARCHAR", "START", "STATE", "STATEMENT", "STATIC", "STRUCTURE", "STYLE", "SUBSTRING", "SUBTYPE", "SYSDATE", "SYSTEM", "SYSTEM_USER", "SYSTIMESTAMP", "TABLE", "TEMPORARY", "TEXT", "THEN", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TINYINT", "TO", "TRAILING", "TRANSACTION", "TRANSFORM", "TRANSFORMS", "TRANSLATION", "TREAT", "TRIGGER", "TRIM", "TRUE", "TRUNCATE", "UNDER", "UNION", "UNIQUE", "UNKNOWN", "UNLINK", "UNNEST", "UNTIL", "UPDATE", "USAGE", "USER", "USING", "VALUE", "VALUES", "VARCHAR", "VARCHAR2", "VARRAY", "VERIFY", "VIEW", "WHEN", "WHENEVER", "WHERE", "WHILE", "WINDOW", "WITH", "WITHIN", "WITHOUT", "WORK", "YEAR", "YES", "ZONE", ] UNRESERVED_KEYWORDS = [ "ABS", "ACCESS", "ACOS", "ADAPTER", "ADD_DAYS", "ADD_HOURS", "ADD_MINUTES", "ADD_MONTHS", "ADD_SECONDS", "ADD_WEEKS", "ADD_YEARS", "ADMIN", "ALIGN", "ALWAYS", "ANALYZE", "ANSI", "APPROXIMATE_COUNT_DISTINCT", "ASCII", "ASIN", "ASSIGNMENT", "ASYMMETRIC", "ATAN", "ATAN2", "ATOMIC", "ATTEMPTS", "AUDIT", "AUTHENTICATED", "AUTO", "AVG", "BACKUP", "BERNOULLI", "BIT_AND", "BIT_CHECK", "BIT_LENGTH", "BIT_LROTATE", "BIT_LSHIFT", "BIT_NOT", "BIT_OR", "BIT_RROTATE", "BIT_RSHIFT", "BIT_SET", "BIT_TO_NUM", "BIT_XOR", "BREADTH", "CEIL", "CEILING", "CHANGE", "CHARACTERS", "CHARACTER_LENGTH", "CHR", "CLEAR", "COBOL", "COLOGNE_PHONETIC", "COMMENT", "COMMENTS", "COMMITTED", "CONCAT", "CONNECT", "CONVERT_TZ", "CORR", "COS", "COSH", "COT", "COUNT", "COVAR_POP", "COVAR_SAMP", "CREATED", "CURDATE", "DATABASE", "DATE_TRUNC", "DAYS_BETWEEN", "DEBUG", "DECODE", "DEFAULTS", "DEFAULT_CONSUMER_GROUP", "DEGREES", "DELIMIT", "DELIMITER", "DENSE_RANK", "DEPTH", "DIAGNOSTICS", "DICTIONARY", "DISTRIBUTE", "DISTRIBUTION", "DIV", "DOWN", "DUMP", "EDIT_DISTANCE", "EMPTY", "ENCODING", "ERROR", "ESTIMATE", "EVALUATE", "EVERY", "EXA", "EXCLUDE", "EXCLUDING", "EXP", "EXPERIMENTAL", "EXPIRE", "EXPLAIN", "EXPRESSION", "FAILED", "FILES", "FIRST_VALUE", "FLOOR", "FLUSH", 
"FOREIGN", "FORTRAN", "FROM_POSIX_TIME", "GRAPH", "GREATEST", "GROUPING_ID", "HANDLER", "HAS", "HASH", "HASHTYPE_MD5", "HASHTYPE_SHA", "HASHTYPE_SHA1", "HASHTYPE_SHA256", "HASHTYPE_SHA512", "HASHTYPE_TIGER", "HASH_MD5", "HASH_SHA", "HASH_SHA1", "HASH_SHA256", "HASH_SHA512", "HASH_TIGER", "HIERARCHY", "HOURS_BETWEEN", "IDENTIFIED", "IDLE_TIMEOUT", "IGNORE", "IMPERSONATION", "INCLUDING", "INITCAP", "INITIALLY", "INSTR", "INVALID", "IPROC", "ISOLATION", "IS_BOOLEAN", "IS_DATE", "IS_DSINTERVAL", "IS_NUMBER", "IS_TIMESTAMP", "IS_YMINTERVAL", "JAVA", "JAVASCRIPT", "JSON", "JSON_EXTRACT", "JSON_VALUE", "KEEP", "KERBEROS", "KEY", "KEYS", "KILL", "LAG", "LANGUAGE", "LAST_VALUE", "LCASE", "LEAD", "LEAST", "LENGTH", "LINK", "LN", "LOCATE", "LOCK", "LOG10", "LOG2", "LOGIN", "LOGS", "LONG", "LOWER", "LPAD", "LTRIM", "LUA", "MANAGE", "MAX", "MAXIMAL", "MEDIAN", "MESSAGE", "MID", "MIN", "MINUTES_BETWEEN", "MIN_SCALE", "MONTHS_BETWEEN", "MUL", "MULTIPLE", "MUMPS", "NEVER", "NICE", "NORMALIZED", "NOTICE", "NOW", "NPROC", "NULLIFZERO", "NULLS", "NUMTODSINTERVAL", "NUMTOYMINTERVAL", "NVL", "NVL2", "OBJECTS", "OCTETS", "OCTET_LENGTH", "OFFSET", "OPENID", "OPTIMIZE", "OPTIMIZER", "ORA", "OVERFLOW", "OWNER", "PADDING", "PASCAL", "PASSWORD", "PASSWORD_EXPIRY_POLICY", "PASSWORD_SECURITY_POLICY", "PERCENTILE_CONT", "PERCENTILE_DISC", "PI", "PLI", "POSIX_TIME", "POWER", "PRECISION", "PRELOAD", "PRIMARY", "PRINCIPAL", "PRIVILEGE", "PYTHON", "QUERY", "QUERY_CACHE", "QUERY_TIMEOUT", "QUIET", "R", "RADIANS", "RAND", "RANK", "RATIO_TO_REPORT", "RAW_SIZE_LIMIT", "RECOMPRESS", "RECORD", "REGEXP_INSTR", "REGEXP_REPLACE", "REGEXP_SUBSTR", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "REJECT", "REORGANIZE", "REPEATABLE", "RESET", "RESPECT", "RETURNING", "REVERSE", "ROLE", "ROLES", "ROUND", "ROWID", "ROW_NUMBER", "RPAD", "RTRIM", "SCALAR", "SCHEMAS", "SCHEME", "SCRIPT_LANGUAGES", "SCRIPT_OUTPUT_ADDRESS", "SECONDS_BETWEEN", "SECURE", "SERIALIZABLE", "SESSION_PARAMETER", "SESSION_TEMP_DB_RAM_LIMIT", "SHUT", "SIGN", "SIMPLE", "SIN", "SINH", "SIZE", "SKIP", "SNAPSHOT_MODE", "SOUNDEX", "SQRT", "STATISTICS", "STDDEV", "STDDEV_POP", "STDDEV_SAMP", "STEP", "ST_AREA", "ST_BOUNDARY", "ST_BUFFER", "ST_CENTROID", "ST_CONTAINS", "ST_CONVEXHULL", "ST_CROSSES", "ST_DIFFERENCE", "ST_DIMENSION", "ST_DISJOINT", "ST_DISTANCE", "ST_ENDPOINT", "ST_ENVELOPE", "ST_EQUALS", "ST_EXTERIORRING", "ST_FORCE2D", "ST_GEOMETRYN", "ST_GEOMETRYTYPE", "ST_INTERIORRINGN", "ST_INTERSECTION", "ST_INTERSECTS", "ST_ISCLOSED", "ST_ISEMPTY", "ST_ISRING", "ST_ISSIMPLE", "ST_LENGTH", "ST_MAX_DECIMAL_DIGITS", "ST_NUMGEOMETRIES", "ST_NUMINTERIORRINGS", "ST_NUMPOINTS", "ST_OVERLAPS", "ST_POINTN", "ST_SETSRID", "ST_STARTPOINT", "ST_SYMDIFFERENCE", "ST_TOUCHES", "ST_TRANSFORM", "ST_UNION", "ST_WITHIN", "ST_X", "ST_Y", "SUBSTR", "SUM", "SYMMETRIC", "SYS_CONNECT_BY_PATH", "SYS_GUID", "TABLES", "TABLESAMPLE", "TAN", "TANH", "TASKS", "TEMP_DB_RAM_LIMIT", "TIES", "TIMESTAMP_ARITHMETIC_BEHAVIOR", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TO_CHAR", "TO_DATE", "TO_DSINTERVAL", "TO_NUMBER", "TO_TIMESTAMP", "TO_YMINTERVAL", "TRACE", "TRANSLATE", "TRUNC", "TYPE", "TYPEOF", "UCASE", "UNBOUNDED", "UNCOMMITTED", "UNDO", "UNICODE", "UNICODECHR", "UNLIMITED", "UPPER", "USE", "USER_TEMP_DB_RAM_LIMIT", "UTF8", "VALUE2PROC", "VARIANCE", "VARYING", "VAR_POP", "VAR_SAMP", "VIRTUAL", "WEEK", "WRITE", "YEARS_BETWEEN", "ZEROIFNULL", # Additional unreserved keywords not defined in EXA_SQL_KEYWORDS "CONSUMER", "CONSUMER_GROUP", 
"CPU_WEIGHT", "DRIVER", "GROUP_TEMP_DB_RAM_LIMIT", "JDBC", "PRECEDENCE", "ROWCOUNT", "SUBJECT", ] BARE_FUNCTIONS = [ "CONNECT_BY_ISCYCLE", "CONNECT_BY_ISLEAF", "CONNECT_BY_ROOT", "CURDATE", "CURRENT_DATE", "CURRENT_SCHEMA", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIMESTAMP", "CURRENT_USER", "DBTIMEZONE", "LEVEL", "LOCALTIMESTAMP", "NOW", "ROWID", "ROWNUM", "SESSIONTIMEZONE", "SYSDATE", "SYSTIMESTAMP", "USER", ] SYSTEM_PARAMETERS = [ "CONSTRAINT_STATE_DEFAULT", "DEFAULT_CONSUMER_GROUP", "DEFAULT_LIKE_ESCAPE_CHARACTER", "HASHTYPE_FORMAT", "IDLE_TIMEOUT", "NLS_DATE_FORMAT", "NLS_DATE_LANGUAGE", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "PASSWORD_SECURITY_POLICY", "PASSWORD_EXPIRY_POLICY", "PROFILE", "QUERY_CACHE", "QUERY_TIMEOUT", "SCRIPT_OUTPUT_ADDRESS", "SCRIPT_LANGUAGES", "SESSION_TEMP_DB_RAM_LIMIT", "SNAPSHOT_MODE", "SQL_PREPROCESSOR_SCRIPT", "ST_MAX_DECIMAL_DIGITS", "TEMP_DB_RAM_LIMIT", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TIMESTAMP_ARITHMETIC_BEHAVIOR", "USER_TEMP_DB_RAM_LIMIT", ] SESSION_PARAMETERS = [ "CONSTRAINT_STATE_DEFAULT", "DEFAULT_LIKE_ESCAPE_CHARACTER", "HASHTYPE_FORMAT", "IDLE_TIMEOUT", "NICE", "NLS_DATE_LANGUAGE", "NLS_DATE_FORMAT", "NLS_FIRST_DAY_OF_WEEK", "NLS_NUMERIC_CHARACTERS", "NLS_TIMESTAMP_FORMAT", "PROFILE", "QUERY_CACHE", "QUERY_TIMEOUT", "SCRIPT_LANGUAGES", "SCRIPT_OUTPUT_ADDRESS", "SESSION_TEMP_DB_RAM_LIMIT", "SNAPSHOT_MODE", "SQL_PREPROCESSOR_SCRIPT", "ST_MAX_DECIMAL_DIGITS", "TIME_ZONE", "TIME_ZONE_BEHAVIOR", "TIMESTAMP_ARITHMETIC_BEHAVIOR", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_greenplum.py000066400000000000000000000227451451700765000236200ustar00rootroot00000000000000"""The Greenplum dialect. Greenplum (http://www.greenplum.org/) is a Massively Parallel Postgres, so we base this dialect on Postgres. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, Delimited, OneOf, OptionallyBracketed, Ref, Sequence, ) from sqlfluff.dialects import dialect_postgres as postgres postgres_dialect = load_raw_dialect("postgres") greenplum_dialect = postgres_dialect.copy_as("greenplum") greenplum_dialect.sets("reserved_keywords").update( ["DISTRIBUTED", "RANDOMLY", "REPLICATED"] ) class DistributedBySegment(BaseSegment): """A DISTRIBUTED BY clause.""" type = "distributed_by" match_grammar = Sequence( "DISTRIBUTED", OneOf( "RANDOMLY", "REPLICATED", Sequence("BY", Bracketed(Delimited(Ref("ColumnReferenceSegment")))), ), ) class CreateTableStatementSegment(postgres.CreateTableStatementSegment): """A `CREATE TABLE` statement. As specified in https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-sql_commands-CREATE_TABLE.html This is overriden from Postgres to add the `DISTRIBUTED` clause. 
""" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( # A single COLLATE segment can come before or after # constraint segments OneOf( Ref("ColumnConstraintSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), ), ), ), ), Ref("TableConstraintSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), ), ) ), Sequence( "INHERITS", Bracketed(Delimited(Ref("TableReferenceSegment"))), optional=True, ), ), # Create OF syntax: Sequence( "OF", Ref("ParameterNameSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), ), # Create PARTITION OF syntax Sequence( "PARTITION", "OF", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), ), AnyNumberOf( Sequence( "PARTITION", "BY", OneOf("RANGE", "LIST", "HASH"), Bracketed( AnyNumberOf( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("FunctionSegment"), ), AnyNumberOf( Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("ParameterNameSegment", optional=True), ), ), ) ) ), ), Sequence("USING", Ref("ParameterNameSegment")), Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), ), optional=True, ), ), ) ), ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment")), Ref("DistributedBySegment"), ), ) class CreateTableAsStatementSegment(postgres.CreateTableAsStatementSegment): """A `CREATE TABLE AS` statement. As specified in https://docs.vmware.com/en/VMware-Tanzu-Greenplum/6/greenplum-database/GUID-ref_guide-sql_commands-CREATE_TABLE_AS.html This is overriden from Postgres to add the `DISTRIBUTED` clause. 
""" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar"), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Bracketed( Delimited(Ref("ColumnReferenceSegment")), optional=True, ), Sequence("USING", Ref("ParameterNameSegment"), optional=True), OneOf( Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), ), optional=True, ), ) ) ), ), Sequence("WITHOUT", "OIDS"), optional=True, ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), optional=True, ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), ), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), Ref("DistributedBySegment", optional=True), ) class UnorderedSelectStatementSegment(postgres.UnorderedSelectStatementSegment): """Overrides Postgres Statement, adding DISTRIBUTED BY as a terminator.""" match_grammar = postgres.UnorderedSelectStatementSegment.match_grammar.copy( terminators=[ Ref("DistributedBySegment"), ], ) class SelectStatementSegment(postgres.SelectStatementSegment): """Overrides Postgres Statement, adding DISTRIBUTED BY as a terminator.""" match_grammar = postgres.SelectStatementSegment.match_grammar.copy( terminators=[ Ref("DistributedBySegment"), ], ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_hive.py000066400000000000000000000764771451700765000225700ustar00rootroot00000000000000"""The Hive dialect.""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, BaseSegment, Bracketed, CodeSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexParser, Sequence, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_hive_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") hive_dialect = ansi_dialect.copy_as("hive") # Clear ANSI Keywords and add all Hive keywords # Commented clearing for now as some are needed for some statements imported # from ANSI to work # hive_dialect.sets("unreserved_keywords").clear() hive_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) # hive_dialect.sets("reserved_keywords").clear() hive_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) hive_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Hive adds these timeunit aliases for intervals "to aid portability / readability" # https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals hive_dialect.sets("datetime_units").update( [ "NANO", "NANOS", "SECONDS", "MINUTES", "HOURS", "DAYS", "WEEKS", "MONTHS", "YEARS", ] ) hive_dialect.add( StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), JsonfileKeywordSegment=StringParser("JSONFILE", KeywordSegment, type="file_format"), 
RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), LocationGrammar=Sequence("LOCATION", Ref("QuotedLiteralSegment")), PropertyGrammar=Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), SerdePropertiesGrammar=Sequence( "WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar") ), TerminatedByGrammar=Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment")), FileFormatGrammar=OneOf( "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), StoredAsGrammar=Sequence("STORED", "AS", Ref("FileFormatGrammar")), StoredByGrammar=Sequence( "STORED", "BY", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), StorageFormatGrammar=OneOf( Sequence( Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), ), Ref("StoredByGrammar"), ), CommentGrammar=Sequence("COMMENT", Ref("QuotedLiteralSegment")), PartitionSpecGrammar=Sequence( "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", ), ) # https://cwiki.apache.org/confluence/display/hive/languagemanual+joins hive_dialect.replace( JoinKeywordsGrammar=Sequence(Sequence("SEMI", optional=True), "JOIN"), QuotedLiteralSegment=OneOf( TypedParser("single_quote", LiteralSegment, type="quoted_literal"), TypedParser("double_quote", LiteralSegment, type="quoted_literal"), TypedParser("back_quote", LiteralSegment, type="quoted_literal"), ), TrimParametersGrammar=Nothing(), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[ Ref("BackQuotedIdentifierSegment"), ] ), SelectClauseTerminatorGrammar=ansi_dialect.get_grammar( "SelectClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), WhereClauseTerminatorGrammar=ansi_dialect.get_grammar( "WhereClauseTerminatorGrammar" ).copy( insert=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], before=Sequence("ORDER", "BY"), ), GroupByClauseTerminatorGrammar=OneOf( Sequence( OneOf("ORDER", "CLUSTER", "DISTRIBUTE", "SORT"), "BY", ), "LIMIT", "HAVING", "QUALIFY", "WINDOW", ), HavingClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "CLUSTER", "DISTRIBUTE", "SORT", ), "BY", ), "LIMIT", "QUALIFY", "WINDOW", ), # Full Apache Hive `CREATE ALTER` reference here: # https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-AlterTable AlterTableOptionsGrammar=ansi_dialect.get_grammar("AlterTableOptionsGrammar").copy( insert=[ # Exchange Sequence( "EXCHANGE", Ref("PartitionSpecGrammar"), "WITH", "TABLE", 
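# Illustrative example (table names and partition spec are invented):
#   ALTER TABLE t1 EXCHANGE PARTITION (ds = '2024-01-01') WITH TABLE t2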
Ref("TableReferenceSegment"), ), ] ), LikeGrammar=OneOf( "LIKE", "RLIKE", "ILIKE", "REGEXP", "IREGEXP" ), # Impala dialect uses REGEXP and IREGEXP ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Sequence( "ARRAY", Bracketed( Ref("DatatypeSegment"), bracket_type="angle", bracket_pairs_set="angle_bracket_pairs", optional=True, ), ) class StructTypeSegment(ansi.StructTypeSegment): """Expression to construct a STRUCT datatype.""" match_grammar = Sequence( "STRUCT", Ref("StructTypeSchemaSegment", optional=True), ) class StructTypeSchemaSegment(BaseSegment): """Expression to construct the schema of a STRUCT datatype.""" type = "struct_type_schema" match_grammar = Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), Ref("DatatypeSegment"), Ref("CommentGrammar", optional=True), ), bracket_pairs_set="angle_bracket_pairs", ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ) class CreateDatabaseStatementSegment(BaseSegment): """A `CREATE DATABASE` statement.""" type = "create_database_statement" match_grammar = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("CommentGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence("MANAGEDLOCATION", Ref("QuotedLiteralSegment"), optional=True), Sequence( "WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True ), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. Full Apache Hive `CREATE TABLE` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+ddl#LanguageManualDDL-CreateTable """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("TEMPORARY", optional=True), Ref.keyword("EXTERNAL", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( # TODO: support all constraints Ref("TableConstraintSegment", optional=True), Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), bracket_pairs_set="angle_bracket_pairs", ), optional=True, ), Ref("CommentGrammar", optional=True), # `STORED AS` can be called before or after the additional table # properties below Ref("StoredAsGrammar", optional=True), Sequence( "PARTITIONED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), Sequence( "SORTED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ) ) ), optional=True, ), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), # Second call of `STORED AS` to match when appears after Ref("StoredAsGrammar", optional=True), Ref("SkewedByClauseSegment", optional=True), Ref("StorageFormatGrammar", optional=True), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Ref("CommentGrammar", optional=True), Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ), # Create like syntax Sequence( "LIKE", Ref("TableReferenceSegment"), Ref("LocationGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), ), ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. 
for CREATE TABLE.""" type = "table_constraint" match_grammar: Matchable = Sequence( Sequence("CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True), OneOf( Sequence( "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), ), Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Sequence( "DISABLE", "NOVALIDATE", OneOf("RELY", "NORELY", optional=True), optional=True, ), ), Sequence( Ref("ForeignKeyGrammar"), Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] Sequence("DISABLE", "NOVALIDATE", optional=True), ), ), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """Modified from ANSI to allow for `LATERAL VIEW` clause.""" match_grammar = ansi.FromExpressionElementSegment.match_grammar.copy( insert=[ AnyNumberOf(Ref("LateralViewClauseSegment")), ], before=Ref("PostTableExpressionGrammar", optional=True), ) class LateralViewClauseSegment(BaseSegment): """A `LATERAL VIEW` in a `FROM` clause. https://cwiki.apache.org/confluence/display/hive/languagemanual+lateralview """ type = "lateral_view_clause" match_grammar = Sequence( Indent, "LATERAL", "VIEW", Ref.keyword("OUTER", optional=True), Ref("FunctionSegment"), # NB: AliasExpressionSegment is not used here for table # or column alias because `AS` is optional within it # (and in most scenarios). Here it's explicitly defined # for when it is required and not allowed. Ref("SingleIdentifierGrammar", optional=True), Sequence( "AS", Delimited( Ref("SingleIdentifierGrammar"), ), ), Dedent, ) class PrimitiveTypeSegment(BaseSegment): """Primitive data types.""" type = "primitive_type" match_grammar = OneOf( "TINYINT", "SMALLINT", "INT", "INTEGER", "BIGINT", "BOOLEAN", "FLOAT", Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)), "STRING", "BINARY", "TIMESTAMP", Sequence( OneOf("DECIMAL", "DEC", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "DATE", "VARCHAR", "CHAR", "JSON", ) class DatatypeSegment(BaseSegment): """Data types.""" type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("ArrayTypeSegment"), Ref("SizedArrayTypeSegment"), Sequence( "MAP", Bracketed( Sequence( Ref("PrimitiveTypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Ref("StructTypeSegment"), Sequence( "UNIONTYPE", Bracketed( Delimited( Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs" ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), ) class SkewedByClauseSegment(BaseSegment): """`SKEWED BY` clause in a CREATE / ALTER statement.""" type = "skewed_by_clause" match_grammar = Sequence( "SKEWED", "BY", Ref("BracketedColumnReferenceListGrammar"), "ON", Bracketed( Delimited( OneOf( Ref("LiteralGrammar"), Bracketed(Delimited(Ref("LiteralGrammar"))) ) ) ), Sequence("STORED", "AS", "DIRECTORIES", optional=True), ) class RowFormatClauseSegment(BaseSegment): """`ROW FORMAT` clause in a CREATE statement.""" type = "row_format_clause" match_grammar = Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Sequence( "FIELDS", Ref("TerminatedByGrammar"), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "COLLECTION", "ITEMS", Ref("TerminatedByGrammar"), optional=True ), Sequence("MAP", "KEYS", Ref("TerminatedByGrammar"), optional=True), Sequence("LINES", Ref("TerminatedByGrammar"), optional=True), Sequence( "NULL", "DEFINED", "AS", Ref("QuotedLiteralSegment"), optional=True ), 
), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE/SCHEMA` statement.""" type = "alter_database_statement" match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), "SET", OneOf( Sequence("DBPROPERTIES", Ref("BracketedPropertyListGrammar")), Sequence( "OWNER", OneOf("USER", "ROLE"), Ref("QuotedLiteralSegment"), ), Ref("LocationGrammar"), Sequence("MANAGEDLOCATION", Ref("QuotedLiteralSegment")), ), ) class DropTableStatementSegment(BaseSegment): """A `DROP TABLE` statement.""" type = "drop_table_statement" match_grammar = Sequence( "DROP", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref.keyword("PURGE", optional=True), ) class TruncateStatementSegment(BaseSegment): """`TRUNCATE TABLE` statement.""" type = "truncate_table" match_grammar = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) class SetStatementSegment(BaseSegment): """A `SET` statement. https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Commands """ type = "set_statement" match_grammar = Sequence( "SET", OneOf( # set -v Sequence( StringParser("-", SymbolSegment, type="option_indicator"), StringParser("v", CodeSegment, type="option"), ), # set key = value Sequence( Delimited( Ref("ParameterNameSegment"), delimiter=OneOf(Ref("DotSegment"), Ref("ColonDelimiterSegment")), allow_gaps=False, ), Ref("RawEqualsSegment"), Ref("LiteralGrammar"), ), optional=True, ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterDatabaseStatementSegment"), Ref("MsckRepairTableStatementSegment"), Ref("MsckTableStatementSegment"), Ref("SetStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. Full Apache Hive `INSERT` reference here: https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DML """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( Sequence( "OVERWRITE", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("SelectableGrammar"), ), Sequence( Sequence("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment"), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), Ref("SelectableGrammar"), ), ), ), Sequence( "INTO", Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), OneOf( Ref("SelectableGrammar"), Ref("ValuesClauseSegment"), ), ), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. 
Full Apache Hive `INTERVAL` reference here: https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals """ type = "interval_expression" match_grammar = Sequence( Ref.keyword("INTERVAL", optional=True), OneOf( Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("DatetimeUnitSegment"), Sequence("TO", Ref("DatetimeUnitSegment"), optional=True), ), ), ) class MsckRepairTableStatementSegment(BaseSegment): """An `MSCK REPAIR TABLE` statement. Updates the Hive metastore to be aware of any changes to partitions on the underlying file store. The `MSCK TABLE` command, and the corresponding class in the Hive dialect, MsckTableStatementSegment, is used to determine mismatches between the Hive metastore and file system. Essentially, it is a dry run of the `MSCK REPAIR TABLE` command. https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RecoverPartitions(MSCKREPAIRTABLE) """ type = "msck_repair_table_statement" match_grammar = Sequence( "MSCK", "REPAIR", "TABLE", Ref("TableReferenceSegment"), Sequence( OneOf( "ADD", "DROP", "SYNC", ), "PARTITIONS", optional=True, ), ) class MsckTableStatementSegment(BaseSegment): """An `MSCK TABLE` statement. Checks for differences between partition metadata in the Hive metastore and the underlying file system. Commonly used prior to the `MSCK REPAIR TABLE` command, corresponding to the class `MsckRepairTableStatementSegment` in the Hive dialect, to assess the size of updates for one-time or irregularly sized file system updates. https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RecoverPartitions(MSCKREPAIRTABLE) """ type = "msck_table_statement" match_grammar = Sequence( "MSCK", "TABLE", Ref("TableReferenceSegment"), Sequence( OneOf( "ADD", "DROP", "SYNC", ), "PARTITIONS", optional=True, ), ) class FunctionSegment(BaseSegment): """A scalar or aggregate function. Extended version of `ansi` to add support for row typecasting https://prestodb.io/docs/current/language/types.html#row ``` cast(row(val1, val2) as row(a integer, b integer)) ``` """ type = "function" match_grammar = OneOf( Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Sequence( Ref("DatePartFunctionNameSegment"), Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), parse_mode=ParseMode.GREEDY, ), ), ), Sequence( # This unusual syntax is used to cast the keyword ROW # to the function_name to avoid rule linting exceptions StringParser("ROW", KeywordSegment, type="function_name"), Bracketed( Delimited( Sequence( Ref("BaseExpressionElementGrammar"), ), ), ), "AS", "ROW", Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment", optional=True), ), ), ), ), Sequence( Sequence( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), ), ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions...
optional=True, ), parse_mode=ParseMode.GREEDY, ), ), Ref("PostFunctionGrammar", optional=True), ), ) class SamplingExpressionSegment(BaseSegment): """A sampling expression.""" type = "sample_expression" match_grammar = Sequence( "TABLESAMPLE", Bracketed( OneOf( Sequence( "BUCKET", Ref("NumericLiteralSegment"), "OUT", "OF", Ref("NumericLiteralSegment"), Sequence( "ON", OneOf( Ref("SingleIdentifierGrammar"), Ref("FunctionSegment"), ), optional=True, ), ), Sequence( Ref("NumericLiteralSegment"), OneOf("PERCENT", "ROWS", optional=True), ), RegexParser( r"\d+[bBkKmMgG]", CodeSegment, type="byte_length_literal", ), ), ), Ref( "AliasExpressionSegment", optional=True, ), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance unordered SELECT statement to include CLUSTER, DISTRIBUTE, SORT BY.""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( terminators=[ Ref("ClusterByClauseSegment"), Ref("DistributeByClauseSegment"), Ref("SortByClauseSegment"), ], ) class SelectStatementSegment(ansi.SelectStatementSegment): """Overriding SelectStatementSegment to allow for additional segment parsing.""" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ) class SelectClauseSegment(ansi.SelectClauseSegment): """Overriding SelectClauseSegment to allow for additional segment parsing.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( # Add additional terminators terminators=[ Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), ], ) class SetExpressionSegment(ansi.SetExpressionSegment): """Overriding SetExpressionSegment to allow for additional segment parsing.""" match_grammar = ansi.SetExpressionSegment.match_grammar.copy( insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ) class ClusterByClauseSegment(ansi.OrderByClauseSegment): """A `CLUSTER BY` clause like in `SELECT`.""" type = "clusterby_clause" match_grammar: Matchable = Sequence( "CLUSTER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), ), terminators=["LIMIT", Ref("FrameClauseUnitGrammar")], ), Dedent, ) class DistributeByClauseSegment(ansi.OrderByClauseSegment): """A `DISTRIBUTE BY` clause like in `SELECT`.""" type = "distributeby_clause" match_grammar: Matchable = Sequence( "DISTRIBUTE", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), ), terminators=[ "SORT", "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class SortByClauseSegment(ansi.OrderByClauseSegment): """A `SORT BY` clause like in `SELECT`.""" type = "sortby_clause" match_grammar: Matchable = Sequence( "SORT", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=["LIMIT", Ref("FrameClauseUnitGrammar")], ), Dedent, ) 
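# A hedged illustration (table/column names invented): the CLUSTER BY, # DISTRIBUTE BY and SORT BY segments above are intended to parse queries such as # SELECT col_a, col_b FROM some_table DISTRIBUTE BY col_a SORT BY col_b DESC NULLS LAST; # SELECT col_a FROM some_table CLUSTER BY col_a LIMIT 10;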
sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_hive_keywords.py000066400000000000000000000116121451700765000244730ustar00rootroot00000000000000"""A list of HiveQL keywords.""" RESERVED_KEYWORDS = [ "ALL", "ALTER", "AND", "ARRAY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH", "BY", "CASE", "CAST", "CHAR", "COLUMN", "CONF", "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURSOR", "DATABASE", "DATE", "DEC", "DECIMAL", "DELETE", "DESCRIBE", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "EXCHANGE", "EXISTS", "EXTENDED", "EXTERNAL", "FALSE", "FETCH", "FLOAT", "FOLLOWING", "FOR", "FROM", "FULL", "FUNCTION", "GRANT", "GROUP", "GROUPING", "HAVING", "IF", "IMPORT", "IN", "INNER", "INSERT", "INT", "INTEGER", "INTERSECT", "INTERVAL", "INTO", "IS", "JOIN", "LATERAL", "LEFT", "LESS", "LIKE", "LOCAL", "MACRO", "MAP", "MORE", "NONE", "NOT", "NULL", "NUMERIC", "OF", "ON", "OR", "ORDER", "OUT", "OUTER", "OVER", "PARTIALSCAN", "PARTITION", "PERCENT", "PRECEDING", "PRESERVE", "PROCEDURE", "RANGE", "READS", "REDUCE", "REVOKE", "RIGHT", "ROLLUP", "ROW", "ROWS", "SELECT", "SET", "SMALLINT", "TABLE", "TABLESAMPLE", "THEN", "TIMESTAMP", "TO", "TRANSFORM", "TRIGGER", "TRUE", "TRUNCATE", "UNBOUNDED", "UNION", "UNIQUEJOIN", "UPDATE", "USER", "USING", "UTC_TMESTAMP", "VALUES", "VARCHAR", "WHEN", "WHERE", "WINDOW", "WITH", "COMMIT", "ONLY", "REGEXP", "RLIKE", "ROLLBACK", "START", "CACHE", "CONSTRAINT", "FOREIGN", "PRIMARY", "REFERENCES", "DAYOFWEEK", "EXTRACT", "FLOOR", "INTEGER", "PRECISION", "VIEWS", "TIME", "NUMERIC", "SYNC", ] UNRESERVED_KEYWORDS = [ "ADD", "ADMIN", "AFTER", "ANALYZE", "ARCHIVE", "ASC", "BEFORE", "BERNOULLI", "BUCKET", "BUCKETS", "CASCADE", "CHANGE", "CLUSTER", "CLUSTERED", "CLUSTERSTATUS", "COLLECTION", "COLUMNS", "COMMENT", "COMPACT", "COMPACTIONS", "COMPUTE", "CONCATENATE", "CONTINUE", "DATA", "DATABASES", "DATETIME", "DAY", "DBPROPERTIES", "DEFERRED", "DEFINED", "DELIMITED", "DEPENDENCY", "DESC", "DIRECTORIES", "DIRECTORY", "DISABLE", "DISTRIBUTE", "ELEM_TYPE", "ENABLE", "ESCAPED", "EXCLUSIVE", "EXPLAIN", "EXPORT", "FIELDS", "FILE", "FILEFORMAT", "FIRST", "FORMAT", "FORMATTED", "FUNCTIONS", "HOLD_DDLTIME", "HOUR", "IDXPROPERTIES", "IGNORE", "INDEX", "INDEXES", "INPATH", "INPUTDRIVER", "INPUTFORMAT", "IREGEXP", # Impala dialect "ITEMS", "JAR", "KEYS", "KEY_TYPE", "LIMIT", "LINES", "LOAD", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "LONG", "MAPJOIN", "MATERIALIZED", "METADATA", "MINUS", "MINUTE", "MONTH", "MSCK", "NOSCAN", "NO_DROP", "OFFLINE", "OPTION", "OUTPUTDRIVER", "OUTPUTFORMAT", "OVERWRITE", "OWNER", "PARTITIONED", "PARTITIONS", "PLUS", "PRETTY", "PRINCIPALS", "PROTECTION", "PURGE", "READ", "READONLY", "REBUILD", "RECORDREADER", "RECORDWRITER", "REGEXP", "RELOAD", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "REPLICATION", "RESTRICT", "REWRITE", "RLIKE", "ROLE", "ROLES", "SCHEMA", "SCHEMAS", "SECOND", "SEMI", "SERDE", "SERDEPROPERTIES", "SERVER", "SETS", "SHARED", "SHOW", "SHOW_DATABASE", "SKEWED", "SORT", "SORTED", "SSL", "STATISTICS", "STORED", "STREAMTABLE", "STRING", "STRUCT", "SYSTEM", "TABLES", "TBLPROPERTIES", "TEMPORARY", "TERMINATED", "TINYINT", "TOUCH", "TRANSACTIONS", "UNARCHIVE", "UNDO", "UNIONTYPE", "UNLOCK", "UNSET", "UNSIGNED", "URI", "USE", "UTC", "UTCTIMESTAMP", "VALUE_TYPE", "VIEW", "WHILE", "YEAR", "AUTOCOMMIT", "ISOLATION", "LEVEL", "OFFSET", "SNAPSHOT", "TRANSACTION", "WORK", "WRITE", "ABORT", "KEY", "LAST", "NORELY", "NOVALIDATE", "NULLS", "RELY", "VALIDATE", "DETAIL", "DOW", "EXPRESSION", "OPERATOR", "QUARTER", 
"SUMMARY", "VECTORIZATION", "WEEK", "YEARS", "MONTHS", "WEEKS", "DAYS", "HOURS", "MINUTES", "SECONDS", "TIMESTAMPTZ", "ZONE", # File format "SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO", "JSONFILE", # Other "MANAGEDLOCATION", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_materialize.py000066400000000000000000000723031451700765000241230ustar00rootroot00000000000000"""The Materialize dialect. This is based on postgres dialect, since it was initially based off of Postgres. We should monitor in future and see if it should be rebased off of ANSI """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( Anything, BaseSegment, Bracketed, Delimited, KeywordSegment, MultiStringParser, OneOf, Ref, Sequence, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_materialize_keywords import ( materialize_reserved_keywords, materialize_unreserved_keywords, ) postgres_dialect = load_raw_dialect("postgres") materialize_dialect = postgres_dialect.copy_as("materialize") materialize_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", materialize_unreserved_keywords ) materialize_dialect.sets("reserved_keywords").clear() materialize_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", materialize_reserved_keywords ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterOwnerStatementSegment"), Ref("AlterConnectionRotateKeys"), Ref("AlterDefaultPrivilegesStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("AlterRenameStatementSegment"), Ref("AlterSecretStatementSegment"), Ref("AlterSetClusterStatementSegment"), Ref("AlterSourceSinkSizeStatementSegment"), Ref("CloseStatementSegment"), Ref("CopyToStatementSegment"), Ref("CopyFromStatementSegment"), Ref("CreateClusterStatementSegment"), Ref("CreateClusterReplicaStatementSegment"), Ref("CreateConnectionStatementSegment"), Ref("CreateIndexStatementSegment"), Ref("CreateMaterializedViewStatementSegment"), Ref("CreateSecretStatementSegment"), Ref("CreateSinkKafkaStatementSegment"), Ref("CreateSourceKafkaStatementSegment"), Ref("CreateSourceLoadGeneratorStatementSegment"), Ref("CreateSourcePostgresStatementSegment"), Ref("CreateSourceWebhookStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DropStatementSegment"), Ref("FetchStatementSegment"), Ref("GrantStatementSegment"), Ref("MaterializeExplainStatementSegment"), Ref("ShowStatementSegment"), Ref("ShowCreateStatementSegment"), Ref("ShowIndexesStatementSegment"), Ref("ShowMaterializedViewsStatementSegment"), Ref("DeclareStatementSegment"), ], remove=[ Ref("CreateIndexStatementSegment"), Ref("DropIndexStatementSegment"), ], ) materialize_dialect.sets("materialize_sizes").clear() materialize_dialect.sets("materialize_sizes").update( [ "3xsmall", "2xsmall", "xsmall", "small", "medium", "large", "xlarge", "2xlarge", "3xlarge", "4xlarge", "5xlarge", "6xlarge", ], ) materialize_dialect.add( InstanceSizes=OneOf( MultiStringParser( materialize_dialect.sets("materialize_sizes"), KeywordSegment, type="materialize_size", ), MultiStringParser( [ f"'{compression}'" for compression in materialize_dialect.sets("materialize_sizes") ], KeywordSegment, type="compression_type", ), ), InCluster=Sequence( "IN", "CLUSTER", Ref("ObjectReferenceSegment"), ), Privileges=OneOf( "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "USAGE", 
"CREATEROLE", "CREATEDB", "CREATECLUSTER", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), ) class AlterOwnerStatementSegment(BaseSegment): """A `ALTER OWNER` statement.""" type = "alter_owner_statement" match_grammar = Sequence( "ALTER", OneOf( "CONNECTION", "CLUSTER", Sequence("CLUSTER", "REPLICA"), "INDEX", "SOURCE", "SINK", "VIEW", Sequence("MATERIALIZED", "VIEW"), "TABLE", "SECRET", ), Ref("ObjectReferenceSegment"), Sequence("OWNER", "TO"), Ref("ObjectReferenceSegment"), ) class AlterConnectionRotateKeys(BaseSegment): """`ALTER CONNECTION` statement.""" type = "alter_connection_rotate_keys" match_grammar = Sequence( "ALTER", "CONNECTION", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "ROTATE", "KEYS", ) class AlterDefaultPrivilegesStatementSegment(BaseSegment): """A `ALTER DEFAULT PRIVILEGES` statement.""" type = "alter_default_privileges_statement" match_grammar = Sequence( Sequence("ALTER", "DEFAULT", "PRIVILEGES", "FOR"), OneOf( Sequence( OneOf("ROLE", "USER"), Ref("ObjectReferenceSegment"), ), Sequence("ALL", "ROLES"), ), Sequence( "IN", OneOf("SCHEMA", "DATABASE"), Ref("ObjectReferenceSegment"), optional=True, ), "GRANT", Ref("Privileges"), "ON", OneOf( "TABLES", "TYPES", "SECRETS", "CONNECTIONS", "DATABASES", "SCHEMAS", "CLUSTERS", ), "TO", Ref("ObjectReferenceSegment"), ) class AlterRenameStatementSegment(BaseSegment): """A `ALTER RENAME` statement.""" type = "alter_rename_statement" match_grammar = Sequence( "ALTER", OneOf( "CONNECTION", Sequence("CLUSTER", Ref.keyword("REPLICA", optional=True)), "INDEX", "SOURCE", "SINK", "VIEW", Sequence("MATERIALIZED", "VIEW"), "TABLE", "SECRET", ), Ref("ObjectReferenceSegment"), Sequence("RENAME", "TO"), Ref("ObjectReferenceSegment"), ) class AlterIndexStatementSegment(BaseSegment): """A `ALTER INDEX` statement.""" type = "alter_index_statement" match_grammar = Sequence( "ALTER", "INDEX", Ref("ObjectReferenceSegment"), Sequence("SET", "ENABLED"), ) class AlterSecretStatementSegment(BaseSegment): """A `ALTER SECRET` statement.""" type = "alter_secret_statement" match_grammar = Sequence( "ALTER", "SECRET", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "AS", Anything(), ) class AlterSetClusterStatementSegment(BaseSegment): """A `ALTER SET CLUSTER` statement.""" type = "alter_set_cluster_statement" match_grammar = Sequence( Sequence("ALTER", "MATERIALIZED", "VIEW"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence("IN", "CLUSTER"), Ref("ObjectReferenceSegment"), ) class AlterSourceSinkSizeStatementSegment(BaseSegment): """A `ALTER SOURCE/SINK SET SIZE` statement.""" type = "alter_source_sink_size_statement" match_grammar = Sequence( "ALTER", OneOf("SOURCE", "SINK"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "SET", Bracketed( "SIZE", Ref("InstanceSizes"), ), ) class CloseStatementSegment(BaseSegment): """A `CLOSE` statement.""" type = "close_statement" match_grammar = Sequence( "CLOSE", Ref("ObjectReferenceSegment"), ) class CopyToStatementSegment(BaseSegment): """A `COPY TO` statement.""" type = "copy_to_statement" match_grammar = Sequence( "COPY", Bracketed( # SELECT statement or SUBSCRIBE statement OneOf( Ref("SelectStatementSegment"), Sequence( "SUBSCRIBE", Ref("ObjectReferenceSegment"), ), Sequence( "VALUES", Delimited( Anything(), ), ), ), ), "TO", "STDOUT", Sequence( "WITH", Bracketed( Anything(), ), optional=True, ), ) class CopyFromStatementSegment(BaseSegment): """A `COPY FROM` statement.""" type = 
"copy_from_statement" match_grammar = Sequence( "COPY", Ref("ObjectReferenceSegment"), Bracketed( Anything(), optional=True, ), "FROM", "STDIN", Sequence( Sequence( "WITH", optional=True, ), Bracketed( Anything(), ), optional=True, ), ) class CreateClusterStatementSegment(BaseSegment): """A `CREATE CLUSTER` statement.""" type = "create_cluster_statement" match_grammar = Sequence( "CREATE", "CLUSTER", Ref("ObjectReferenceSegment"), OneOf( Sequence( "REPLICAS", Bracketed( Delimited( Anything(), ) ), optional=True, ), Sequence( Anything(), optional=True, ), ), ) class CreateClusterReplicaStatementSegment(BaseSegment): """A `CREATE CLUSTER REPLICA` statement.""" type = "create_cluster_replica_statement" match_grammar = Sequence( "CREATE", "CLUSTER", "REPLICA", Ref("ObjectReferenceSegment"), Sequence( Anything(), optional=True, ), ) class CreateConnectionStatementSegment(BaseSegment): """A `CREATE CONNECTION` statement.""" type = "create_connection_statement" match_grammar = Sequence( "CREATE", "CONNECTION", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "TO", OneOf( Sequence( "AWS", "PRIVATELINK", ), Sequence( "CONFLUENT", "SCHEMA", "REGISTRY", ), "KAFKA", "POSTGRES", Sequence( "SSH", "TUNNEL", ), ), Bracketed(Anything()), ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` statement.""" type = "create_index_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( "INDEX", Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), "ON", Ref("ObjectReferenceSegment"), Sequence( "USING", Anything(), optional=True, ), Bracketed( Delimited( Anything(), ) ), ), Sequence( "DEFAULT", "INDEX", Ref("InCluster", optional=True), "ON", Ref("ObjectReferenceSegment"), Sequence( "USING", Anything(), optional=True, ), ), ), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement.""" type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Ref("InCluster", optional=True), "AS", Anything(), ), Sequence( Ref("OrReplaceGrammar"), "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Ref("InCluster", optional=True), "AS", Anything(), ), ), ) class CreateSecretStatementSegment(BaseSegment): """A `CREATE SECRET` statement.""" type = "create_secret_statement" match_grammar = Sequence( "CREATE", "SECRET", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "AS", Anything(), ) class CreateSinkKafkaStatementSegment(BaseSegment): """A `CREATE SINK KAFKA` statement.""" type = "create_sink_kafka_statement" match_grammar = Sequence( "CREATE", "SINK", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), "FROM", Ref("ObjectReferenceSegment"), "INTO", Anything(), Sequence( "KEY", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ) ), optional=True, ), Sequence( "FORMAT", Anything(), optional=True, ), Sequence( "ENVELOPE", OneOf( "DEBEZIUM", "UPSERT", ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceKafkaStatementSegment(BaseSegment): """A `CREATE SOURCE KAFKA` statement.""" type = "create_source_kafka_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), 
Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), "FROM", "KAFKA", "CONNECTION", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Anything(), ) ), Sequence( "KEY", "FORMAT", Anything(), "VALUE", "FORMAT", Anything(), optional=True, ), Sequence( "FORMAT", Anything(), optional=True, ), Sequence( "INCLUDE", Delimited( Anything(), ), optional=True, ), Sequence( "ENVELOPE", OneOf( "NONE", "DEBEZIUM", "UPSERT", ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceLoadGeneratorStatementSegment(BaseSegment): """A `CREATE SOURCE LOAD GENERATOR` statement.""" type = "create_source_load_generator_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "LOAD", "GENERATOR", ), OneOf( "AUCTION", "COUNTER", "MARKETING", "TPCH", ), Bracketed( Delimited( Anything(), ), optional=True, ), Sequence( "FOR", "ALL", "TABLES", optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourcePostgresStatementSegment(BaseSegment): """A `CREATE SOURCE POSTGRES` statement.""" type = "create_source_postgres_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "POSTGRES", "CONNECTION", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Anything(), ) ), optional=True, ), OneOf( Sequence( "FOR", "ALL", "TABLES", ), Sequence( "FOR", "TABLES", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class CreateSourceWebhookStatementSegment(BaseSegment): """A `CREATE SOURCE WEBHOOK` statement.""" type = "create_source_load_generator_statement" match_grammar = Sequence( "CREATE", "SOURCE", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("InCluster", optional=True), Sequence( "FROM", "WEBHOOK", "BODY", "FORMAT", ), OneOf( "TEXT", "JSON", "BYTES", ), OneOf( Sequence( "INCLUDE", "HEADER", Sequence( Anything(), optional=True, ), ), Sequence( "INCLUDE", "HEADERS", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( "CHECK", Bracketed( "WITH", Bracketed( Delimited( Anything(), ) ), ), optional=True, ), Sequence( Anything(), optional=True, ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement.""" type = "create_type_statement" match_grammar = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence( "AS", Bracketed( Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("DatatypeSegment"), ), ), ), ), Sequence( "AS", OneOf( "LIST", "MAP", ), Bracketed( Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("EqualsSegment"), Anything(), ) ) ), ), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" match_grammar = Sequence( "CREATE", OneOf( "TEMP", "TEMPORARY", optional=True, ), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), "AS", Ref("SelectableGrammar"), ) class DropStatementSegment(BaseSegment): """A `DROP` statement.""" type = "drop_statement" match_grammar = Sequence( "DROP", OneOf( "CONNECTION", "CLUSTER", Sequence( "CLUSTER", "REPLICA", 
), "DATABASE", "INDEX", Sequence( "MATERIALIZED", "VIEW", ), "ROLE", "SECRET", "SCHEMA", "SINK", "SOURCE", "TABLE", "TYPE", "VIEW", "USER", ), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "CASCADE", ), Sequence( "RESTRICT", ), optional=True, ), ) class ShowStatementSegment(BaseSegment): """A Materialize `SHOW` statement.""" type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( "COLUMNS", "CONNECTIONS", "CLUSTERS", Sequence("CLUSTER", "REPLICAS"), "DATABASES", "INDEXES", Sequence("MATERIALIZED", "VIEWS"), "SECRETS", "SCHEMAS", "SINKS", "SOURCES", "TABLES", "TYPES", "VIEWS", "OBJECTS", ), Ref("ObjectReferenceSegment", optional=True), # FROM is optional for some object types Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), # Like or where is optional for some object types OneOf( Sequence( "LIKE", Ref("QuotedLiteralSegment"), ), Sequence( "WHERE", Ref("ExpressionSegment"), ), optional=True, ), ) class ShowCreateStatementSegment(BaseSegment): """A Materialize `SHOW CREATE` statement.""" type = "show_create_statement" match_grammar = Sequence( "SHOW", "CREATE", OneOf( Sequence("CONNECTION", optional=True), Sequence("INDEX", optional=True), Sequence("MATERIALIZED", "VIEW", optional=True), Sequence("SINK", optional=True), Sequence("SOURCE", optional=True), Sequence("TABLE", optional=True), Sequence("VIEW", optional=True), ), Ref("ObjectReferenceSegment"), ) class ShowIndexesStatementSegment(BaseSegment): """A Materialize `SHOW INDEXES` statement.""" type = "show_indexes_statement" match_grammar = Sequence( "SHOW", "INDEXES", Sequence( "ON", Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), Ref("InCluster", optional=True), OneOf( Sequence( "LIKE", Ref("QuotedLiteralSegment"), ), Sequence( "WHERE", Ref("ExpressionSegment"), ), optional=True, ), ) class ShowMaterializedViewsStatementSegment(BaseSegment): """A Materialize `SHOW MATERIALIZED VIEWS` statement.""" type = "show_materialized_views_statement" match_grammar = Sequence( "SHOW", "MATERIALIZED", "VIEWS", Sequence( "FROM", Ref("ObjectReferenceSegment"), optional=True, ), Ref("InCluster", optional=True), ) class MaterializeExplainStatementSegment(BaseSegment): """A `EXPLAIN` statement.""" type = "explain_statement" match_grammar = Sequence( "EXPLAIN", OneOf( Sequence( OneOf( "RAW", "DECORRELATED", "OPTIMIZED", "PHYSICAL", optional=True, ), "PLAN", optional=True, ), optional=True, ), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), Sequence( "AS", OneOf( "TEXT", "JSON", ), optional=True, ), Sequence( "FOR", optional=True, ), OneOf( Ref("SelectableGrammar"), Sequence( "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( Anything(), ), ), ) class FetchStatementSegment(BaseSegment): """A `FETCH` statement.""" type = "fetch_statement" match_grammar = Sequence( "FETCH", Sequence( "FORWARD", optional=True, ), OneOf( "ALL", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "FROM", optional=True, ), Ref("ObjectReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Anything(), ) ), optional=True, ), ) class GrantStatementSegment(BaseSegment): """A `GRANT` statement.""" type = "grant_statement" match_grammar = Sequence( "GRANT", Ref("Privileges"), "ON", OneOf( Sequence( OneOf( "TABLE", "TYPE", "SECRET", "CONNECTION", "DATABASE", "SCHEMA", "CLUSTER", optional=True, ), Delimited( Ref("ObjectReferenceSegment"), ), ), 
"SYSTEM", Sequence( "ALL", OneOf( Sequence( OneOf( "TABLES", "TYPES", "SECRETS", "CONNECTIONS", ), "IN", "SCHEMA", Delimited( Ref("ObjectReferenceSegment"), ), ), Sequence( OneOf("TABLES", "TYPES", "SECRETS", "CONNECTIONS", "SCHEMAS"), "IN", "DATABASE", Delimited( Ref("ObjectReferenceSegment"), ), ), "DATABASES", "SCHEMAS", "CLUSTERS", ), ), ), "TO", Sequence("GROUP", optional=True), Delimited( Ref("ObjectReferenceSegment"), ), ) class DeclareStatementSegment(BaseSegment): """A `DECLARE` statement.""" type = "declare_statement" match_grammar = Sequence( "DECLARE", Ref("ObjectReferenceSegment"), "CURSOR", Sequence( "WITHOUT", "HOLD", optional=True, ), "FOR", OneOf( Ref("SelectableGrammar"), Sequence( "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("ObjectReferenceSegment"), ), Sequence( Anything(), ), ), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_materialize_keywords.py000066400000000000000000000053661451700765000260570ustar00rootroot00000000000000"""A list of all Materialize SQL key words. https://materialize.com/docs/sql/identifiers """ materialize_reserved_keywords = """ALL ALTER AND ANY AS BY CAST CHECK CLUSTER CLUSTERS CONNECTION CONNECTIONS CONSTRAINT CREATE CROSS CURRENT DELETE DISTINCT DROP ELSE EXISTS FOLLOWING FOR FROM FULL GROUP ILIKE IN INNER INSERT INTERSECT INTO IS JOIN LATERAL LEFT LIKE NATURAL NOT NULL NULLIF OF ON OR ORDER RETURNING ROW ROWS SELECT SET SINK SINKS TO UNION UNIQUE UPDATE USING VALUES WHEN WHERE WITH """ materialize_unreserved_keywords = """ACCESS ACKS ARN ARRANGEMENT ARRAY ASC AT AUCTION AUTHORITY AVAILABILITY AVRO AWS BEGIN BETWEEN BIGINT BODY BOOLEAN BOTH BPCHAR BROKER BROKERS BUCKET BYTES CASCADE CASE CERTIFICATE CHAIN CHAR CHARACTER CHARACTERISTICS CLIENT CLOSE COALESCE COLLATE COLUMNS COMMIT COMMITTED COMPACTION COMPRESSION COMPUTE CONFLUENT COPY COUNT COUNTER CREATEROLE CREATEDB CREATECLUSTER CSV CURSOR DATABASE DATABASES DATUMS DAY DAYS DEALLOCATE DEBEZIUM DEBUG DEBUGGING DEC DECIMAL DECLARE DECORRELATED DEFAULT DELIMITED DELIMITER DESC DETAILS DISCARD DISCOVER DOT DOUBLE EFFORT ELEMENT ENABLE ENABLED END ENDPOINT ENFORCED ENVELOPE ESCAPE EXCEPT EXECUTE EXPECTED EXPLAIN EXTRACT FACTOR FALSE FETCH FIELDS FILTER FIRST FLOAT FOREIGN FORMAT FORWARD FULLNAME GENERATOR GRAPH GREATEST GROUPS GZIP HAVING HEADER HEADERS HOLD HOST HOUR HOURS ID IDEMPOTENCE IDLE IF IGNORE INCLUDE INDEX INDEXES INFO INHERIT INLINE INT INTEGER INTERSECT INTERVAL INTROSPECTION ISNULL ISOLATION JSON KAFKA KEY KEYS KINESIS LAST LATEST LEADING LEAST LEVEL LIMIT LIST LOAD LOCAL LOG LOGICAL LOGIN MANAGED MAP MARKETING MATCHING MATERIALIZE MATERIALIZED MAX MECHANISMS MESSAGE METADATA MINUTE MINUTES MODE MONTH MONTHS MS NAME NAMES NEXT NO NOLOGIN NONE NOSUPERUSER NOTICE NOTIFICATIONS NULLS OBJECTS OFFSET ONLY OPERATOR OPTIMIZED OPTIMIZER OPTIONS ORDINALITY OUTER OVER OWNER PARTITION PASSWORD PHYSICAL PLAN PLANS PORT POSITION POSTGRES PRECEDING PRECISION PREFIX PREPARE PRIMARY PRIVATELINK PROGRESS PROTOBUF PUBLICATION QUERY QUOTE RAISE RANGE RAW READ REAL REFERENCES REFRESH REGEX REGION REGISTRY REMOTE RENAME REPEATABLE REPLACE REPLICA REPLICAS REPLICATION RESET RESTRICT RETENTION RIGHT ROLE ROLES ROLLBACK ROTATE S3 SASL SCALE SCAN SCHEMA SCHEMAS SCRIPT SECOND SECONDS SECRET SECRETS SEED SEQUENCES SERIALIZABLE SERVICE SESSION SHOW SIZE SMALLINT SNAPSHOT SOME SOURCE SOURCES SQS SSH SSL START STDIN STDOUT STRATEGY STRING SUBSCRIBE SUBSOURCE SUBSTRING SUPERUSER SYSTEM TABLE TABLES TAIL TEMP TEMPORARY TEST TEXT THEN TICK TIES TIME TIMELINE TIMEOUT TIMESTAMP 
TOKEN TOPIC TPCH TRACE TRAILING TRANSACTION TRIM TRUE TUNNEL TYPE TYPES UNBOUNDED UNCOMMITTED UNKNOWN UPSERT URL USER USERNAME USERS VALUE VARCHAR VARYING VIEW VIEWS WARNING WEBHOOK WINDOW WIRE WITHOUT WORK WORKERS WRITE YEAR YEARS ZONE ZONES """ sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_mysql.py000066400000000000000000002375541451700765000227750ustar00rootroot00000000000000"""The MySQL dialect. For now the only change is the parsing of comments. https://dev.mysql.com/doc/refman/8.0/en/differences-from-ansi.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, BinaryOperatorSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_mysql_keywords import ( mysql_reserved_keywords, mysql_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") mysql_dialect = ansi_dialect.copy_as("mysql") mysql_dialect.patch_lexer_matchers( [ RegexLexer( "inline_comment", r"(-- |#)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("-- ", "#")}, ), # Pattern breakdown: # (?s) DOTALL (dot matches newline) # ( group1 start # ' single quote (start) # (?: non-capturing group: begin # \\' MySQL escaped single-quote # |'' or ANSI escaped single-quotes # |\\\\ or consecutive [escaped] backslashes # |[^'] or anything besides a single-quote # )* non-capturing group: end (zero or more times) # ' single quote (end of the single-quoted string) # (?!') negative lookahead: not single quote # ) group1 end RegexLexer( "single_quote", r"(?s)('(?:\\'|''|\\\\|[^'])*'(?!'))", CodeSegment, ), RegexLexer( "double_quote", r'(?s)("(?:\\"|""|\\\\|[^"])*"(?!"))', CodeSegment, ), ] ) mysql_dialect.insert_lexer_matchers( [ RegexLexer( "hexadecimal_literal", r"([xX]'([\da-fA-F][\da-fA-F])+'|0x[\da-fA-F]+)", LiteralSegment, segment_kwargs={"type": "numeric_literal"}, ), RegexLexer( "bit_value_literal", r"([bB]'[01]+'|0b[01]+)", LiteralSegment, segment_kwargs={"type": "numeric_literal"}, ), ], before="numeric_literal", ) # Set Keywords # Do not clear inherited unreserved ansi keywords. Too many are needed to parse well. # Just add MySQL unreserved keywords. 
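# As a hedged illustration of the patched string lexers above, each of the # following MySQL literals should lex as a single string token: # 'it''s' 'it\'s' "she said \"hi\"" 'a trailing backslash \\'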
mysql_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", mysql_unreserved_keywords ) mysql_dialect.sets("reserved_keywords").clear() mysql_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", mysql_reserved_keywords ) # Set the datetime units mysql_dialect.sets("datetime_units").clear() mysql_dialect.sets("datetime_units").update( [ # https://github.com/mysql/mysql-server/blob/1bfe02bdad6604d54913c62614bde57a055c8332/sql/sql_yacc.yy#L12321-L12345 # interval: "DAY_HOUR", "DAY_MICROSECOND", "DAY_MINUTE", "DAY_SECOND", "HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "MINUTE_MICROSECOND", "MINUTE_SECOND", "SECOND_MICROSECOND", "YEAR_MONTH", # interval_time_stamp "DAY", "WEEK", "HOUR", "MINUTE", "MONTH", "QUARTER", "SECOND", "MICROSECOND", "YEAR", ] ) mysql_dialect.replace( QuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("DoubleQuotedLiteralSegment"), Ref("SystemVariableSegment"), ] ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[ Ref("IndexHintClauseSegment"), Ref("SelectPartitionClauseSegment"), Ref("ForClauseSegment"), Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("IntoClauseSegment"), ] ), WhereClauseTerminatorGrammar=ansi_dialect.get_grammar( "WhereClauseTerminatorGrammar" ).copy( insert=[ Ref("IntoClauseSegment"), ], ), BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( insert=[ Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("VariableAssignmentSegment"), ] ), DateTimeLiteralGrammar=Sequence( # MySQL does not require the keyword to be specified: # https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html OneOf( "DATE", "TIME", "TIMESTAMP", optional=True, ), OneOf( TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal", ), Ref("NumericLiteralSegment"), ), ), QuotedLiteralSegment=AnyNumberOf( # MySQL allows whitespace-concatenated string literals (#1488). # Since these string literals can have comments between them, # we use grammar to handle this. 
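# e.g. (illustrative only) SELECT 'hello' ' ' "world"; -- the three adjacent # literals are matched as a single quoted_literal under this grammar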
TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), Ref("DoubleQuotedLiteralSegment"), min_times=1, ), UniqueKeyGrammar=Sequence( "UNIQUE", Ref.keyword("KEY", optional=True), ), # Odd syntax, but pr CharCharacterSetGrammar=Ref.keyword("BINARY"), DelimiterGrammar=OneOf(Ref("SemicolonSegment"), Ref("TildeSegment")), TildeSegment=StringParser("~", SymbolSegment, type="statement_terminator"), ParameterNameSegment=RegexParser( r"`?[A-Za-z0-9_]*`?", CodeSegment, type="parameter" ), SingleIdentifierGrammar=ansi_dialect.get_grammar("SingleIdentifierGrammar").copy( insert=[Ref("SessionVariableNameSegment")] ), AndOperatorGrammar=OneOf( StringParser("AND", BinaryOperatorSegment), StringParser("&&", BinaryOperatorSegment), ), OrOperatorGrammar=OneOf( StringParser("OR", BinaryOperatorSegment), StringParser("||", BinaryOperatorSegment), StringParser("XOR", BinaryOperatorSegment), ), NotOperatorGrammar=OneOf( StringParser("NOT", KeywordSegment, type="keyword"), StringParser("!", CodeSegment, type="not_operator"), ), Expression_C_Grammar=Sequence( Sequence( Ref("SessionVariableNameSegment"), Ref("WalrusOperatorSegment"), optional=True, ), ansi_dialect.get_grammar("Expression_C_Grammar"), ), ColumnConstraintDefaultGrammar=OneOf( Bracketed(ansi_dialect.get_grammar("ColumnConstraintDefaultGrammar")), ansi_dialect.get_grammar("ColumnConstraintDefaultGrammar"), ), NakedIdentifierSegment=SegmentGenerator( lambda dialect: RegexParser( r"([A-Z0-9_]*[A-Z][A-Z0-9_]*)|_", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), ) mysql_dialect.add( DoubleQuotedLiteralSegment=TypedParser( "double_quote", LiteralSegment, type="quoted_literal", trim_chars=('"',), ), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", ), SystemVariableSegment=RegexParser( r"@@(session|global)\.[A-Za-z0-9_]+", CodeSegment, type="system_variable", ), DoubleQuotedJSONPath=TypedParser( "double_quote", CodeSegment, type="json_path", trim_chars=('"',), ), SingleQuotedJSONPath=TypedParser( "single_quote", CodeSegment, type="json_path", trim_chars=("'",), ), ) class AliasExpressionSegment(BaseSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. """ type = "alias_expression" match_grammar = Sequence( Indent, Ref.keyword("AS", optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedLiteralSegment"), ), Dedent, ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. 
for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf( # Column type # DATETIME and TIMESTAMP take special logic Ref( "DatatypeSegment", exclude=OneOf("DATETIME", "TIMESTAMP"), ), Sequence( OneOf("DATETIME", "TIMESTAMP"), Sequence( Bracketed(Ref("NumericLiteralSegment"), optional=True), optional=True, ), Sequence(Sequence("NOT", optional=True), "NULL", optional=True), Sequence( "DEFAULT", OneOf( Sequence( OneOf("CURRENT_TIMESTAMP", "NOW"), Bracketed( Ref("NumericLiteralSegment", optional=True), optional=True, ), ), Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), Sequence( Sequence("ON", "UPDATE", optional=True), Sequence( OneOf("CURRENT_TIMESTAMP", "NOW"), Bracketed( Ref("NumericLiteralSegment", optional=True), optional=True, ), ), Sequence( Bracketed(Ref("NumericLiteralSegment")), optional=True, ), optional=True, ), ), ), Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """Create table segment. https://dev.mysql.com/doc/refman/8.0/en/create-table.html """ match_grammar = ansi.CreateTableStatementSegment.match_grammar.copy( insert=[ AnyNumberOf( Sequence( Ref.keyword("DEFAULT", optional=True), Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("ParameterNameSegment")), ), ), ], ) class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. https://dev.mysql.com/doc/refman/8.0/en/create-user.html """ match_grammar = Sequence( "CREATE", "USER", Ref("IfNotExistsGrammar", optional=True), Delimited( Sequence( Ref("RoleReferenceSegment"), Sequence( Delimited( Sequence( "IDENTIFIED", OneOf( Sequence( "BY", OneOf( Sequence("RANDOM", "PASSWORD"), Ref("QuotedLiteralSegment"), ), ), Sequence( "WITH", Ref("ObjectReferenceSegment"), Sequence( OneOf( Sequence( "BY", OneOf( Sequence("RANDOM", "PASSWORD"), Ref("QuotedLiteralSegment"), ), ), Sequence("AS", Ref("QuotedLiteralSegment")), Sequence( "INITIAL", "AUTHENTICATION", "IDENTIFIED", OneOf( Sequence( "BY", OneOf( Sequence( "RANDOM", "PASSWORD" ), Ref("QuotedLiteralSegment"), ), ), Sequence( "WITH", Ref("ObjectReferenceSegment"), "AS", Ref("QuotedLiteralSegment"), ), ), ), ), optional=True, ), ), ), ), delimiter="AND", ), optional=True, ), ), ), Sequence( "DEFAULT", "ROLE", Delimited(Ref("RoleReferenceSegment")), optional=True, ), Sequence( "REQUIRE", OneOf( "NONE", Delimited( OneOf( "SSL", "X509", Sequence("CIPHER", Ref("QuotedLiteralSegment")), Sequence("ISSUER", Ref("QuotedLiteralSegment")), Sequence("SUBJECT", Ref("QuotedLiteralSegment")), ), delimiter="AND", ), ), optional=True, ), Sequence( "WITH", AnyNumberOf( Sequence( OneOf( "MAX_QUERIES_PER_HOUR", "MAX_UPDATES_PER_HOUR", "MAX_CONNECTIONS_PER_HOUR", "MAX_USER_CONNECTIONS", ), Ref("NumericLiteralSegment"), ) ), optional=True, ), Sequence( AnyNumberOf( Sequence( "PASSWORD", "EXPIRE", Sequence( OneOf( "DEFAULT", "NEVER", Sequence("INTERVAL", Ref("NumericLiteralSegment"), "DAY"), ), optional=True, ), ), Sequence( "PASSWORD", "HISTORY", OneOf("DEFAULT", Ref("NumericLiteralSegment")), ), Sequence( "PASSWORD", "REUSE", "INTERVAL", OneOf("DEFAULT", Sequence(Ref("NumericLiteralSegment"), "DAY")), ), Sequence( "PASSWORD", "REQUIRE", "CURRENT", Sequence(OneOf("DEFAULT", "OPTIONAL"), optional=True), ), Sequence("FAILED_LOGIN_ATTEMPTS", 
Ref("NumericLiteralSegment")), Sequence( "PASSWORD_LOCK_TIME", OneOf(Ref("NumericLiteralSegment"), "UNBOUNDED"), ), ), optional=True, ), Sequence("ACCOUNT", OneOf("UNLOCK", "LOCK"), optional=True), Sequence( OneOf("COMMENT", "ATTRIBUTE"), Ref("QuotedLiteralSegment"), optional=True, ), ) class UpsertClauseListSegment(BaseSegment): """An `ON DUPLICATE KEY UPDATE` statement. https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html """ type = "upsert_clause_list" match_grammar = Sequence( "ON", "DUPLICATE", "KEY", "UPDATE", Delimited(Ref("SetClauseSegment")), ) class InsertRowAliasSegment(BaseSegment): """A row alias segment (used in `INSERT` statements). https://dev.mysql.com/doc/refman/8.0/en/insert.html """ type = "insert_row_alias" match_grammar = Sequence( "AS", Ref("SingleIdentifierGrammar"), Bracketed( Ref("SingleIdentifierListSegment"), optional=True, ), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. https://dev.mysql.com/doc/refman/8.0/en/insert.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf( "LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", optional=True, ), Ref.keyword("IGNORE", optional=True), Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), Sequence( "PARTITION", Bracketed( Ref("SingleIdentifierListSegment"), ), optional=True, ), Ref("BracketedColumnReferenceListGrammar", optional=True), AnySetOf( OneOf( Ref("ValuesClauseSegment"), Ref("SetClauseListSegment"), Sequence( OneOf( Ref("SelectableGrammar"), Sequence( "TABLE", Ref("TableReferenceSegment"), ), ), ), optional=False, ), Ref("InsertRowAliasSegment", optional=True), Ref("UpsertClauseListSegment", optional=True), ), ) class DeleteUsingClauseSegment(BaseSegment): """A `USING` clause froma `DELETE` Statement`.""" type = "using_clause" match_grammar = Sequence( "USING", Delimited( Ref("FromExpressionSegment"), ), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://dev.mysql.com/doc/refman/8.0/en/delete.html """ type = "delete_statement" match_grammar = Sequence( "DELETE", Ref.keyword("LOW_PRIORITY", optional=True), Ref.keyword("QUICK", optional=True), Ref.keyword("IGNORE", optional=True), OneOf( Sequence( "FROM", Delimited(Ref("TableReferenceSegment"), terminators=["USING"]), Ref("DeleteUsingClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["FROM"]), Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), ), Sequence( Ref("FromClauseSegment"), Ref("SelectPartitionClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more.""" match_grammar: Matchable = OneOf( ansi.ColumnConstraintSegment.match_grammar, Sequence("CHARACTER", "SET", Ref("NakedIdentifierSegment")), Sequence("COLLATE", Ref("CollationReferenceSegment")), ) class IndexTypeGrammar(BaseSegment): """index_type in table_constraint.""" type = "index_type" match_grammar = Sequence( "USING", OneOf("BTREE", "HASH"), ) class IndexOptionsSegment(BaseSegment): """index_option in `CREATE TABLE` and `ALTER TABLE` statement. 
https://dev.mysql.com/doc/refman/8.0/en/create-table.html https://dev.mysql.com/doc/refman/8.0/en/alter-table.html """ type = "index_option" match_grammar = AnySetOf( Sequence( "KEY_BLOCK_SIZE", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), ), Ref("IndexTypeGrammar"), Sequence("WITH", "PARSER", Ref("ObjectReferenceSegment")), Ref("CommentClauseSegment"), OneOf("VISIBLE", "INVISIBLE"), # (SECONDARY_)ENGINE_ATTRIBUTE supported in `CREATE TABLE` Sequence( "ENGINE_ATTRIBUTE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "SECONDARY_ENGINE_ATTRIBUTE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE, ALTER TABLE. https://dev.mysql.com/doc/refman/8.0/en/create-table.html https://dev.mysql.com/doc/refman/8.0/en/alter-table.html """ type = "table_constraint" # e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1) match_grammar = OneOf( Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment", optional=True), optional=True, ), OneOf( # UNIQUE [INDEX | KEY] [index_name] [index_type] (key_part,...) # [index_option] ... Sequence( "UNIQUE", OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment", optional=True), Ref("IndexTypeGrammar", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # PRIMARY KEY [index_type] (key_part,...) [index_option] ... Sequence( Ref("PrimaryKeyGrammar"), Ref("IndexTypeGrammar", optional=True), # Columns making up PRIMARY KEY constraint Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # FOREIGN KEY [index_name] (col_name,...) reference_definition Sequence( # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), Ref("IndexReferenceSegment", optional=True), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), "REFERENCES", Ref("ColumnReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for [MATCH FULL/PARTIAL/SIMPLE] ? # Later add support for [ ON DELETE/UPDATE action ] ? AnyNumberOf( Sequence( "ON", OneOf("DELETE", "UPDATE"), OneOf( "RESTRICT", "CASCADE", Sequence("SET", "NULL"), Sequence("NO", "ACTION"), Sequence("SET", "DEFAULT"), ), optional=True, ), ), ), # CHECK (expr) [[NOT] ENFORCED] Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), OneOf( "ENFORCED", Sequence("NOT", "ENFORCED"), optional=True, ), ), ), ), # {INDEX | KEY} [index_name] [index_type] (key_part,...) [index_option] ... Sequence( OneOf("INDEX", "KEY"), Ref("IndexReferenceSegment", optional=True), Ref("IndexTypeGrammar", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), # {FULLTEXT | SPATIAL} [INDEX | KEY] [index_name] (key_part,...) # [index_option] ... Sequence( OneOf("FULLTEXT", "SPATIAL"), OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment", optional=True), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), ), ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. 
https://dev.mysql.com/doc/refman/8.0/en/create-index.html https://mariadb.com/kb/en/create-index/ """ match_grammar = Sequence( "CREATE", OneOf("UNIQUE", "FULLTEXT", "SPATIAL", optional=True), "INDEX", Ref("IndexReferenceSegment"), Ref("IndexTypeGrammar", optional=True), "ON", Ref("TableReferenceSegment"), Ref("BracketedKeyPartListGrammar"), Ref("IndexOptionsSegment", optional=True), AnySetOf( Sequence( "ALGORITHM", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "INPLACE", "COPY", "NOCOPY", "INSTANT"), ), Sequence( "LOCK", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "NONE", "SHARED", "EXCLUSIVE"), ), ), ) class IntervalExpressionSegment(BaseSegment): """An interval expression segment. https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_adddate """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", Ref("ExpressionSegment"), Ref("DatetimeUnitSegment"), ) mysql_dialect.add( OutputParameterSegment=StringParser( "OUT", SymbolSegment, type="parameter_direction" ), InputParameterSegment=StringParser("IN", SymbolSegment, type="parameter_direction"), InputOutputParameterSegment=StringParser( "INOUT", SymbolSegment, type="parameter_direction" ), ProcedureParameterGrammar=OneOf( Sequence( OneOf( Ref("OutputParameterSegment"), Ref("InputParameterSegment"), Ref("InputOutputParameterSegment"), optional=True, ), Ref("ParameterNameSegment", optional=True), Ref("DatatypeSegment"), ), Ref("DatatypeSegment"), ), LocalVariableNameSegment=RegexParser( r"`?[a-zA-Z0-9_$]*`?", CodeSegment, type="variable", ), SessionVariableNameSegment=RegexParser( r"[@][a-zA-Z0-9_$]*", CodeSegment, type="variable", ), WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), VariableAssignmentSegment=Sequence( Ref("SessionVariableNameSegment"), Ref("WalrusOperatorSegment"), Ref("BaseExpressionElementGrammar"), ), ColumnPathOperatorSegment=StringParser( "->", SymbolSegment, type="column_path_operator" ), InlinePathOperatorSegment=StringParser( "->>", SymbolSegment, type="column_path_operator" ), BooleanDynamicSystemVariablesGrammar=OneOf( # Boolean dynamic system variables can be set to ON/OFF, TRUE/FALSE, or 0/1: # https://dev.mysql.com/doc/refman/8.0/en/dynamic-system-variables.html # This allows us to match ON/OFF & TRUE/FALSE as keywords and therefore apply # the correct capitalisation policy. OneOf("ON", "OFF"), OneOf("TRUE", "FALSE"), ), # (key_part, ...) 
# key_part: {col_name [(length)] | (expr)} [ASC | DESC] # https://dev.mysql.com/doc/refman/8.0/en/create-table.html # https://dev.mysql.com/doc/refman/8.0/en/alter-table.html # https://dev.mysql.com/doc/refman/8.0/en/create-index.html BracketedKeyPartListGrammar=Bracketed( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Sequence( Ref("ColumnReferenceSegment"), Bracketed(Ref("NumericLiteralSegment")), ), Bracketed(Ref("ExpressionSegment")), ), OneOf("ASC", "DESC", optional=True), ), ), ), ) mysql_dialect.insert_lexer_matchers( [ RegexLexer( "at_sign", r"@@?[a-zA-Z0-9_$]*(\.[a-zA-Z0-9_$]+)?", CodeSegment, segment_kwargs={"type": "at_sign_literal", "trim_chars": ("@",)}, ), ], before="word", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("double_ampersand", "&&", CodeSegment), ], before="ampersand", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("double_vertical_bar", "||", CodeSegment), ], before="vertical_bar", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("walrus_operator", ":=", CodeSegment), ], before="equals", ) mysql_dialect.insert_lexer_matchers( [ StringLexer("inline_path_operator", "->>", CodeSegment), StringLexer("column_path_operator", "->", CodeSegment), ], before="greater_than", ) class RoleReferenceSegment(ansi.RoleReferenceSegment): """A reference to an account, role, or user. https://dev.mysql.com/doc/refman/8.0/en/account-names.html https://dev.mysql.com/doc/refman/8.0/en/role-names.html """ match_grammar: Matchable = OneOf( Sequence( OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedLiteralSegment"), ), Sequence( Ref("AtSignLiteralSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("DoubleQuotedLiteralSegment"), ), optional=True, allow_gaps=False, ), allow_gaps=True, ), "CURRENT_USER", ) class DeclareStatement(BaseSegment): """DECLARE statement. 
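Hedged examples (identifiers invented): DECLARE done INT DEFAULT 0; DECLARE cur1 CURSOR FOR SELECT id FROM t; DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = 1;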
https://dev.mysql.com/doc/refman/8.0/en/declare-local-variable.html https://dev.mysql.com/doc/refman/8.0/en/declare-handler.html https://dev.mysql.com/doc/refman/8.0/en/declare-condition.html https://dev.mysql.com/doc/refman/8.0/en/declare-cursor.html """ type = "declare_statement" match_grammar = OneOf( Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CURSOR", "FOR", Ref("StatementSegment"), ), Sequence( "DECLARE", OneOf("CONTINUE", "EXIT", "UNDO"), "HANDLER", "FOR", OneOf( "SQLEXCEPTION", "SQLWARNING", Sequence("NOT", "FOUND"), Sequence( "SQLSTATE", Ref.keyword("VALUE", optional=True), Ref("QuotedLiteralSegment"), ), OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("NakedIdentifierSegment"), ), ), Sequence(Ref("StatementSegment")), ), Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CONDITION", "FOR", OneOf(Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment")), ), Sequence( "DECLARE", Ref("LocalVariableNameSegment"), Ref("DatatypeSegment"), Sequence( Ref.keyword("DEFAULT"), OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("FunctionSegment"), ), optional=True, ), ), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("DelimiterStatement"), Ref("CreateProcedureStatementSegment"), Ref("DeclareStatement"), Ref("SetAssignmentStatementSegment"), Ref("IfExpressionStatement"), Ref("WhileStatementSegment"), Ref("IterateStatementSegment"), Ref("RepeatStatementSegment"), Ref("LoopStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("PrepareSegment"), Ref("ExecuteSegment"), Ref("DeallocateSegment"), Ref("GetDiagnosticsSegment"), Ref("ResignalSegment"), Ref("CursorOpenCloseSegment"), Ref("CursorFetchSegment"), Ref("DropProcedureStatementSegment"), Ref("AlterTableStatementSegment"), Ref("AlterViewStatementSegment"), Ref("CreateViewStatementSegment"), Ref("RenameTableStatementSegment"), Ref("ResetMasterStatementSegment"), Ref("PurgeBinaryLogsStatementSegment"), Ref("HelpStatementSegment"), Ref("CheckTableStatementSegment"), Ref("ChecksumTableStatementSegment"), Ref("AnalyzeTableStatementSegment"), Ref("RepairTableStatementSegment"), Ref("OptimizeTableStatementSegment"), Ref("UpsertClauseListSegment"), Ref("InsertRowAliasSegment"), Ref("FlushStatementSegment"), Ref("LoadDataSegment"), Ref("ReplaceSegment"), Ref("AlterDatabaseStatementSegment"), Ref("ReturnStatementSegment"), ], remove=[ # handle CREATE SCHEMA in CreateDatabaseStatementSegment Ref("CreateSchemaStatementSegment"), ], ) class DelimiterStatement(BaseSegment): """DELIMITER statement.""" type = "delimiter_statement" match_grammar = Ref.keyword("DELIMITER") class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. 
https://dev.mysql.com/doc/refman/8.0/en/create-procedure.html """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListGrammar", optional=True), Ref("CommentClauseSegment", optional=True), Ref("CharacteristicStatement", optional=True), Ref("FunctionDefinitionGrammar"), ) class FunctionDefinitionGrammar(BaseSegment): """This is the body of a `CREATE FUNCTION` statement.""" type = "function_definition" match_grammar = Ref("TransactionStatementSegment") class CharacteristicStatement(BaseSegment): """A Characteristics statement for functions/procedures.""" type = "characteristic_statement" match_grammar = Sequence( OneOf("DETERMINISTIC", Sequence("NOT", "DETERMINISTIC")), Sequence("LANGUAGE", "SQL", optional=True), OneOf( Sequence("CONTAINS", "SQL", optional=True), Sequence("NO", "SQL", optional=True), Sequence("READS", "SQL", "DATA", optional=True), Sequence("MODIFIES", "SQL", "DATA", optional=True), optional=True, ), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. https://dev.mysql.com/doc/refman/8.0/en/create-procedure.html """ type = "create_function_statement" match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), Sequence( "RETURNS", Ref("DatatypeSegment"), ), Ref("CommentClauseSegment", optional=True), Ref("CharacteristicStatement"), Ref("FunctionDefinitionGrammar"), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE .. ALTER COLUMN` statement. Overriding ANSI to add `CHANGE COLUMN` and `DROP COLUMN` support. 
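For example (hedged, identifiers invented): ALTER TABLE t CHANGE COLUMN old_c new_c INT NOT NULL AFTER other_c; ALTER TABLE t DROP COLUMN c;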
https://dev.mysql.com/doc/refman/8.0/en/alter-table.html https://mariadb.com/kb/en/alter-table/ """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), # Add column Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("ColumnDefinitionSegment"), OneOf( Sequence( OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment") ), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), Sequence( "MODIFY", Ref.keyword("COLUMN", optional=True), Ref("ColumnDefinitionSegment"), OneOf( Sequence( OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment") ), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), # Add constraint Sequence( "ADD", Ref("TableConstraintSegment"), ), # Change column Sequence( "CHANGE", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), Ref("ColumnDefinitionSegment"), OneOf( Sequence( OneOf( "FIRST", Sequence("AFTER", Ref("ColumnReferenceSegment")), ), ), optional=True, ), ), # Drop column Sequence( "DROP", OneOf( Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), ), Sequence( OneOf("INDEX", "KEY", optional=True), Ref("IndexReferenceSegment"), ), Ref("PrimaryKeyGrammar"), Sequence( Ref("ForeignKeyGrammar"), Ref("ObjectReferenceSegment"), ), Sequence( OneOf("CHECK", "CONSTRAINT"), Ref("ObjectReferenceSegment"), ), ), ), # Alter constraint Sequence( "ALTER", OneOf("CHECK", "CONSTRAINT"), Ref("ObjectReferenceSegment"), OneOf( "ENFORCED", Sequence("NOT", "ENFORCED"), ), ), # Alter index Sequence( "ALTER", "INDEX", Ref("IndexReferenceSegment"), OneOf("VISIBLE", "INVISIBLE"), ), # Rename Sequence( "RENAME", OneOf( # Rename table Sequence( OneOf("AS", "TO", optional=True), Ref("TableReferenceSegment"), ), # Rename index Sequence( OneOf("INDEX", "KEY"), Ref("IndexReferenceSegment"), "TO", Ref("IndexReferenceSegment"), ), # Rename column Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), ), ), # Enable/Disable updating nonunique indexes Sequence( OneOf("DISABLE", "ENABLE"), "KEYS", ), ), ), ) class WithCheckOptionSegment(BaseSegment): """WITH [CASCADED | LOCAL] CHECK OPTION for CREATE/ALTER View Syntax. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-view.html """ type = "with_check_options" match_grammar: Matchable = Sequence( "WITH", OneOf("CASCADED", "LOCAL", optional=True), "CHECK", "OPTION", ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW .. AS ..` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", Sequence( "ALGORITHM", Ref("EqualsSegment"), OneOf("UNDEFINED", "MERGE", "TEMPTABLE"), optional=True, ), Ref("DefinerSegment", optional=True), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed( OneOf( Ref("SelectStatementSegment"), Ref("SetExpressionSegment"), ) ), Ref("WithCheckOptionSegment", optional=True), ) class CreateViewStatementSegment(BaseSegment): """An `CREATE VIEW .. AS ..` statement. 
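    For example (illustrative; names are invented):

        CREATE OR REPLACE ALGORITHM = MERGE VIEW v (a, b) AS
            SELECT x, y FROM t
        WITH CASCADED CHECK OPTION;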
As specified in https://dev.mysql.com/doc/refman/8.0/en/create-view.html """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence( "ALGORITHM", Ref("EqualsSegment"), OneOf("UNDEFINED", "MERGE", "TEMPTABLE"), optional=True, ), Ref("DefinerSegment", optional=True), Sequence("SQL", "SECURITY", OneOf("DEFINER", "INVOKER"), optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed( OneOf( Ref("SelectStatementSegment"), Ref("SetExpressionSegment"), ) ), Ref("WithCheckOptionSegment", optional=True), ) class ProcedureParameterListGrammar(BaseSegment): """The parameters for a procedure ie. `(in/out/inout name datatype)`.""" type = "procedure_parameter_list" match_grammar = Bracketed( Delimited( Ref("ProcedureParameterGrammar"), optional=True, ), ) class SetAssignmentStatementSegment(BaseSegment): """A `SET` statement. https://dev.mysql.com/doc/refman/8.0/en/set-variable.html """ type = "set_statement" match_grammar = Sequence( "SET", Delimited( Sequence( OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment") ), OneOf( Ref("EqualsSegment"), Ref("WalrusOperatorSegment"), ), AnyNumberOf( Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment"), Ref("SessionVariableNameSegment"), # Match boolean keywords before local variables. Ref("BooleanDynamicSystemVariablesGrammar"), Ref("LocalVariableNameSegment"), Ref("FunctionSegment"), Ref("ArithmeticBinaryOperatorGrammar"), Ref("ExpressionSegment"), ), ), ), ) class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement. https://dev.mysql.com/doc/refman/8.0/en/commit.html https://dev.mysql.com/doc/refman/8.0/en/begin-end.html """ type = "transaction_statement" match_grammar = OneOf( Sequence("START", "TRANSACTION"), Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), Sequence( "BEGIN", Ref.keyword("WORK", optional=True), Ref("StatementSegment"), ), ), Sequence( "LEAVE", Ref("SingleIdentifierGrammar", optional=True), ), Sequence( "COMMIT", Ref.keyword("WORK", optional=True), Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True), ), Sequence( "ROLLBACK", Ref.keyword("WORK", optional=True), ), Sequence( "END", Ref("SingleIdentifierGrammar", optional=True), ), ) class IfExpressionStatement(BaseSegment): """IF-THEN-ELSE-ELSEIF-END IF statement. 
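    For example (illustrative; adapted from the linked manual page):

        IF n > m THEN SET s = '>';
        ELSEIF n = m THEN SET s = '=';
        ELSE SET s = '<';
        END IF;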
https://dev.mysql.com/doc/refman/8.0/en/if.html """ type = "if_then_statement" match_grammar = AnyNumberOf( Sequence( "IF", Ref("ExpressionSegment"), "THEN", Ref("StatementSegment"), ), Sequence( "ELSEIF", Ref("ExpressionSegment"), "THEN", Ref("StatementSegment"), ), Sequence("ELSE", Ref("StatementSegment"), optional=True), Sequence("END", "IF"), ) class DefinerSegment(BaseSegment): """This is the body of a `CREATE FUNCTION` and `CREATE TRIGGER` statements.""" type = "definer_segment" match_grammar = Sequence( "DEFINER", Ref("EqualsSegment"), Ref("RoleReferenceSegment"), ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar = Sequence( OneOf("DISTINCT", "ALL", "DISTINCTROW", optional=True), Ref.keyword("HIGH_PRIORITY", optional=True), Ref.keyword("STRAIGHT_JOIN", optional=True), Ref.keyword("SQL_SMALL_RESULT", optional=True), Ref.keyword("SQL_BIG_RESULT", optional=True), Ref.keyword("SQL_BUFFER_RESULT", optional=True), Ref.keyword("SQL_CACHE", optional=True), Ref.keyword("SQL_NO_CACHE", optional=True), Ref.keyword("SQL_CALC_FOUND_ROWS", optional=True), optional=True, ) class IntoClauseSegment(BaseSegment): """This is an `INTO` clause for assigning variables in a select statement. https://dev.mysql.com/doc/refman/5.7/en/load-data.html https://dev.mysql.com/doc/refman/5.7/en/select-into.html """ type = "into_clause" match_grammar = Sequence( "INTO", OneOf( Delimited( AnyNumberOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), Sequence("DUMPFILE", Ref("QuotedLiteralSegment")), Sequence( "OUTFILE", Ref("QuotedLiteralSegment"), Sequence( "CHARACTER", "SET", Ref("NakedIdentifierSegment"), optional=True ), Sequence( OneOf("FIELDS", "COLUMNS"), Sequence( "TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( Ref.keyword("OPTIONALLY", optional=True), "ENCLOSED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True ), optional=True, ), Sequence( "LINES", Sequence( "STARTING", "BY", Ref("QuotedLiteralSegment"), optional=True ), Sequence( "TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), ), ), ), parse_mode=ParseMode.GREEDY_ONCE_STARTED, terminators=[Ref("SelectClauseTerminatorGrammar")], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment. 
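    Relative to ANSI, this adds MySQL-specific clauses such as INTO,
    index hints and FOR locking, e.g. (illustrative names):

        SELECT a, b INTO @a, @b FROM t;
        SELECT a FROM t USE INDEX (idx_a) FOR UPDATE;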
""" type = "select_statement" match_grammar = ( ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("IntoClauseSegment", optional=True)], before=Ref("FromClauseSegment", optional=True), ) .copy(insert=[Ref("ForClauseSegment", optional=True)]) .copy( insert=[Ref("IndexHintClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), ) .copy( insert=[Ref("SelectPartitionClauseSegment", optional=True)], before=Ref("WhereClauseSegment", optional=True), terminators=[ Ref("IntoClauseSegment"), Ref("ForClauseSegment"), Ref("IndexHintClauseSegment"), Ref("SelectPartitionClauseSegment"), Ref("UpsertClauseListSegment"), ], ) ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( terminators=[Ref("IntoKeywordSegment")], ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ # Inherit most of the parse grammar from the original. match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), Ref("IntoClauseSegment", optional=True), ], terminators=[ Ref("SetOperatorSegment"), Ref("UpsertClauseListSegment"), Ref("WithCheckOptionSegment"), ], # Overwrite the terminators, because we want to remove some from the # expression above. replace_terminators=True, ) class ForClauseSegment(BaseSegment): """This is the body of a `FOR` clause.""" type = "for_clause" match_grammar = OneOf( Sequence( Sequence( "FOR", OneOf("UPDATE", "SHARE"), ), Sequence("OF", Delimited(Ref("NakedIdentifierSegment")), optional=True), OneOf("NOWAIT", Sequence("SKIP", "LOCKED"), optional=True), ), Sequence("LOCK", "IN", "SHARE", "MODE"), optional=True, ) class IndexHintClauseSegment(BaseSegment): """This is the body of an index hint clause.""" type = "index_hint_clause" match_grammar = Sequence( OneOf("USE", "IGNORE", "FORCE"), OneOf("INDEX", "KEY"), Sequence( "FOR", OneOf( "JOIN", Sequence("ORDER", "BY"), Sequence("GROUP", "BY"), optional=True ), optional=True, ), Bracketed(Ref("ObjectReferenceSegment")), Ref("JoinOnConditionSegment", optional=True), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://dev.mysql.com/doc/refman/8.0/en/call.html """ type = "call_segment" match_grammar = Sequence( "CALL", OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), Bracketed( AnyNumberOf( Delimited( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("DoubleQuotedLiteralSegment"), Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("FunctionSegment"), ), ), ), ) class SelectPartitionClauseSegment(BaseSegment): """This is the body of a partition clause.""" type = "partition_clause" match_grammar = Sequence( "PARTITION", Bracketed(Delimited(Ref("ObjectReferenceSegment"))), ) class WhileStatementSegment(BaseSegment): """A `WHILE-DO-END WHILE` statement. 
https://dev.mysql.com/doc/refman/8.0/en/while.html """ type = "while_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), Sequence( "WHILE", Ref("ExpressionSegment"), "DO", AnyNumberOf( Ref("StatementSegment"), ), ), ), Sequence( "END", "WHILE", Ref("SingleIdentifierGrammar", optional=True), ), ) class PrepareSegment(BaseSegment): """This is the body of a `PREPARE` statement. https://dev.mysql.com/doc/refman/8.0/en/prepare.html """ type = "prepare_segment" match_grammar = Sequence( "PREPARE", Ref("NakedIdentifierSegment"), "FROM", OneOf( Ref("QuotedLiteralSegment"), Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), ) class GetDiagnosticsSegment(BaseSegment): """This is the body of a `GET DIAGNOSTICS` statement. https://dev.mysql.com/doc/refman/8.0/en/get-diagnostics.html """ type = "get_diagnostics_segment" match_grammar = Sequence( "GET", Sequence("CURRENT", "STACKED", optional=True), "DIAGNOSTICS", Delimited( Sequence( OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment") ), Ref("EqualsSegment"), OneOf("NUMBER", "ROW_COUNT"), ), optional=True, ), "CONDITION", OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("NumericLiteralSegment"), ), Delimited( Sequence( OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment") ), Ref("EqualsSegment"), OneOf( "CLASS_ORIGIN", "SUBCLASS_ORIGIN", "RETURNED_SQLSTATE", "MESSAGE_TEXT", "MYSQL_ERRNO", "CONSTRAINT_CATALOG", "CONSTRAINT_SCHEMA", "CONSTRAINT_NAME", "CATALOG_NAME", "SCHEMA_NAME", "TABLE_NAME", "COLUMN_NAME", "CURSOR_NAME", ), ), optional=True, ), ) class LoopStatementSegment(BaseSegment): """A `LOOP` statement. https://dev.mysql.com/doc/refman/8.0/en/loop.html """ type = "loop_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), "LOOP", Delimited( Ref("StatementSegment"), ), ), Sequence( "END", "LOOP", Ref("SingleIdentifierGrammar", optional=True), ), ) class CursorOpenCloseSegment(BaseSegment): """This is a CLOSE or Open statement. https://dev.mysql.com/doc/refman/8.0/en/close.html https://dev.mysql.com/doc/refman/8.0/en/open.html """ type = "cursor_open_close_segment" match_grammar = Sequence( OneOf("CLOSE", "OPEN"), OneOf( Ref("SingleIdentifierGrammar"), Ref("QuotedIdentifierSegment"), ), ) class IterateStatementSegment(BaseSegment): """A `ITERATE` statement. https://dev.mysql.com/doc/refman/8.0/en/iterate.html """ type = "iterate_statement" match_grammar = Sequence( "ITERATE", Ref("SingleIdentifierGrammar"), ) class ExecuteSegment(BaseSegment): """This is the body of a `EXECUTE` statement. https://dev.mysql.com/doc/refman/8.0/en/execute.html """ type = "execute_segment" match_grammar = Sequence( "EXECUTE", Ref("NakedIdentifierSegment"), Sequence("USING", Delimited(Ref("SessionVariableNameSegment")), optional=True), ) class RepeatStatementSegment(BaseSegment): """A `REPEAT-UNTIL` statement. https://dev.mysql.com/doc/refman/8.0/en/repeat.html """ type = "repeat_statement" match_grammar = OneOf( Sequence( Sequence( Ref("SingleIdentifierGrammar"), Ref("ColonSegment"), optional=True ), "REPEAT", AnyNumberOf( Ref("StatementSegment"), ), ), Sequence( "UNTIL", Ref("ExpressionSegment"), Sequence( "END", "REPEAT", Ref("SingleIdentifierGrammar", optional=True), ), ), ) class DeallocateSegment(BaseSegment): """This is the body of a `DEALLOCATE/DROP` statement. 
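    For example (illustrative; the statement name is invented):

        DEALLOCATE PREPARE stmt1;
        DROP PREPARE stmt1;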
https://dev.mysql.com/doc/refman/8.0/en/deallocate-prepare.html """ type = "deallocate_segment" match_grammar = Sequence( Sequence(OneOf("DEALLOCATE", "DROP"), "PREPARE"), Ref("NakedIdentifierSegment"), ) class ResignalSegment(BaseSegment): """This is the body of a `RESIGNAL` statement. https://dev.mysql.com/doc/refman/8.0/en/resignal.html """ type = "resignal_segment" match_grammar = Sequence( OneOf("SIGNAL", "RESIGNAL"), OneOf( Sequence( "SQLSTATE", Ref.keyword("VALUE", optional=True), Ref("QuotedLiteralSegment"), ), Ref("NakedIdentifierSegment"), optional=True, ), Sequence( "SET", Delimited( Sequence( OneOf( "CLASS_ORIGIN", "SUBCLASS_ORIGIN", "RETURNED_SQLSTATE", "MESSAGE_TEXT", "MYSQL_ERRNO", "CONSTRAINT_CATALOG", "CONSTRAINT_SCHEMA", "CONSTRAINT_NAME", "CATALOG_NAME", "SCHEMA_NAME", "TABLE_NAME", "COLUMN_NAME", "CURSOR_NAME", ), Ref("EqualsSegment"), OneOf( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ) class CursorFetchSegment(BaseSegment): """This is a FETCH statement. https://dev.mysql.com/doc/refman/8.0/en/fetch.html """ type = "cursor_fetch_segment" match_grammar = Sequence( "FETCH", Sequence(Ref.keyword("NEXT", optional=True), "FROM", optional=True), Ref("NakedIdentifierSegment"), "INTO", Delimited( Ref("SessionVariableNameSegment"), Ref("LocalVariableNameSegment"), ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. https://dev.mysql.com/doc/refman/8.0/en/drop-index.html """ # DROP INDEX ON # [ALGORITHM [=] {DEFAULT | INPLACE | COPY} | LOCK [=] {DEFAULT | NONE | SHARED | # EXCLUSIVE}] match_grammar = Sequence( "DROP", "INDEX", Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence( "ALGORITHM", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "INPLACE", "COPY"), ), Sequence( "LOCK", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", "NONE", "SHARED", "EXCLUSIVE"), ), optional=True, ), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP` statement that addresses stored procedures and functions. https://dev.mysql.com/doc/refman/8.0/en/drop-procedure.html """ type = "drop_procedure_statement" # DROP {PROCEDURE | FUNCTION} [IF EXISTS] sp_name match_grammar = Sequence( "DROP", OneOf("PROCEDURE", "FUNCTION"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP` statement that addresses loadable functions. https://dev.mysql.com/doc/refman/8.0/en/drop-function-loadable.html """ type = "drop_function_statement" # DROP FUNCTION [IF EXISTS] function_name match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class RenameTableStatementSegment(BaseSegment): """A `RENAME TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/rename-table.html """ type = "rename_table_statement" match_grammar = Sequence( "RENAME", "TABLE", Delimited( Sequence( Ref("TableReferenceSegment"), "TO", Ref("TableReferenceSegment"), ), ), ) class ResetMasterStatementSegment(BaseSegment): """A `RESET MASTER` statement. https://dev.mysql.com/doc/refman/8.0/en/reset-master.html """ type = "reset_master_statement" match_grammar = Sequence( "RESET", "MASTER", Sequence("TO", Ref("NumericLiteralSegment"), optional=True), ) class PurgeBinaryLogsStatementSegment(BaseSegment): """A `PURGE BINARY LOGS` statement. 
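    For example (illustrative; adapted from the linked manual page):

        PURGE BINARY LOGS TO 'mysql-bin.010';
        PURGE BINARY LOGS BEFORE '2019-04-02 22:46:26';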
https://dev.mysql.com/doc/refman/8.0/en/purge-binary-logs.html """ type = "purge_binary_logs_statement" match_grammar = Sequence( "PURGE", OneOf( "BINARY", "MASTER", ), "LOGS", OneOf( Sequence( "TO", Ref("QuotedLiteralSegment"), ), Sequence( "BEFORE", OneOf( Ref("ExpressionSegment"), ), ), ), ) class HelpStatementSegment(BaseSegment): """A `HELP` statement. https://dev.mysql.com/doc/refman/8.0/en/help.html """ type = "help_statement" match_grammar = Sequence( "HELP", Ref("QuotedLiteralSegment"), ) class CheckTableStatementSegment(BaseSegment): """A `CHECK TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/check-table.html """ type = "check_table_statement" match_grammar = Sequence( "CHECK", "TABLE", Delimited( Ref("TableReferenceSegment"), ), AnyNumberOf( Sequence("FOR", "UPGRADE"), "QUICK", "FAST", "MEDIUM", "EXTENDED", "CHANGED", min_times=1, ), ) class ChecksumTableStatementSegment(BaseSegment): """A `CHECKSUM TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/checksum-table.html """ type = "checksum_table_statement" match_grammar = Sequence( "CHECKSUM", "TABLE", Delimited( Ref("TableReferenceSegment"), ), OneOf( "QUICK", "EXTENDED", ), ) class AnalyzeTableStatementSegment(BaseSegment): """An `ANALYZE TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/analyze-table.html """ type = "analyze_table_statement" match_grammar = Sequence( "ANALYZE", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", OneOf( Sequence( Delimited( Ref("TableReferenceSegment"), ), ), Sequence( Ref("TableReferenceSegment"), "UPDATE", "HISTOGRAM", "ON", Delimited( Ref("ColumnReferenceSegment"), ), Sequence( "WITH", Ref("NumericLiteralSegment"), "BUCKETS", optional=True, ), ), Sequence( Ref("TableReferenceSegment"), "DROP", "HISTOGRAM", "ON", Delimited( Ref("ColumnReferenceSegment"), ), ), ), ) class RepairTableStatementSegment(BaseSegment): """A `REPAIR TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/repair-table.html """ type = "repair_table_statement" match_grammar = Sequence( "REPAIR", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", Delimited( Ref("TableReferenceSegment"), ), AnyNumberOf( "QUICK", "EXTENDED", "USE_FRM", ), ) class OptimizeTableStatementSegment(BaseSegment): """An `OPTIMIZE TABLE` statement. https://dev.mysql.com/doc/refman/8.0/en/optimize-table.html """ type = "optimize_table_statement" match_grammar = Sequence( "OPTIMIZE", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), "TABLE", Delimited( Ref("TableReferenceSegment"), ), ) class UpdateStatementSegment(BaseSegment): """An `Update` statement. As per https://dev.mysql.com/doc/refman/8.0/en/update.html """ type = "update_statement" match_grammar: Matchable = Sequence( "UPDATE", Ref.keyword("LOW_PRIORITY", optional=True), Ref.keyword("IGNORE", optional=True), Delimited(Ref("TableReferenceSegment"), Ref("FromExpressionSegment")), Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class FlushStatementSegment(BaseSegment): """A `Flush` statement. 
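    For example (illustrative; table names are invented):

        FLUSH LOCAL LOGS, STATUS;
        FLUSH TABLES t1, t2 WITH READ LOCK;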
As per https://dev.mysql.com/doc/refman/8.0/en/flush.html """ type = "flush_statement" match_grammar: Matchable = Sequence( "FLUSH", OneOf( "NO_WRITE_TO_BINLOG", "LOCAL", optional=True, ), OneOf( Delimited( Sequence("BINARY", "LOGS"), Sequence("ENGINE", "LOGS"), Sequence("ERROR", "LOGS"), Sequence("GENERAL", "LOGS"), "HOSTS", "LOGS", "PRIVILEGES", "OPTIMIZER_COSTS", Sequence( "RELAY", "LOGS", Sequence( "FOR", "CHANNEL", Ref("ObjectReferenceSegment"), optional=True ), ), Sequence("SLOW", "LOGS"), "STATUS", "USER_RESOURCES", ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["WITH"]), optional=True, ), Sequence("WITH", "READ", "LOCK", optional=True), ), Sequence( "TABLES", Sequence( Delimited(Ref("TableReferenceSegment"), terminators=["FOR"]), optional=False, ), Sequence("FOR", "EXPORT", optional=True), ), ), ) class LoadDataSegment(BaseSegment): """A `LOAD DATA` statement. As per https://dev.mysql.com/doc/refman/8.0/en/load-data.html """ type = "load_data_statement" match_grammar = Sequence( "LOAD", "DATA", OneOf("LOW_PRIORITY", "CONCURRENT", optional=True), Sequence("LOCAL", optional=True), "INFILE", Ref("QuotedLiteralSegment"), OneOf("REPLACE", "IGNORE", optional=True), "INTO", "TABLE", Ref("TableReferenceSegment"), Ref("SelectPartitionClauseSegment", optional=True), Sequence("CHARACTER", "SET", Ref("NakedIdentifierSegment"), optional=True), Sequence( OneOf("FIELDS", "COLUMNS"), Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True), Sequence( Sequence("OPTIONALLY", optional=True), "ENCLOSED", "BY", Ref("QuotedLiteralSegment"), optional=True, ), Sequence("ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True), optional=True, ), Sequence( "LINES", Sequence("STARTING", "BY", Ref("QuotedLiteralSegment"), optional=True), Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment"), optional=True), optional=True, ), Sequence( "IGNORE", Ref("NumericLiteralSegment"), OneOf("LINES", "ROWS"), optional=True, ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Sequence( "SET", Ref("Expression_B_Grammar"), optional=True, ), ) class ReplaceSegment(BaseSegment): """A `REPLACE` statement. As per https://dev.mysql.com/doc/refman/8.0/en/replace.html """ type = "replace_statement" match_grammar = Sequence( "REPLACE", OneOf("LOW_PRIORITY", "DELAYED", optional=True), Sequence("INTO", optional=True), Ref("TableReferenceSegment"), Ref("SelectPartitionClauseSegment", optional=True), OneOf( Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment"), ), Ref("SetClauseListSegment"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("SelectableGrammar"), Sequence( "TABLE", Ref("TableReferenceSegment"), ), ), ), ), ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. 
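    For example (illustrative; adapted from the linked manual page):

        CREATE TRIGGER ins_sum BEFORE INSERT ON account
        FOR EACH ROW SET @sum = @sum + NEW.amount;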
As Specified in https://dev.mysql.com/doc/refman/8.0/en/create-trigger.html """ # "DEFINED = user", optional match_grammar = Sequence( "CREATE", Ref("DefinerSegment", optional=True), "TRIGGER", Ref("IfNotExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER"), OneOf("INSERT", "UPDATE", "DELETE"), "ON", Ref("TableReferenceSegment"), Sequence("FOR", "EACH", "ROW"), Sequence( OneOf("FOLLOWS", "PRECEDES"), Ref("SingleIdentifierGrammar"), optional=True ), OneOf( Ref("StatementSegment"), Sequence("BEGIN", Ref("StatementSegment"), "END"), ), ) class DropTriggerStatementSegment(ansi.DropTriggerStatementSegment): """A `DROP TRIGGER` Statement. As per https://dev.mysql.com/doc/refman/8.0/en/drop-trigger.html """ match_grammar = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), ) class ColumnReferenceSegment(ansi.ColumnReferenceSegment): """A reference to column, field or alias. Also allows `column->path` and `column->>path` for JSON values. https://dev.mysql.com/doc/refman/8.0/en/json-search-functions.html#operator_json-column-path """ match_grammar = ansi.ColumnReferenceSegment.match_grammar.copy( insert=[ Sequence( ansi.ColumnReferenceSegment.match_grammar.copy(), OneOf( Ref("ColumnPathOperatorSegment"), Ref("InlinePathOperatorSegment"), ), OneOf( Ref("DoubleQuotedJSONPath"), Ref("SingleQuotedJSONPath"), ), ), ] ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html """ match_grammar: Matchable = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), AnyNumberOf(Ref("CreateOptionSegment")), ) class CreateOptionSegment(BaseSegment): """A database characteristic. As specified in https://dev.mysql.com/doc/refman/8.0/en/create-database.html """ type = "create_option_segment" match_grammar = Sequence( Ref.keyword("DEFAULT", optional=True), OneOf( Sequence( "CHARACTER", "SET", Ref("EqualsSegment", optional=True), Ref("NakedIdentifierSegment"), ), Sequence( "COLLATE", Ref("EqualsSegment", optional=True), Ref("CollationReferenceSegment"), ), Sequence( "ENCRYPTION", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """A `ALTER DATABASE` statement. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html """ type = "alter_database_statement" match_grammar: Matchable = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment", optional=True), AnyNumberOf(Ref("AlterOptionSegment")), ) class AlterOptionSegment(BaseSegment): """A database characteristic. As specified in https://dev.mysql.com/doc/refman/8.0/en/alter-database.html """ type = "alter_option_segment" match_grammar = Sequence( OneOf( Sequence( Ref.keyword("DEFAULT", optional=True), "CHARACTER", "SET", Ref("EqualsSegment", optional=True), Ref("NakedIdentifierSegment"), ), Sequence( Ref.keyword("DEFAULT", optional=True), "COLLATE", Ref("EqualsSegment", optional=True), Ref("CollationReferenceSegment"), ), Sequence( Ref.keyword("DEFAULT", optional=True), "ENCRYPTION", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "READ", "ONLY", Ref("EqualsSegment", optional=True), OneOf("DEFAULT", Ref("NumericLiteralSegment")), ), ), ) class ReturnStatementSegment(BaseSegment): """A RETURN statement. 
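    For example, inside a stored function (illustrative; adapted from the
    linked manual page):

        RETURN CONCAT('Hello, ', s, '!');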
As specified in https://dev.mysql.com/doc/refman/8.0/en/return.html """ type = "return_statement" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment"), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_mysql_keywords.py000066400000000000000000000166401451700765000247130ustar00rootroot00000000000000"""A List of MySQL SQL keywords. https://dev.mysql.com/doc/refman/8.0/en/keywords.html """ mysql_reserved_keywords = """ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE BEFORE BETWEEN BIGINT BINARY BLOB BOTH BY CALL CASCADE CASE CHANGE CHAR CHARACTER CHECK COLLATE COLUMN CONDITION CONSTRAINT CONTINUE CONVERT CREATE CROSS CUME_DIST CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURSOR DATABASE DATABASES DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC DECIMAL DECLARE DEFAULT DELAYED DELETE DENSE_RANK DESC DESCRIBE DETERMINISTIC DISTINCT DISTINCTROW DIV DOUBLE DROP DUAL EACH ELSE ELSEIF EMPTY ENCLOSED ESCAPED EXCEPT EXISTS EXIT EXPLAIN FALSE FETCH FIRST_VALUE FLOAT FLOAT4 FLOAT8 FOR FORCE FOREIGN FROM FULLTEXT GENERATED GET GRANT GROUP GROUPING GROUPS HAVING HIGH_PRIORITY HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND IF IGNORE IN INDEX INFILE INNER INOUT INSENSITIVE INSERT INT INT1 INT2 INT3 INT4 INT8 INTEGER INTERSECT INTERVAL INTO IO_AFTER_GTIDS IO_BEFORE_GTIDS IS ITERATE JOIN JSON_TABLE KEY KEYS KILL LAG LAST_VALUE LATERAL LEAD LEADING LEAVE LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME LOCALTIMESTAMP LOCK LONG LONGBLOB LONGTEXT LOOP LOW_PRIORITY MASTER_BIND MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE MEDIUMBLOB MEDIUMINT MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND MINUTE_SECOND MOD MODIFIES NATURAL NOT NO_WRITE_TO_BINLOG NTH_VALUE NTILE NULL NUMERIC OF ON OPTIMIZE OPTIMIZER_COSTS OPTION OPTIONALLY OR ORDER OUT OUTER OUTFILE OVER PARTITION PERCENT_RANK PRECISION PRIMARY PROCEDURE PURGE RANGE RANK READ READS READ_WRITE REAL RECURSIVE REFERENCES REGEXP RELEASE RENAME REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN REVOKE RIGHT RLIKE ROW_NUMBER SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET SHOW SIGNAL SMALLINT SPATIAL SPECIFIC SQL SQLEXCEPTION SQLSTATE SQLWARNING SQL_BIG_RESULT SQL_CALC_FOUND_ROWS SQL_SMALL_RESULT SSL STARTING STORED STRAIGHT_JOIN SYSTEM TABLE TERMINATED THEN TINYBLOB TINYINT TINYTEXT TO TRAILING TRIGGER TRUE UNDO UNION UNIQUE UNLOCK UNSIGNED UPDATE USAGE USE USING UTC_DATE UTC_TIME UTC_TIMESTAMP VALUES VARBINARY VARCHAR VARCHARACTER VARYING VIRTUAL WHEN WHERE WHILE WINDOW WITH WRITE XOR YEAR_MONTH ZEROFILL """ mysql_unreserved_keywords = """ACCOUNT ACTION ACTIVE ADMIN AFTER AGAINST AGGREGATE ALGORITHM ALWAYS ANALYSE ANY ARRAY ASCII AT ATTRIBUTE AUTHENTICATION AUTOEXTEND_SIZE AUTO_INCREMENT AVG AVG_ROW_LENGTH BACKUP BEGIN BINLOG BIT BLOCK BOOL BOOLEAN BTREE BUCKETS BYTE CACHE CASCADED CATALOG_NAME CHAIN CHALLENGE_RESPONSE CHANGED CHANNEL CHARSET CHECKSUM CIPHER CLASS_ORIGIN CLIENT CLONE CLOSE COALESCE CODE COLLATION COLUMNS COLUMN_FORMAT COLUMN_NAME COMMENT COMMIT COMMITTED COMPACT COMPLETION COMPONENT COMPRESSED COMPRESSION CONCURRENT CONNECTION CONSISTENT CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONTAINS CONTEXT CPU CUBE CUME_DIST CURRENT CURSOR_NAME DATA DATAFILE DATE DATETIME DAY DEALLOCATE DEFAULT_AUTH DEFINER DEFINITION DELAY_KEY_WRITE DENSE_RANK DESCRIPTION DES_KEY_FILE DIAGNOSTICS DIRECTORY DISABLE DISCARD DISK DO DUMPFILE DUPLICATE DYNAMIC EMPTY ENABLE ENCRYPTION END ENDS ENFORCED ENGINE ENGINES ENGINE_ATTRIBUTE ENUM ERROR ERRORS ESCAPE EVENT EVENTS EVERY EXCHANGE EXCLUDE EXECUTE EXPANSION EXPIRE EXPORT EXTENDED EXTENT_SIZE 
FACTOR FAILED_LOGIN_ATTEMPTS FAST FAULTS FIELDS FILE FILE_BLOCK_SIZE FILTER FINISH FIRST FIRST_VALUE FIXED FLUSH FOLLOWING FOLLOWS FORMAT FOUND FULL FUNCTION GENERAL GEOMCOLLECTION GEOMETRY GEOMETRYCOLLECTION GET_FORMAT GET_MASTER_PUBLIC_KEY GET_SOURCE_PUBLIC_KEY GLOBAL GRANTS GROUPING GROUPS GROUP_REPLICATION GTID_ONLY HANDLER HASH HELP HISTOGRAM HISTORY HOST HOSTS HOUR IDENTIFIED IGNORE_SERVER_IDS IMPORT INACTIVE INDEXES INITIAL INITIAL_SIZE INITIATE INSERT_METHOD INSTALL INSTANCE INTERSECT INVISIBLE INVOKER IO IO_THREAD IPC ISOLATION ISSUER JSON JSON_TABLE JSON_VALUE KEYRING KEY_BLOCK_SIZE LAG LANGUAGE LAST LAST_VALUE LATERAL LEAD LEAVES LESS LEVEL LINESTRING LIST LOCAL LOCKED LOCKS LOGFILE LOGS MASTER MASTER_AUTO_POSITION MASTER_COMPRESSION_ALGORITHMS MASTER_CONNECT_RETRY MASTER_DELAY MASTER_HEARTBEAT_PERIOD MASTER_HOST MASTER_LOG_FILE MASTER_LOG_POS MASTER_PASSWORD MASTER_PORT MASTER_PUBLIC_KEY_PATH MASTER_RETRY_COUNT MASTER_SERVER_ID MASTER_SSL MASTER_SSL_CA MASTER_SSL_CAPATH MASTER_SSL_CERT MASTER_SSL_CIPHER MASTER_SSL_CRL MASTER_SSL_CRLPATH MASTER_SSL_KEY MASTER_TLS_CIPHERSUITES MASTER_TLS_VERSION MASTER_USER MASTER_ZSTD_COMPRESSION_LEVEL MAX_CONNECTIONS_PER_HOUR MAX_QUERIES_PER_HOUR MAX_ROWS MAX_SIZE MAX_UPDATES_PER_HOUR MAX_USER_CONNECTIONS MEDIUM MEMBER MEMORY MERGE MESSAGE_TEXT MICROSECOND MIGRATE MINUTE MIN_ROWS MODE MODIFY MONTH MULTILINESTRING MULTIPOINT MULTIPOLYGON MUTEX MYSQL_ERRNO NAME NAMES NATIONAL NCHAR NDB NDBCLUSTER NESTED NETWORK_NAMESPACE NEVER NEW NEXT NO NODEGROUP NONE NOWAIT NO_WAIT NTH_VALUE NTILE NULLS NUMBER NVARCHAR OF OFF OFFSET OJ OLD ONE ONLY OPEN OPTIONAL OPTIONS ORDINALITY ORGANIZATION OTHERS OVER OWNER PACK_KEYS PAGE PARSER PARSE_GCOL_EXPR PARTIAL PARTITIONING PARTITIONS PASSWORD PASSWORD_LOCK_TIME PATH PERCENT_RANK PERSIST PERSIST_ONLY PHASE PLUGIN PLUGINS PLUGIN_DIR POINT POLYGON PORT PRECEDES PRECEDING PREPARE PRESERVE PREV PRIVILEGES PRIVILEGE_CHECKS_USER PROCESS PROCESSLIST PROFILE PROFILES PROXY QUARTER QUERY QUICK RANDOM RANK READ_ONLY REBUILD RECOVER RECURSIVE REDOFILE REDO_BUFFER_SIZE REDUNDANT REFERENCE REGISTRATION RELAY RELAYLOG RELAY_LOG_FILE RELAY_LOG_POS RELAY_THREAD RELOAD REMOTE REMOVE REORGANIZE REPAIR REPEATABLE REPLICA REPLICAS REPLICATE_DO_DB REPLICATE_DO_TABLE REPLICATE_IGNORE_DB REPLICATE_IGNORE_TABLE REPLICATE_REWRITE_DB REPLICATE_WILD_DO_TABLE REPLICATE_WILD_IGNORE_TABLE REPLICATION REQUIRE_ROW_FORMAT RESET RESOURCE RESPECT RESTART RESTORE RESUME RETAIN RETURNED_SQLSTATE RETURNING RETURNS REUSE REVERSE ROLE ROLLBACK ROLLUP ROTATE ROUTINE ROW ROWS ROW_COUNT ROW_FORMAT ROW_NUMBER RTREE SAVEPOINT SCHEDULE SCHEMA_NAME SECOND SECONDARY SECONDARY_ENGINE SECONDARY_ENGINE_ATTRIBUTE SECONDARY_LOAD SECONDARY_UNLOAD SECURITY SERIAL SERIALIZABLE SERVER SESSION SHARE SHUTDOWN SIGNED SIMPLE SKIP SLAVE SLOW SNAPSHOT SOCKET SOME SONAME SOUNDS SOURCE SOURCE_AUTO_POSITION SOURCE_BIND SOURCE_COMPRESSION_ALGORITHMS SOURCE_CONNECT_RETRY SOURCE_DELAY SOURCE_HEARTBEAT_PERIOD SOURCE_HOST SOURCE_LOG_FILE SOURCE_LOG_POS SOURCE_PASSWORD SOURCE_PORT SOURCE_PUBLIC_KEY_PATH SOURCE_RETRY_COUNT SOURCE_SSL SOURCE_SSL_CA SOURCE_SSL_CAPATH SOURCE_SSL_CERT SOURCE_SSL_CIPHER SOURCE_SSL_CRL SOURCE_SSL_CRLPATH SOURCE_SSL_KEY SOURCE_SSL_VERIFY_SERVER_CERT SOURCE_TLS_CIPHERSUITES SOURCE_TLS_VERSION SOURCE_USER SOURCE_ZSTD_COMPRESSION_LEVEL SQL_AFTER_GTIDS SQL_AFTER_MTS_GAPS SQL_BEFORE_GTIDS SQL_BUFFER_RESULT SQL_CACHE SQL_NO_CACHE SQL_THREAD SQL_TSI_DAY SQL_TSI_HOUR SQL_TSI_MINUTE SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_SECOND SQL_TSI_WEEK SQL_TSI_YEAR SRID STACKED 
START STARTS STATS_AUTO_RECALC STATS_PERSISTENT STATS_SAMPLE_PAGES STATUS STOP STORAGE STREAM STRING SUBCLASS_ORIGIN SUBJECT SUBPARTITION SUBPARTITIONS SUPER SUSPEND SWAPS SWITCHES SYSTEM TABLES TABLESPACE TABLE_CHECKSUM TABLE_NAME TEMPORARY TEMPTABLE TEXT THAN THREAD_PRIORITY TIES TIME TIMESTAMP TIMESTAMPADD TIMESTAMPDIFF TLS TRANSACTION TRIGGERS TRUNCATE TYPE TYPES UNBOUNDED UNCOMMITTED UNDEFINED UNDOFILE UNDO_BUFFER_SIZE UNICODE UNINSTALL UNKNOWN UNREGISTER UNTIL UPGRADE USER USER_RESOURCES USE_FRM VALIDATION VALUE VARIABLES VCPU VIEW VISIBLE WAIT WARNINGS WEEK WEIGHT_STRING WINDOW WITHOUT WORK WRAPPER X509 XA XID XML YEAR ZONE """ # These are not MySQL keywords, but SQLFluff needs them to parse well. mysql_unreserved_keywords += """NOW SHARED INPLACE NOCOPY INSTANT """ sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_oracle.py000066400000000000000000000661741451700765000230730ustar00rootroot00000000000000"""The Oracle dialect. This inherits from the ansi dialect. """ from typing import cast from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseFileSegment, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, Delimited, IdentifierSegment, LiteralSegment, Matchable, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") oracle_dialect = ansi_dialect.copy_as("oracle") oracle_dialect.sets("unreserved_keywords").difference_update(["COMMENT"]) oracle_dialect.sets("reserved_keywords").update( [ "COMMENT", "ON", "UPDATE", "INDEXTYPE", "PROMPT", "FORCE", "OVERFLOW", "ERROR", "PRIVATE", "DEFINITION", "CONNECT", "SIBLINGS", "START", "CONNECT_BY_ROOT", "PIVOT", "FOR", "UNPIVOT", ] ) oracle_dialect.sets("unreserved_keywords").update( ["EDITIONABLE", "EDITIONING", "NONEDITIONABLE", "KEEP"] ) oracle_dialect.sets("bare_functions").clear() oracle_dialect.sets("bare_functions").update( [ "current_date", "current_timestamp", "dbtimezone", "localtimestamp", "sessiontimestamp", "sysdate", "systimestamp", ] ) oracle_dialect.patch_lexer_matchers( [ RegexLexer("word", r"[a-zA-Z][0-9a-zA-Z_$#]*", WordSegment), RegexLexer( "single_quote", r"'([^'\\]|\\|\\.|'')*'", CodeSegment, ), ] ) oracle_dialect.insert_lexer_matchers( [ RegexLexer( "prompt_command", r"PROMPT([^(\r\n)])*((?=\n)|(?=\r\n))?", CommentSegment, ), StringLexer("at_sign", "@", CodeSegment), ], before="word", ) oracle_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ StringLexer("right_arrow", "=>", CodeSegment), ], before="equals", ) oracle_dialect.add( AtSignSegment=StringParser("@", SymbolSegment, type="at_sign"), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), OnCommitGrammar=Sequence( "ON", "COMMIT", OneOf( Sequence(OneOf("DROP", "PRESERVE"), Ref.keyword("DEFINITION")), Sequence(OneOf("DELETE", "PRESERVE"), Ref.keyword("ROWS")), ), ), ConnectByRootGrammar=Sequence("CONNECT_BY_ROOT", Ref("NakedIdentifierSegment")), PlusJoinSegment=Bracketed( StringParser("+", SymbolSegment, type="plus_join_symbol") ), PlusJoinGrammar=OneOf( Sequence( OneOf(Ref("ColumnReferenceSegment"), Ref("FunctionSegment")), Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), Ref("PlusJoinSegment"), ), Sequence( Ref("ColumnReferenceSegment"), Ref("PlusJoinSegment"), Ref("EqualsSegment"), OneOf(Ref("ColumnReferenceSegment"), 
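                # Illustrative: together, this branch and the one above match
                # Oracle's legacy outer-join predicates, e.g.
                #   a.col = b.col (+)   and   a.col (+) = b.col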
Ref("FunctionSegment")), ), ), IntervalUnitsGrammar=OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), PivotForInGrammar=Sequence( "FOR", OptionallyBracketed(Delimited(Ref("ColumnReferenceSegment"))), "IN", Bracketed( Delimited( Sequence( Ref("Expression_D_Grammar"), Ref("AliasExpressionSegment", optional=True), ) ) ), ), UnpivotNullsGrammar=Sequence(OneOf("INCLUDE", "EXCLUDE"), "NULLS"), ) oracle_dialect.replace( # https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/DROP-TABLE.html DropBehaviorGrammar=Sequence( Sequence( "CASCADE", "CONSTRAINTS", optional=True, ), Ref.keyword("PURGE", optional=True), optional=True, ), NakedIdentifierSegment=SegmentGenerator( lambda dialect: RegexParser( r"[A-Z0-9_]*[A-Z][A-Z0-9_#$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), PostFunctionGrammar=AnyNumberOf( Ref("WithinGroupClauseSegment"), Ref("FilterClauseGrammar"), Ref("OverClauseSegment", optional=True), ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("NamedArgumentSegment"), ), FunctionContentsGrammar=ansi_dialect.get_grammar("FunctionContentsGrammar").copy( insert=[ Ref("ListaggOverflowClauseSegment"), ] ), TemporaryGrammar=Sequence( OneOf("GLOBAL", "PRIVATE"), Ref.keyword("TEMPORARY"), optional=True, ), ParameterNameSegment=RegexParser( r'[A-Z_][A-Z0-9_$]*|"[^"]*"', CodeSegment, type="parameter" ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("SqlplusVariableGrammar"), ], before=Ref("ArrayLiteralSegment"), ), BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( insert=[ Ref("ConnectByRootGrammar"), ] ), Expression_D_Grammar=Sequence( OneOf( Ref("PlusJoinGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) Ref("LocalAliasSegment"), # WHERE (LOCAL.a, LOCAL.b) IN (...) ), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("IntervalExpressionSegment"), Ref("TypedStructLiteralSegment"), Ref("ArrayExpressionSegment"), Ref("ColumnReferenceSegment"), # For triggers, we allow "NEW.*" but not just "*" nor "a.b.*" # So can't use WildcardIdentifierSegment nor WildcardExpressionSegment Sequence( Ref("SingleIdentifierGrammar"), Ref("ObjectReferenceDelimiterGrammar"), Ref("StarSegment"), ), Sequence( Ref("StructTypeSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( Ref("DatatypeSegment"), # Don't use the full LiteralGrammar here # because only some of them are applicable. # Notably we shouldn't use QualifiedNumericLiteralSegment # here because it looks like an arithmetic operation. 
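                # Illustrative: this branch matches a datatype keyword
                # followed by a simple literal, e.g. DATE '2020-01-01'.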
OneOf( Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("BooleanLiteralGrammar"), Ref("NullLiteralSegment"), Ref("DateTimeLiteralGrammar"), ), ), Ref("LocalAliasSegment"), terminators=[Ref("CommaSegment")], ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), Sequence( Ref("IntervalUnitsGrammar"), Sequence("TO", Ref("IntervalUnitsGrammar"), optional=True), ), ), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please keep the order below the same as Oracle's doc: """ match_grammar: Matchable = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), OneOf( # @TODO all stuff inside this "Delimited" is not validated for Oracle Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), ), ), Ref("AlterTablePropertiesSegment"), Ref("AlterTableColumnClausesSegment"), Ref("AlterTableConstraintClauses"), ), ) class AlterTablePropertiesSegment(BaseSegment): """ALTER TABLE `alter_table_properties` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's alter_table_properties grammar. """ type = "alter_table_properties" # TODO: There are many more alter_table_properties to implement match_grammar = OneOf( # Rename Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), ) class AlterTableColumnClausesSegment(BaseSegment): """ALTER TABLE `column_clauses` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's column_clauses grammar. """ type = "alter_table_column_clauses" match_grammar = OneOf( # add_column_clause # modify_column_clause Sequence( OneOf( "ADD", "MODIFY", ), OneOf( Ref("ColumnDefinitionSegment"), Bracketed(Delimited(Ref("ColumnDefinitionSegment"))), ), ), # drop_column_clause # @TODO: extend drop_column_clause Sequence( "DROP", OneOf( Sequence("COLUMN", Ref("ColumnReferenceSegment")), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), ), # @TODO: add_period_clause # @TODO: drop_period_clause # rename_column_clause Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ) # @TODO: modify_collection_retrieval # @TODO: modify_LOB_storage_clause # @TODO: alter_varray_col_properties ) class AlterTableConstraintClauses(BaseSegment): """ALTER TABLE `constraint_clauses` per defined in Oracle's grammar. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ALTER-TABLE.html If possible, please match the order of this sequence with what's defined in Oracle's constraint_clauses grammar. 
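    For example (illustrative; table and constraint names are invented):

        ALTER TABLE t ADD CONSTRAINT t_pk PRIMARY KEY (id)
        ALTER TABLE t DROP CONSTRAINT t_fk CASCADE KEEP INDEX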
""" type = "alter_table_constraint_clauses" match_grammar = OneOf( Sequence( "ADD", Ref("TableConstraintSegment"), ), # @TODO MODIFY # @TODO RENAME # @TODO DROP # drop_constraint_clause Sequence( "DROP", OneOf( Sequence( "PRIMARY", "KEY", ), Sequence( "UNIQUE", Bracketed(Ref("ColumnReferenceSegment")), ), Sequence("CONSTRAINT", Ref("ObjectReferenceSegment")), ), Ref.keyword("CASCADE", optional=True), Sequence( OneOf( "KEEP", "DROP", ), "INDEX", optional=True, ), Ref.keyword("ONLINE", optional=True), ), ) class ExecuteFileSegment(BaseSegment): """A reference to an indextype.""" type = "execute_file_statement" match_grammar = Sequence( OneOf( Sequence( Ref("AtSignSegment"), Ref("AtSignSegment", optional=True), ), "START", ), # Probably should have a better file definition but this will do for now AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), Ref("DivideSegment"), ), ) class IndexTypeReferenceSegment(BaseSegment): """A reference to an indextype.""" type = "indextype_reference" match_grammar = ansi.ObjectReferenceSegment.match_grammar.copy() # Adding Oracle specific statements. class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments. Override ANSI to allow exclusion of ExecuteFileSegment. """ type = "statement" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("CommentStatementSegment"), ], ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. Override ANSI to allow addition of ExecuteFileSegment without ending in DelimiterGrammar """ match_grammar = AnyNumberOf( Ref("ExecuteFileSegment"), Delimited( Ref("StatementSegment"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ), ) class CommentStatementSegment(BaseSegment): """A `Comment` statement. COMMENT [text] https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_4009.htm """ type = "comment_statement" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), ), Sequence( "OPERATOR", Ref("ObjectReferenceSegment"), ), Sequence( "INDEXTYPE", Ref("IndexTypeReferenceSegment"), ), Sequence( "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) # need to ignore type due to mypy rules on type variables # see https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases # for details class TableReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias. 
Extended from ANSI to allow Database Link syntax using AtSignSegment """ type = "table_reference" match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=OneOf( Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment")), Ref("AtSignSegment"), ), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ], allow_gaps=False, ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-VIEW.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence(Ref.keyword("NO", optional=True), "FORCE", optional=True), OneOf( "EDITIONING", Sequence("EDITIONABLE", Ref.keyword("EDITIONING", optional=True)), "NONEDITIONABLE", optional=True, ), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class WithinGroupClauseSegment(BaseSegment): """An WITHIN GROUP clause for window functions.""" type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment", optional=False)), ) class ListaggOverflowClauseSegment(BaseSegment): """ON OVERFLOW clause of listagg function.""" type = "listagg_overflow_clause" match_grammar = Sequence( "ON", "OVERFLOW", OneOf( "ERROR", Sequence( "TRUNCATE", Ref("SingleQuotedIdentifierSegment", optional=True), OneOf("WITH", "WITHOUT", optional=True), Ref.keyword("COUNT", optional=True), ), ), ) class NamedArgumentSegment(BaseSegment): """Named argument to a function. https://docs.oracle.com/en/database/oracle/oracle-database/21/lnpls/plsql-subprograms.html#GUID-A7D51201-1711-4F33-827F-70042700801F """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class CreateTableStatementSegment(BaseSegment): """A CREATE TABLE statement. https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-TABLE.html https://oracle-base.com/articles/misc/temporary-tables https://oracle-base.com/articles/18c/private-temporary-tables-18c """ type = "create_table_statement" match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), Ref("OnCommitGrammar", optional=True), ), # Create AS syntax: Sequence( Ref("OnCommitGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref("TableEndClauseSegment", optional=True), ) class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. 
for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name OneOf( AnyNumberOf( Sequence( Ref("ColumnConstraintSegment"), Ref.keyword("ENABLE", optional=True), ) ), Sequence( Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ), ), ) class SqlplusVariableGrammar(BaseSegment): """SQL*Plus bind variables, e.g. :thing. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqpug/using-substitution-variables-sqlplus.html """ type = "sqlplus_variable" match_grammar = Sequence( OptionallyBracketed( Ref("ColonSegment"), Ref("ParameterNameSegment"), ) ) class ConnectByClauseSegment(BaseSegment): """`CONNECT BY` clause used in Hierarchical Queries. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "connectby_clause" match_grammar: Matchable = Sequence( "CONNECT", "BY", Ref.keyword("NOCYCLE", optional=True), Ref("ExpressionSegment"), ) class StartWithClauseSegment(BaseSegment): """`START WITH` clause used in Hierarchical Queries. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "startwith_clause" match_grammar: Matchable = Sequence( "START", "WITH", Ref("ExpressionSegment"), ) class HierarchicalQueryClauseSegment(BaseSegment): """Hierarchical Query. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Hierarchical-Queries.html """ type = "hierarchical_query_clause" match_grammar: Matchable = OneOf( Sequence( Ref("ConnectByClauseSegment"), Ref("StartWithClauseSegment", optional=True), ), Sequence( Ref("StartWithClauseSegment"), Ref("ConnectByClauseSegment"), ), ) class OrderByClauseSegment(ansi.OrderByClauseSegment): """An `ORDER BY` clause like in `SELECT`.""" match_grammar: Matchable = ansi.OrderByClauseSegment.match_grammar.copy( insert=[Ref.keyword("SIBLINGS", optional=True)], before=Ref("ByKeywordSegment") ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later. This is designed for use in the context of set operations, for other use cases, we should use the main SelectStatementSegment.
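    Relative to ANSI, the Oracle version additionally admits hierarchical
    query, PIVOT and UNPIVOT clauses, e.g. (illustrative names):

        SELECT * FROM sales PIVOT (SUM(amount) FOR quarter IN ('Q1', 'Q2'))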
""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("HierarchicalQueryClauseSegment", optional=True), Ref("PivotSegment", optional=True), Ref("UnpivotSegment", optional=True), ], before=Ref("GroupByClauseSegment", optional=True), terminators=[ Ref("HierarchicalQueryClauseSegment"), Ref("PivotSegment", optional=True), Ref("UnpivotSegment", optional=True), ], ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement.""" match_grammar: Matchable = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ], replace_terminators=True, terminators=cast( Sequence, ansi.SelectStatementSegment.match_grammar ).terminators, ) class GreaterThanOrEqualToSegment(ansi.CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawLessThanSegment"), ), ) class LessThanOrEqualToSegment(ansi.CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawGreaterThanSegment"), ), ) class NotEqualToSegment(ansi.CompositeComparisonOperatorSegment): """Allow spaces between operators.""" match_grammar = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment")), Sequence(Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment")), ) class PivotSegment(BaseSegment): """Pivot clause. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/SELECT.html """ type = "pivot_clause" match_grammar: Matchable = Sequence( "PIVOT", Ref.keyword("XML", optional=True), Bracketed( Delimited( Ref("FunctionSegment"), Ref("AliasExpressionSegment", optional=True) ), Ref("PivotForInGrammar"), ), ) class UnpivotSegment(BaseSegment): """Unpivot clause. https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/SELECT.html """ type = "unpivot_clause" match_grammar: Matchable = Sequence( "UNPIVOT", Ref("UnpivotNullsGrammar", optional=True), Bracketed( OptionallyBracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("PivotForInGrammar"), ), ) class ObjectReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object.""" # Allow whitespace match_grammar: Matchable = Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("ObjectReferenceDelimiterGrammar"), terminators=[Ref("ObjectReferenceTerminatorGrammar")], allow_gaps=True, ) class ColumnReferenceSegment(ObjectReferenceSegment): """A reference to column, field or alias.""" type = "column_reference" class FunctionNameSegment(BaseSegment): """Function name, including any prefix bits, e.g. project or schema.""" type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. 
AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name Delimited( OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), terminators=[Ref("BracketedSegment")], ), delimiter=Ref("AtSignSegment"), ), allow_gaps=False, ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_postgres.py000066400000000000000000005140771451700765000234740ustar00rootroot00000000000000"""The PostgreSQL dialect.""" from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, LiteralSegment, Matchable, NewlineSegment, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringParser, SymbolSegment, TypedParser, WordSegment, ) from sqlfluff.core.parser.grammar.anyof import AnySetOf from sqlfluff.core.parser.lexer import StringLexer from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_postgres_keywords import ( get_keywords, postgres_keywords, postgres_postgis_datatype_keywords, ) ansi_dialect = load_raw_dialect("ansi") postgres_dialect = ansi_dialect.copy_as("postgres") postgres_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ StringLexer("right_arrow", "=>", CodeSegment), ], before="equals", ) postgres_dialect.insert_lexer_matchers( # JSON Operators: https://www.postgresql.org/docs/9.5/functions-json.html [ # Explanation for the regex # - (?s) Switch - .* includes newline characters # - U& - must start with U& # - (('')+?(?!')|('.*?(?>|#>>|->|#>|@>|<@|\?\||\?|\?&|#-", SymbolSegment, ), StringLexer("at", "@", CodeSegment), # https://www.postgresql.org/docs/current/sql-syntax-lexical.html RegexLexer( "bit_string_literal", # binary (e.g. b'1001') or hex (e.g. X'1FF') r"[bBxX]'[0-9a-fA-F]*'", CodeSegment, ), ], before="like_operator", ) postgres_dialect.insert_lexer_matchers( [ # Explanation for the regex # \\([^(\\\r\n)])+((\\\\)|(?=\n)|(?=\r\n))? # \\ Starts with backslash # ([^\\\r\n])+ Anything that is not a newline or a # backslash # ( # (\\\\) Double backslash # | OR # (?=\n) The next character is a newline # | OR # (?=\r\n) The next 2 characters are a carriage # return and a newline # ) # ? The previous clause is optional RegexLexer( # For now we'll just treat meta syntax like comments and so just ignore # them. In future we may want to enhance this to actually parse them to # ensure they are valid meta commands. "meta_command", r"\\([^\\\r\n])+((\\\\)|(?=\n)|(?=\r\n))?", CommentSegment, ), RegexLexer( # pg_stat_statements which is an official postgres extension used for # storing the query logs replaces the actual literals used in the # query with $n where n is integer value. This grammar is for parsing # those literals. # ref: https://www.postgresql.org/docs/current/pgstatstatements.html "dollar_numeric_literal", r"\$\d+", LiteralSegment, ), ], before="word", # Final thing to search for - as psql specific ) postgres_dialect.patch_lexer_matchers( [ # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # In Postgres, the only escape character is ' for single quote strings RegexLexer( "single_quote", r"(?s)('')+?(?!')|('.*?(? 
# Typename --> SimpleTypename --> GenericType --> type_function_name --> # { unreserved_keyword | type_func_name_keyword | IDENT } # We'll just match any normal code/keyword string here to keep it simple. Ref("PropertiesNakedIdentifierSegment"), ), CascadeRestrictGrammar=OneOf("CASCADE", "RESTRICT"), ExtendedTableReferenceGrammar=OneOf( Ref("TableReferenceSegment"), Sequence("ONLY", OptionallyBracketed(Ref("TableReferenceSegment"))), Sequence(Ref("TableReferenceSegment"), Ref("StarSegment")), ), RightArrowSegment=StringParser("=>", SymbolSegment, type="right_arrow"), OnKeywordAsIdentifierSegment=StringParser( "ON", IdentifierSegment, type="naked_identifier" ), DollarNumericLiteralSegment=TypedParser( "dollar_numeric_literal", LiteralSegment, type="dollar_numeric_literal" ), ForeignDataWrapperGrammar=Sequence("FOREIGN", "DATA", "WRAPPER"), OptionsListGrammar=Sequence( Delimited(Ref("NakedIdentifierFullSegment"), Ref("QuotedLiteralSegment")) ), OptionsGrammar=Sequence( "OPTIONS", Bracketed(AnyNumberOf(Ref("OptionsListGrammar"))) ), CreateUserMappingGrammar=Sequence("CREATE", "USER", "MAPPING"), SessionInformationUserFunctionsGrammar=OneOf( "USER", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER" ), ImportForeignSchemaGrammar=Sequence("IMPORT", "FOREIGN", "SCHEMA"), ) postgres_dialect.replace( LikeGrammar=OneOf("LIKE", "ILIKE", Sequence("SIMILAR", "TO")), StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment"), "COLLATE"), ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), Ref("OverlapSegment"), Ref("NotExtendRightSegment"), Ref("NotExtendLeftSegment"), Ref("AdjacentSegment"), ), NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( # Can’t begin with $, must only contain digits, letters, underscore and $, but # can’t be all digits. r"([A-Z_]+|[0-9]+[A-Z_$])[A-Z0-9_$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), ParameterNameSegment=RegexParser( r'[A-Z_][A-Z0-9_$]*|"[^"]*"', CodeSegment, type="parameter" ), FunctionNameIdentifierSegment=RegexParser( r"[A-Z_][A-Z0-9_$]*", CodeSegment, type="function_name_identifier", ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("NamedArgumentSegment"), ), QuotedLiteralSegment=OneOf( # Postgres allows newline-concatenated string literals (#1488). # Since these string literals can have comments between them, # we use grammar to handle this.
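# For example (illustrative), the two lines below lex as two
# single_quote literals but parse as ONE concatenated literal with
# the value 'foobar':
#   SELECT 'foo'
#       'bar';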
# Note we CANNOT use Delimited as it's greedy and swallows the # last Newline - see #2495 Sequence( TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), ), ), Sequence( TypedParser( "bit_string_literal", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "bit_string_literal", LiteralSegment, type="quoted_literal", ), ), ), Delimited( TypedParser( "unicode_single_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "unicode_single_quote", LiteralSegment, type="quoted_literal", ), ), ), Delimited( TypedParser( "escaped_single_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "escaped_single_quote", LiteralSegment, type="quoted_literal", ), ), ), Delimited( TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), AnyNumberOf( Ref("MultilineConcatenateDelimiterGrammar"), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), ), ), QuotedIdentifierSegment=OneOf( TypedParser("double_quote", IdentifierSegment, type="quoted_identifier"), TypedParser("unicode_double_quote", LiteralSegment, type="quoted_literal"), ), PostFunctionGrammar=AnyNumberOf( Ref("WithinGroupClauseSegment"), Ref("OverClauseSegment"), # Filter clause supported by both Postgres and SQLite Ref("FilterClauseGrammar"), ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add JSON operators Ref("JsonOperatorSegment"), ), FunctionParameterGrammar=Sequence( OneOf("IN", "OUT", "INOUT", "VARIADIC", optional=True), OneOf( Ref("DatatypeSegment"), Sequence(Ref("ParameterNameSegment"), Ref("DatatypeSegment")), ), Sequence( OneOf("DEFAULT", Ref("EqualsSegment")), Ref("ExpressionSegment"), optional=True, ), ), FrameClauseUnitGrammar=OneOf("RANGE", "ROWS", "GROUPS"), # Postgres supports the non-standard ISNULL and NOTNULL comparison operators.
See # https://www.postgresql.org/docs/14/functions-comparison.html IsNullGrammar=Ref.keyword("ISNULL"), NotNullGrammar=Ref.keyword("NOTNULL"), JoinKeywordsGrammar=Sequence("JOIN", Sequence("LATERAL", optional=True)), SelectClauseTerminatorGrammar=OneOf( "INTO", "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("CommaSegment"), Ref("SetOperatorSegment"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("DollarNumericLiteralSegment"), Ref("PsqlVariableGrammar"), ], before=Ref("ArrayLiteralSegment"), ), FromClauseTerminatorGrammar=ansi_dialect.get_grammar( "FromClauseTerminatorGrammar" ).copy( insert=[Ref("ForClauseSegment")], ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "RETURNING", Sequence("ON", "CONFLICT"), Ref("ForClauseSegment"), ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", Sequence("WITH", "DATA"), Ref("ForClauseSegment"), ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), # PostgreSQL supports the non-standard "RETURNING" keyword, and therefore the # INSERT/UPDATE/DELETE statements can also be used in subqueries. NonWithSelectableGrammar=OneOf( Ref("SetExpressionSegment"), OptionallyBracketed(Ref("SelectStatementSegment")), Ref("NonSetSelectableGrammar"), # moved from NonWithNonSelectableGrammar: Ref("UpdateStatementSegment"), Ref("InsertStatementSegment"), Ref("DeleteStatementSegment"), ), NonWithNonSelectableGrammar=OneOf(), ) class OverlapSegment(ansi.CompositeComparisonOperatorSegment): """Overlaps range operator.""" match_grammar = Sequence( Ref("AmpersandSegment"), Ref("AmpersandSegment"), allow_gaps=False ) class NotExtendRightSegment(ansi.CompositeComparisonOperatorSegment): """Not extend right range operator.""" match_grammar = Sequence( Ref("AmpersandSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False ) class NotExtendLeftSegment(ansi.CompositeComparisonOperatorSegment): """Not extend left range operator.""" match_grammar = Sequence( Ref("AmpersandSegment"), Ref("RawLessThanSegment"), allow_gaps=False ) class AdjacentSegment(ansi.CompositeComparisonOperatorSegment): """Adjacent range operator.""" match_grammar = Sequence( Ref("MinusSegment"), Ref("PipeSegment"), Ref("MinusSegment"), allow_gaps=False ) class PsqlVariableGrammar(BaseSegment): """Psql variables :thing, :'thing', :"thing".""" type = "psql_variable" match_grammar = Sequence( OptionallyBracketed( Ref("ColonSegment"), OneOf( Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), ), ) ) class ArrayAccessorSegment(ansi.ArrayAccessorSegment): """Overwrites the array accessor in ANSI to allow any number of consecutive brackets. Postgres also allows Python-style slice access like [:2] or [2:], so the numbers on either side of the slice segment are optional.
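    For example (illustrative):

        SELECT arr[4], arr[2:3], arr[:2], arr[2:], arr[:] FROM tbl;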
""" match_grammar = Bracketed( OneOf( # These three are for a single element access: [n] Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), # This is for slice access: [n:m], [:m], [n:], and [:] Sequence( OneOf( Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), optional=True, ), Ref("SliceSegment"), OneOf( Ref("QualifiedNumericLiteralSegment"), Ref("NumericLiteralSegment"), Ref("ExpressionSegment"), optional=True, ), ), ), bracket_type="square", ) class DateTimeTypeIdentifier(BaseSegment): """Date Time Type.""" type = "datetime_type_identifier" match_grammar = OneOf( "DATE", Sequence( OneOf("TIME", "TIMESTAMP"), Bracketed(Ref("NumericLiteralSegment"), optional=True), Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True), ), Sequence( OneOf("INTERVAL", "TIMETZ", "TIMESTAMPTZ"), Bracketed(Ref("NumericLiteralSegment"), optional=True), ), ) class DateTimeLiteralGrammar(BaseSegment): """Literal Date Time.""" type = "datetime_literal" match_grammar = Sequence( Ref("DateTimeTypeIdentifier", optional=True), Ref("QuotedLiteralSegment"), ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. """ match_grammar = Sequence( # Some dialects allow optional qualification of data types with schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), OneOf( Ref("WellKnownTextGeometrySegment"), Ref("DateTimeTypeIdentifier"), Sequence( OneOf( # numeric types "SMALLINT", "INTEGER", "INT", "INT2", "INT4", "INT8", "BIGINT", "FLOAT4", "FLOAT8", "REAL", Sequence("DOUBLE", "PRECISION"), "SMALLSERIAL", "SERIAL", "SERIAL2", "SERIAL4", "SERIAL8", "BIGSERIAL", # numeric types [(precision)] Sequence( OneOf("FLOAT"), Ref("BracketedArguments", optional=True), ), # numeric types [precision ["," scale])] Sequence( OneOf("DECIMAL", "NUMERIC"), Ref("BracketedArguments", optional=True), ), # monetary type "MONEY", # character types OneOf( Sequence( OneOf( "BPCHAR", "CHAR", # CHAR VARYING is not documented, but it's # in the real grammar: # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L14262 Sequence("CHAR", "VARYING"), "CHARACTER", Sequence("CHARACTER", "VARYING"), "VARCHAR", ), Ref("BracketedArguments", optional=True), ), "TEXT", ), # binary type "BYTEA", # boolean types OneOf("BOOLEAN", "BOOL"), # geometric types OneOf("POINT", "LINE", "LSEG", "BOX", "PATH", "POLYGON", "CIRCLE"), # network address types OneOf("CIDR", "INET", "MACADDR", "MACADDR8"), # text search types OneOf("TSVECTOR", "TSQUERY"), # bit string types Sequence( "BIT", OneOf("VARYING", optional=True), Ref("BracketedArguments", optional=True), ), # uuid type "UUID", # xml type "XML", # json types OneOf("JSON", "JSONB"), # range types "INT4RANGE", "INT8RANGE", "NUMRANGE", "TSRANGE", "TSTZRANGE", "DATERANGE", # pg_lsn type "PG_LSN", ), ), # user defined data types Ref("DatatypeIdentifierSegment"), ), # array types OneOf( AnyNumberOf( Bracketed( Ref("ExpressionSegment", optional=True), bracket_type="square" ) ), Ref("ArrayTypeSegment"), Ref("SizedArrayTypeSegment"), optional=True, ), ) class ArrayTypeSegment(ansi.ArrayTypeSegment): """Prefix for array literals specifying the type.""" type = "array_type" match_grammar = Ref.keyword("ARRAY") class IndexAccessMethodSegment(BaseSegment): """Index access method (e.g. 
`USING gist`).""" type = "index_access_method" match_grammar = Ref("SingleIdentifierGrammar") class OperatorClassReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an operator class.""" type = "operator_class_reference" class DefinitionParameterSegment(BaseSegment): """A single definition parameter. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6320 """ type = "definition_parameter" match_grammar: Matchable = Sequence( Ref("PropertiesNakedIdentifierSegment"), Sequence( Ref("EqualsSegment"), # could also contain ParameterNameSegment: Ref("DefinitionArgumentValueGrammar"), optional=True, ), ) class DefinitionParametersSegment(BaseSegment): """List of definition parameters. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6313 """ type = "definition_parameters" match_grammar: Matchable = Bracketed( Delimited( Ref("DefinitionParameterSegment"), ) ) class CreateCastStatementSegment(ansi.CreateCastStatementSegment): """A `CREATE CAST` statement. https://www.postgresql.org/docs/15/sql-createcast.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8951 """ match_grammar: Matchable = Sequence( "CREATE", "CAST", Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), OneOf( Sequence( "WITH", "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), Sequence("WITHOUT", "FUNCTION"), Sequence("WITH", "INOUT"), ), OneOf( Sequence("AS", "ASSIGNMENT", optional=True), Sequence("AS", "IMPLICIT", optional=True), optional=True, ), ) class DropCastStatementSegment(ansi.DropCastStatementSegment): """A `DROP CAST` statement. https://www.postgresql.org/docs/15/sql-dropcast.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8995 """ match_grammar: Matchable = Sequence( "DROP", "CAST", Sequence("IF", "EXISTS", optional=True), Bracketed( Ref("DatatypeSegment"), "AS", Ref("DatatypeSegment"), ), Ref("DropBehaviorGrammar", optional=True), ) class RelationOptionSegment(BaseSegment): """Relation option element from reloptions. It is very similar to DefinitionParameterSegment except that it allows qualified names (e.g. namespace.attr = 5). https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3016-L3035 """ type = "relation_option" match_grammar: Matchable = Sequence( Ref("PropertiesNakedIdentifierSegment"), Sequence( Ref("DotSegment"), Ref("PropertiesNakedIdentifierSegment"), optional=True, ), Sequence( Ref("EqualsSegment"), # could also contain ParameterNameSegment: Ref("DefinitionArgumentValueGrammar"), optional=True, ), ) class RelationOptionsSegment(BaseSegment): """List of relation options. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L3003-L3014 """ type = "relation_options" match_grammar: Matchable = Bracketed( Delimited( Ref("RelationOptionSegment"), ) ) class CreateFunctionStatementSegment(ansi.CreateFunctionStatementSegment): """A `CREATE FUNCTION` statement. This version in the ANSI dialect should be a "common subset" of the structure of the code for those dialects. 
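    For example (illustrative):

        CREATE FUNCTION add(a integer, b integer) RETURNS integer
        LANGUAGE SQL
        AS 'select a + b;';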
postgres: https://www.postgresql.org/docs/13/sql-createfunction.html """ match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("DatatypeSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ) ), optional=True, ), Sequence( "SETOF", Ref("DatatypeSegment"), ), Ref("DatatypeSegment"), ), optional=True, ), Ref("FunctionDefinitionGrammar"), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement. As per the specification: https://www.postgresql.org/docs/14/sql-dropfunction.html """ type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("ObjectReferenceSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), Ref("DropBehaviorGrammar", optional=True), ) class AlterFunctionStatementSegment(BaseSegment): """An `ALTER FUNCTION` statement. As per the specification: https://www.postgresql.org/docs/14/sql-alterfunction.html """ type = "alter_function_statement" match_grammar = Sequence( "ALTER", "FUNCTION", Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), OneOf( Ref("AlterFunctionActionSegment", optional=True), Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class AlterFunctionActionSegment(BaseSegment): """Alter Function Action Segment. https://www.postgresql.org/docs/14/sql-alterfunction.html """ type = "alter_function_action_segment" match_grammar = Sequence( OneOf( OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", ), OneOf("IMMUTABLE", "STABLE", "VOLATILE"), Sequence(Ref.keyword("NOT", optional=True), "LEAKPROOF"), Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("DEFINER", "INVOKER"), ), Sequence("PARALLEL", OneOf("UNSAFE", "RESTRICTED", "SAFE")), Sequence("COST", Ref("NumericLiteralSegment")), Sequence("ROWS", Ref("NumericLiteralSegment")), Sequence("SUPPORT", Ref("ParameterNameSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), "DEFAULT", ), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), ), Ref.keyword("RESTRICT", optional=True), ) class AlterProcedureActionSegment(BaseSegment): """Alter Procedure Action Segment.
https://www.postgresql.org/docs/14/sql-alterprocedure.html """ type = "alter_procedure_action_segment" match_grammar = Sequence( OneOf( Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("DEFINER", "INVOKER"), ), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), "DEFAULT", ), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), ), Ref.keyword("RESTRICT", optional=True), ) class AlterProcedureStatementSegment(BaseSegment): """An `ALTER PROCEDURE` statement. https://www.postgresql.org/docs/14/sql-alterprocedure.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ) ), OneOf( Ref("AlterProcedureActionSegment", optional=True), Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), Delimited( OneOf( Ref("ParameterNameSegment"), Ref("LiteralGrammar"), ), ), ), Sequence("FROM", "CURRENT"), ), ), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://www.postgresql.org/docs/14/sql-createprocedure.html TODO: Just a basic statement for now, without full syntax. Based on CreateFunctionStatementSegment, without a return type. """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Ref("FunctionDefinitionGrammar"), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://www.postgresql.org/docs/11/sql-dropprocedure.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), ), OneOf( "CASCADE", "RESTRICT", optional=True, ), ) class WellKnownTextGeometrySegment(BaseSegment): """A Data Type Segment to identify Well Known Text Geometric Data Types. As specified in https://postgis.net/stuff/postgis-3.1.pdf This approach is to maximise 'accepted code' for the parser, rather than be overly restrictive.
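    For example (illustrative), this permits PostGIS column types such as:

        CREATE TABLE spatial_data (geom GEOMETRY(POINT, 4326));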
""" type = "wkt_geometry_type" _geometry_type_keywords = [x[0] for x in postgres_postgis_datatype_keywords] match_grammar = OneOf( Sequence( OneOf(*_geometry_type_keywords), Bracketed( Delimited( OptionallyBracketed(Delimited(Ref("SimpleGeometryGrammar"))), # 2D Arrays of coordinates - to specify surfaces Bracketed( Delimited(Bracketed(Delimited(Ref("SimpleGeometryGrammar")))) ), Ref("WellKnownTextGeometrySegment"), ) ), ), Sequence( OneOf("GEOMETRY", "GEOGRAPHY"), Bracketed( Sequence( OneOf(*_geometry_type_keywords, "GEOMETRY", "GEOGRAPHY"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ) ), ), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment.""" type = "semi_structured_expression" match_grammar = Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement. https://www.postgresql.org/docs/13/sql-createfunction.html """ match_grammar = Sequence( AnyNumberOf( Ref("LanguageClauseSegment"), Sequence("TRANSFORM", "FOR", "TYPE", Ref("ParameterNameSegment")), Ref.keyword("WINDOW"), OneOf("IMMUTABLE", "STABLE", "VOLATILE"), Sequence(Ref.keyword("NOT", optional=True), "LEAKPROOF"), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", ), Sequence( Ref.keyword("EXTERNAL", optional=True), "SECURITY", OneOf("INVOKER", "DEFINER"), ), Sequence("PARALLEL", OneOf("UNSAFE", "RESTRICTED", "SAFE")), Sequence("COST", Ref("NumericLiteralSegment")), Sequence("ROWS", Ref("NumericLiteralSegment")), Sequence("SUPPORT", Ref("ParameterNameSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), Delimited( OneOf( Ref("ParameterNameSegment"), Ref("LiteralGrammar"), ), ), ), Sequence("FROM", "CURRENT"), ), ), Sequence( "AS", OneOf( Ref("QuotedLiteralSegment"), Sequence( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "BEGIN", "ATOMIC", Ref("SelectStatementSegment"), Ref("SemicolonSegment"), "END", Ref("SemicolonSegment"), ), ), Sequence( "WITH", Bracketed(Delimited(Ref("ParameterNameSegment"))), optional=True, ), ) class IntoClauseSegment(BaseSegment): """Into Clause Segment. As specified in https://www.postgresql.org/docs/14/sql-selectinto.html """ type = "into_clause" match_grammar = Sequence( "INTO", OneOf("TEMPORARY", "TEMP", "UNLOGGED", optional=True), Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ) class ForClauseSegment(BaseSegment): """`FOR ...` clause in `SELECT` statements. As specified in https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE. 
""" type = "for_clause" match_grammar = Sequence( "FOR", OneOf( "UPDATE", Sequence("NO", "KEY", "UPDATE"), "SHARE", Sequence("KEY", "SHARE"), ), Sequence( "OF", Delimited( Ref("TableReferenceSegment"), ), optional=True, ), OneOf( "NOWAIT", Sequence("SKIP", "LOCKED"), optional=True, ), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Overrides ANSI Statement, to allow for SELECT INTO statements.""" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("IntoClauseSegment", optional=True), ], before=Ref("FromClauseSegment", optional=True), terminators=[ Sequence("WITH", Ref.keyword("NO", optional=True), "DATA"), Sequence("ON", "CONFLICT"), Ref.keyword("RETURNING"), Ref("WithCheckOptionSegment"), ], ) class SelectStatementSegment(ansi.SelectStatementSegment): """Overrides ANSI as the parse grammar copy needs to be reapplied.""" # Inherit most of the parse grammar from the unordered version. match_grammar: Matchable = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ] ).copy( insert=[Ref("ForClauseSegment", optional=True)], before=Ref("LimitClauseSegment", optional=True), # Overwrite the terminators, because we want to remove some. replace_terminators=True, terminators=[ Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), Sequence("ON", "CONFLICT"), Ref.keyword("RETURNING"), Ref("WithCheckOptionSegment"), ], ) class SelectClauseSegment(ansi.SelectClauseSegment): """Overrides ANSI to allow INTO as a terminator.""" match_grammar = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, Delimited( Ref("SelectClauseElementSegment"), # In Postgres you don't need an element so make it optional optional=True, allow_trailing=True, ), Dedent, terminators=[ "INTO", "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", Ref("SetOperatorSegment"), Sequence("WITH", Ref.keyword("NO", optional=True), "DATA"), Ref("WithCheckOptionSegment"), ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = OneOf( Sequence( "DISTINCT", Sequence( "ON", Bracketed(Delimited(Ref("ExpressionSegment"))), optional=True, ), ), "ALL", ) class WithinGroupClauseSegment(BaseSegment): """An WITHIN GROUP clause for window functions. https://www.postgresql.org/docs/current/functions-aggregate.html. """ type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed(Ref("OrderByClauseSegment", optional=True)), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`.""" type = "groupby_clause" match_grammar = Sequence( "GROUP", "BY", Indent, Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), Bracketed(), # Allows empty parentheses ), terminators=[ Sequence("ORDER", "BY"), "LIMIT", "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), ], ), Dedent, ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. 
As per: https://www.postgresql.org/docs/current/sql-createrole.html """ type = "create_role_statement" match_grammar = Sequence( "CREATE", OneOf("ROLE", "USER"), Ref("RoleReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), AnySetOf( OneOf("SUPERUSER", "NOSUPERUSER"), OneOf("CREATEDB", "NOCREATEDB"), OneOf("CREATEROLE", "NOCREATEROLE"), OneOf("INHERIT", "NOINHERIT"), OneOf("LOGIN", "NOLOGIN"), OneOf("REPLICATION", "NOREPLICATION"), OneOf("BYPASSRLS", "NOBYPASSRLS"), Sequence("CONNECTION", "LIMIT", Ref("NumericLiteralSegment")), Sequence("PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "NULL")), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), Sequence("IN", "ROLE", Ref("RoleReferenceSegment")), Sequence("IN", "GROUP", Ref("RoleReferenceSegment")), Sequence("ROLE", Ref("RoleReferenceSegment")), Sequence("ADMIN", Ref("RoleReferenceSegment")), Sequence("USER", Ref("RoleReferenceSegment")), Sequence("SYSID", Ref("NumericLiteralSegment")), ), optional=True, ), ) class AlterRoleStatementSegment(BaseSegment): """An `ALTER ROLE` statement. As per: https://www.postgresql.org/docs/current/sql-alterrole.html """ type = "alter_role_statement" match_grammar = Sequence( "ALTER", OneOf("ROLE", "USER"), OneOf( # role_specification Sequence( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", Ref("RoleReferenceSegment"), ), Ref.keyword("WITH", optional=True), AnySetOf( OneOf("SUPERUSER", "NOSUPERUSER"), OneOf("CREATEDB", "NOCREATEDB"), OneOf("CREATEROLE", "NOCREATEROLE"), OneOf("INHERIT", "NOINHERIT"), OneOf("LOGIN", "NOLOGIN"), OneOf("REPLICATION", "NOREPLICATION"), OneOf("BYPASSRLS", "NOBYPASSRLS"), Sequence("CONNECTION", "LIMIT", Ref("NumericLiteralSegment")), Sequence( Ref.keyword("ENCRYPTED", optional=True), "PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "NULL"), ), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), ), ), # name only Sequence( Ref("RoleReferenceSegment"), Sequence("RENAME", "TO", Ref("RoleReferenceSegment")), ), # role_specification | all Sequence( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", "ALL", Ref("RoleReferenceSegment"), ), Sequence( "IN", "DATABASE", Ref("DatabaseReferenceSegment"), optional=True, ), OneOf( Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf( "DEFAULT", Delimited( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815 Ref("OnKeywordAsIdentifierSegment"), ), ), ), Sequence( "FROM", "CURRENT", ), ), ), Sequence("RESET", OneOf(Ref("ParameterNameSegment"), "ALL")), ), ), ), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. EXPLAIN [ ( option [, ...] ) ] statement EXPLAIN [ ANALYZE ] [ VERBOSE ] statement https://www.postgresql.org/docs/14/sql-explain.html """ match_grammar = Sequence( "EXPLAIN", OneOf( Sequence( OneOf( "ANALYZE", "ANALYSE", optional=True, ), Ref.keyword("VERBOSE", optional=True), ), Bracketed(Delimited(Ref("ExplainOptionSegment"))), optional=True, ), ansi.ExplainStatementSegment.explainable_stmt, ) class ExplainOptionSegment(BaseSegment): """An `Explain` statement option. 
ANALYZE [ boolean ] VERBOSE [ boolean ] COSTS [ boolean ] SETTINGS [ boolean ] BUFFERS [ boolean ] WAL [ boolean ] TIMING [ boolean ] SUMMARY [ boolean ] FORMAT { TEXT | XML | JSON | YAML } https://www.postgresql.org/docs/14/sql-explain.html """ type = "explain_option" match_grammar = OneOf( Sequence( OneOf( "ANALYZE", "ANALYSE", "VERBOSE", "COSTS", "SETTINGS", "BUFFERS", "WAL", "TIMING", "SUMMARY", ), Ref("BooleanLiteralGrammar", optional=True), ), Sequence( "FORMAT", OneOf("TEXT", "XML", "JSON", "YAML"), ), ) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. https://www.postgresql.org/docs/15/sql-createschema.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1493 """ match_grammar: Matchable = Sequence( "CREATE", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), OneOf( Sequence( # schema name defaults to role if not provided Ref("SchemaReferenceSegment", optional=True), "AUTHORIZATION", Ref("RoleReferenceSegment"), ), Ref("SchemaReferenceSegment"), ), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. As specified in https://www.postgresql.org/docs/13/sql-createtable.html """ match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( # A single COLLATE segment can come before or # after constraint segments OneOf( Ref("ColumnConstraintSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), ), ), ), ), Ref("TableConstraintSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), ), optional=True, ) ), Sequence( "INHERITS", Bracketed(Delimited(Ref("TableReferenceSegment"))), optional=True, ), ), # Create OF syntax: Sequence( "OF", Ref("ParameterNameSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), ), # Create PARTITION OF syntax Sequence( "PARTITION", "OF", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Sequence("WITH", "OPTIONS", optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Ref("TableConstraintSegment"), ), optional=True, ), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), ), AnyNumberOf( Sequence( "PARTITION", "BY", OneOf("RANGE", "LIST", "HASH"), Bracketed( AnyNumberOf( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Ref("FunctionSegment"), ), AnyNumberOf( Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("ParameterNameSegment", optional=True), ), ), ) ) ), ), Sequence("USING", Ref("ParameterNameSegment")), OneOf( Sequence("WITH", Ref("RelationOptionsSegment")), Sequence("WITHOUT", "OIDS"), ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment")), ), ) class CreateTableAsStatementSegment(BaseSegment): """A `CREATE TABLE AS` statement. 
As specified in https://www.postgresql.org/docs/13/sql-createtableas.html """ type = "create_table_as_statement" match_grammar = Sequence( "CREATE", OneOf( Sequence( OneOf("GLOBAL", "LOCAL", optional=True), Ref("TemporaryGrammar"), ), "UNLOGGED", optional=True, ), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnyNumberOf( Bracketed( Delimited(Ref("ColumnReferenceSegment")), optional=True, ), Sequence("USING", Ref("ParameterNameSegment"), optional=True), OneOf( Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), ), optional=True, ), ) ) ), ), Sequence("WITHOUT", "OIDS"), optional=True, ), Sequence( "ON", "COMMIT", OneOf(Sequence("PRESERVE", "ROWS"), Sequence("DELETE", "ROWS"), "DROP"), optional=True, ), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), ), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. Matches the definition in https://www.postgresql.org/docs/13/sql-altertable.html """ match_grammar = Sequence( "ALTER", "TABLE", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), Ref("StarSegment", optional=True), OneOf( Delimited(Ref("AlterTableActionSegment")), Sequence( "RENAME", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "RENAME", "CONSTRAINT", Ref("ParameterNameSegment"), "TO", Ref("ParameterNameSegment"), ), ), ), Sequence( Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "ATTACH", "PARTITION", Ref("ParameterNameSegment"), OneOf( Sequence("FOR", "VALUES", Ref("PartitionBoundSpecSegment")), "DEFAULT", ), ), Sequence( "DETACH", "PARTITION", Ref("ParameterNameSegment"), Ref.keyword("CONCURRENTLY", optional=True), Ref.keyword("FINALIZE", optional=True), ), ), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("ObjectReferenceSegment")), optional=True, ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Ref.keyword("NOWAIT", optional=True), ), ), ) class AlterTableActionSegment(BaseSegment): """Alter Table Action Segment. 
https://www.postgresql.org/docs/13/sql-altertable.html """ type = "alter_table_action_segment" match_grammar = OneOf( Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("IfNotExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( Sequence("SET", "DATA", optional=True), "TYPE", Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True ), Sequence("USING", OneOf(Ref("ExpressionSegment")), optional=True), ), Sequence( "SET", "DEFAULT", OneOf( OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ) ), ), Sequence("DROP", "DEFAULT"), Sequence(OneOf("SET", "DROP", optional=True), "NOT", "NULL"), Sequence("DROP", "EXPRESSION", Ref("IfExistsGrammar", optional=True)), Sequence( "ADD", "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), "AS", "IDENTITY", Bracketed( AnyNumberOf(Ref("AlterSequenceOptionsSegment")), optional=True ), ), Sequence( OneOf( Sequence( "SET", "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), ), Sequence("SET", Ref("AlterSequenceOptionsSegment")), Sequence( "RESTART", Sequence("WITH", Ref("NumericLiteralSegment")) ), ) ), Sequence( "DROP", "IDENTITY", Ref("IfExistsGrammar", optional=True), ), Sequence("SET", "STATISTICS", Ref("NumericLiteralSegment")), Sequence("SET", Ref("RelationOptionsSegment")), # Documentation says you can only provide keys in RESET options, but the # actual grammar lets you pass in values too. Sequence("RESET", Ref("RelationOptionsSegment")), Sequence( "SET", "STORAGE", OneOf("PLAIN", "EXTERNAL", "EXTENDED", "MAIN") ), ), ), Sequence("ADD", Ref("TableConstraintSegment")), Sequence("ADD", Ref("TableConstraintUsingIndexSegment")), Sequence( "ALTER", "CONSTRAINT", Ref("ParameterNameSegment"), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ), Sequence("VALIDATE", "CONSTRAINT", Ref("ParameterNameSegment")), Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ParameterNameSegment"), Ref("DropBehaviorGrammar", optional=True), ), Sequence( OneOf("ENABLE", "DISABLE"), "TRIGGER", OneOf(Ref("ParameterNameSegment"), "ALL", "USER"), ), Sequence( "ENABLE", OneOf("REPLICA", "ALWAYS"), "TRIGGER", Ref("ParameterNameSegment") ), Sequence( OneOf( "ENABLE", "DISABLE", Sequence("ENABLE", "REPLICA"), Sequence("ENABLE", "RULE"), ), "RULE", Ref("ParameterNameSegment"), ), Sequence( OneOf("DISABLE", "ENABLE", "FORCE", Sequence("NO", "FORCE")), "ROW", "LEVEL", "SECURITY", ), Sequence("CLUSTER", "ON", Ref("ParameterNameSegment")), Sequence("SET", "WITHOUT", OneOf("CLUSTER", "OIDS")), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence("SET", OneOf("LOGGED", "UNLOGGED")), Sequence("SET", Ref("RelationOptionsSegment")), # Documentation says you can only provide keys in RESET options, but the # actual grammar lets you pass in values too. 
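# e.g. (illustrative) both of the following parse, with and without a value:
#   ALTER TABLE tbl RESET (fillfactor);
#   ALTER TABLE tbl RESET (fillfactor = 70);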
Sequence("RESET", Ref("RelationOptionsSegment")), Sequence( Ref.keyword("NO", optional=True), "INHERIT", Ref("TableReferenceSegment") ), Sequence("OF", Ref("ParameterNameSegment")), Sequence("NOT", "OF"), Sequence( "OWNER", "TO", OneOf( Ref("ParameterNameSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "REPLICA", "IDENTITY", OneOf( "DEFAULT", Sequence("USING", "INDEX", Ref("IndexReferenceSegment")), "FULL", "NOTHING", ), ), ) class VersionIdentifierSegment(BaseSegment): """A reference to an version.""" type = "version_identifier" # match grammar (don't allow whitespace) match_grammar: Matchable = OneOf( Ref("QuotedLiteralSegment"), Ref("NakedIdentifierSegment"), ) class CreateExtensionStatementSegment(BaseSegment): """A `CREATE EXTENSION` statement. https://www.postgresql.org/docs/9.1/sql-createextension.html """ type = "create_extension_statement" match_grammar: Matchable = Sequence( "CREATE", "EXTENSION", Ref("IfNotExistsGrammar", optional=True), Ref("ExtensionReferenceSegment"), Ref.keyword("WITH", optional=True), Sequence("SCHEMA", Ref("SchemaReferenceSegment"), optional=True), Sequence("VERSION", Ref("VersionIdentifierSegment"), optional=True), Sequence("FROM", Ref("VersionIdentifierSegment"), optional=True), ) class DropExtensionStatementSegment(BaseSegment): """A `DROP EXTENSION` statement. https://www.postgresql.org/docs/14/sql-dropextension.html """ type = "drop_extension_statement" match_grammar: Matchable = Sequence( "DROP", "EXTENSION", Ref("IfExistsGrammar", optional=True), Ref("ExtensionReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class PublicationReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a publication.""" type = "publication_reference" match_grammar: Matchable = Ref("SingleIdentifierGrammar") class PublicationTableSegment(BaseSegment): """Specification for a single table object in a publication.""" type = "publication_table" match_grammar: Matchable = Sequence( Ref("ExtendedTableReferenceGrammar"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True), ) class PublicationObjectsSegment(BaseSegment): """Specification for one or more objects in a publication. Unlike the underlying PG grammar which has one object per PublicationObjSpec and so requires one to track the previous object type if it's a "continuation object type", this grammar groups together the continuation objects, e.g. "TABLE a, b, TABLE c, d" results in two segments: one containing references "a, b", and the other contianing "c, d". https://www.postgresql.org/docs/15/sql-createpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10435-L10530 """ type = "publication_objects" match_grammar: Matchable = OneOf( Sequence( "TABLE", Delimited( Ref("PublicationTableSegment"), terminators=[Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES"))], ), ), Sequence( "TABLES", "IN", "SCHEMA", Delimited( OneOf(Ref("SchemaReferenceSegment"), "CURRENT_SCHEMA"), terminators=[Sequence(Ref("CommaSegment"), OneOf("TABLE", "TABLES"))], ), ), ) class CreatePublicationStatementSegment(BaseSegment): """A `CREATE PUBLICATION` statement. 
https://www.postgresql.org/docs/15/sql-createpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10390-L10530 """ type = "create_publication_statement" match_grammar: Matchable = Sequence( "CREATE", "PUBLICATION", Ref("PublicationReferenceSegment"), OneOf( Sequence("FOR", "ALL", "TABLES"), Sequence("FOR", Delimited(Ref("PublicationObjectsSegment"))), optional=True, ), Sequence( "WITH", Ref("DefinitionParametersSegment"), optional=True, ), ) class AlterPublicationStatementSegment(BaseSegment): """An `ALTER PUBLICATION` statement. https://www.postgresql.org/docs/15/sql-alterpublication.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L10549 """ type = "alter_publication_statement" match_grammar: Matchable = Sequence( "ALTER", "PUBLICATION", Ref("PublicationReferenceSegment"), OneOf( Sequence("SET", Ref("DefinitionParametersSegment")), Sequence("ADD", Delimited(Ref("PublicationObjectsSegment"))), Sequence("SET", Delimited(Ref("PublicationObjectsSegment"))), Sequence("DROP", Delimited(Ref("PublicationObjectsSegment"))), Sequence("RENAME", "TO", Ref("PublicationReferenceSegment")), Sequence( "OWNER", "TO", OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), ), ) class DropPublicationStatementSegment(BaseSegment): """A `DROP PUBLICATION` statement. https://www.postgresql.org/docs/15/sql-droppublication.html """ type = "drop_publication_statement" match_grammar: Matchable = Sequence( "DROP", "PUBLICATION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("PublicationReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class CreateMaterializedViewStatementSegment(BaseSegment): """A `CREATE MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-creatematerializedview.html """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "MATERIALIZED", "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("USING", Ref("ParameterNameSegment"), optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class AlterMaterializedViewStatementSegment(BaseSegment): """An `ALTER MATERIALIZED VIEW` statement.
As specified in https://www.postgresql.org/docs/14/sql-altermaterializedview.html """ type = "alter_materialized_view_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Delimited(Ref("AlterMaterializedViewActionSegment")), Sequence( "RENAME", Sequence("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), ), ), Sequence( Ref("TableReferenceSegment"), Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("ObjectReferenceSegment")), optional=True, ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence("NOWAIT", optional=True), ), ), ) class AlterMaterializedViewActionSegment(BaseSegment): """Alter Materialized View Action Segment. https://www.postgresql.org/docs/14/sql-altermaterializedview.html """ type = "alter_materialized_view_action_segment" match_grammar = OneOf( Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence("SET", "STATISTICS", Ref("NumericLiteralSegment")), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), Ref("LiteralGrammar"), ), ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Sequence( "SET", "STORAGE", OneOf("PLAIN", "EXTERNAL", "EXTENDED", "MAIN") ), Sequence("SET", "COMPRESSION", Ref("ParameterNameSegment")), ), ), Sequence("CLUSTER", "ON", Ref("ParameterNameSegment")), Sequence("SET", "WITHOUT", "CLUSTER"), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), ) class RefreshMaterializedViewStatementSegment(BaseSegment): """A `REFRESH MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-refreshmaterializedview.html """ type = "refresh_materialized_view_statement" match_grammar = Sequence( "REFRESH", "MATERIALIZED", "VIEW", Ref.keyword("CONCURRENTLY", optional=True), Ref("TableReferenceSegment"), Ref("WithDataClauseSegment", optional=True), ) class DropMaterializedViewStatementSegment(BaseSegment): """A `DROP MATERIALIZED VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-dropmaterializedview.html """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class WithCheckOptionSegment(BaseSegment): """WITH [ CASCADED | LOCAL ] CHECK OPTION for Postgres' CREATE VIEWS. https://www.postgresql.org/docs/14/sql-createview.html """ type = "with_check_option" match_grammar: Matchable = Sequence( "WITH", OneOf("CASCADED", "LOCAL"), "CHECK", "OPTION" ) class AlterPolicyStatementSegment(BaseSegment): """An ALTER POLICY statement. 
As specified in https://www.postgresql.org/docs/current/sql-alterpolicy.html """ type = "alter_policy_statement" match_grammar = Sequence( "ALTER", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "TO", Delimited( OneOf( Ref("RoleReferenceSegment"), "PUBLIC", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ) ), optional=True, ), Sequence("USING", Bracketed(Ref("ExpressionSegment")), optional=True), Sequence( "WITH", "CHECK", Bracketed(Ref("ExpressionSegment")), optional=True ), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-createview.html """ type = "create_view_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("RECURSIVE", optional=True), "VIEW", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Ref("ValuesClauseSegment"), ), Ref("WithCheckOptionSegment", optional=True), ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW` statement. As specified in https://www.postgresql.org/docs/14/sql-alterview.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( "SET", "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), Sequence("DROP", "DEFAULT"), ), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "RENAME", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))), ), ), ) class DropViewStatementSegment(ansi.DropViewStatementSegment): """A `DROP VIEW` statement. https://www.postgresql.org/docs/15/sql-dropview.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719 """ match_grammar: Matchable = Sequence( "DROP", "VIEW", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TableReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-createdatabase.html """ match_grammar = Sequence( "CREATE", "DATABASE", Ref("DatabaseReferenceSegment"), Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence( "OWNER", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( "TEMPLATE", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( "ENCODING", Ref("EqualsSegment", optional=True), OneOf(Ref("QuotedLiteralSegment"), "DEFAULT"), ), OneOf( # LOCALE This is a shortcut for setting LC_COLLATE and LC_CTYPE at once.
# If you specify this, you cannot specify either of those parameters. Sequence( "LOCALE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), AnyNumberOf( Sequence( "LC_COLLATE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "LC_CTYPE", Ref("EqualsSegment", optional=True), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "TABLESPACE", Ref("EqualsSegment", optional=True), OneOf(Ref("TablespaceReferenceSegment"), "DEFAULT"), ), Sequence( "ALLOW_CONNECTIONS", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), Sequence( "CONNECTION", "LIMIT", Ref("EqualsSegment", optional=True), Ref("NumericLiteralSegment"), ), Sequence( "IS_TEMPLATE", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), ), ) class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-alterdatabase.html """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", "DATABASE", Ref("DatabaseReferenceSegment"), OneOf( Sequence( Ref.keyword("WITH", optional=True), AnyNumberOf( Sequence("ALLOW_CONNECTIONS", Ref("BooleanLiteralGrammar")), Sequence( "CONNECTION", "LIMIT", Ref("NumericLiteralSegment"), ), Sequence("IS_TEMPLATE", Ref("BooleanLiteralGrammar")), min_times=1, ), ), Sequence("RENAME", "TO", Ref("DatabaseReferenceSegment")), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence( "SET", Ref("ParameterNameSegment"), OneOf( Sequence( OneOf("TO", Ref("EqualsSegment")), OneOf("DEFAULT", Ref("LiteralGrammar")), ), Sequence("FROM", "CURRENT"), ), ), Sequence("RESET", OneOf("ALL", Ref("ParameterNameSegment"))), optional=True, ), ) class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment): """A `DROP DATABASE` statement. As specified in https://www.postgresql.org/docs/14/sql-dropdatabase.html """ match_grammar = Sequence( "DROP", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), Bracketed("FORCE"), optional=True, ), ) class VacuumStatementSegment(BaseSegment): """A `VACUUM` statement. https://www.postgresql.org/docs/15/sql-vacuum.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L11658 """ type = "vacuum_statement" match_grammar = Sequence( "VACUUM", OneOf( Sequence( Ref.keyword("FULL", optional=True), Ref.keyword("FREEZE", optional=True), Ref.keyword("VERBOSE", optional=True), OneOf("ANALYZE", "ANALYSE", optional=True), ), Bracketed( Delimited( Sequence( OneOf( "FULL", "FREEZE", "VERBOSE", "ANALYZE", "ANALYSE", "DISABLE_PAGE_SKIPPING", "SKIP_LOCKED", "INDEX_CLEANUP", "PROCESS_TOAST", "TRUNCATE", "PARALLEL", ), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815 Ref("OnKeywordAsIdentifierSegment"), optional=True, ), ), ), ), optional=True, ), Delimited( Sequence( Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), ), optional=True, ), ) class LikeOptionSegment(BaseSegment): """Like Option Segment.
As specified in https://www.postgresql.org/docs/13/sql-createtable.html """ type = "like_option_segment" match_grammar = Sequence( OneOf("INCLUDING", "EXCLUDING"), OneOf( "COMMENTS", "CONSTRAINTS", "DEFAULTS", "GENERATED", "IDENTITY", "INDEXES", "STATISTICS", "STORAGE", "ALL", ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. https://www.postgresql.org/docs/13/sql-altertable.html """ # Column constraint from # https://www.postgresql.org/docs/12/sql-createtable.html match_grammar = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), Sequence( # DEFAULT "DEFAULT", OneOf( Ref("ShorthandCastSegment"), Ref("LiteralGrammar"), Ref("FunctionSegment"), Ref("BareFunctionSegment"), Ref("ExpressionSegment"), ), ), Sequence("GENERATED", "ALWAYS", "AS", Ref("ExpressionSegment"), "STORED"), Sequence( "GENERATED", OneOf("ALWAYS", Sequence("BY", "DEFAULT")), "AS", "IDENTITY", Bracketed( AnyNumberOf(Ref("AlterSequenceOptionsSegment")), optional=True ), ), Sequence( "UNIQUE", Sequence( "NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True, ), Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ), Sequence( "PRIMARY", "KEY", Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ), Ref("ReferenceDefinitionGrammar"), # REFERENCES reftable [ ( refcolumn) ] ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class PartitionBoundSpecSegment(BaseSegment): """Partition bound spec. As per https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "partition_bound_spec" match_grammar = OneOf( Sequence( "IN", Bracketed(Delimited(Ref("ExpressionSegment"))), ), Sequence( "FROM", Bracketed( Delimited( OneOf(Ref("ExpressionSegment"), "MINVALUE", "MAXVALUE"), ) ), "TO", Bracketed( Delimited( OneOf(Ref("ExpressionSegment"), "MINVALUE", "MAXVALUE"), ) ), ), Sequence( "WITH", Bracketed( Sequence( "MODULUS", Ref("NumericLiteralSegment"), Ref("CommaSegment"), "REMAINDER", Ref("NumericLiteralSegment"), ) ), ), ) class TableConstraintSegment(ansi.TableConstraintSegment): """A table constraint, e.g. for CREATE TABLE. As specified in https://www.postgresql.org/docs/13/sql-altertable.html """ match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( "CHECK", Bracketed(Ref("ExpressionSegment")), Sequence("NO", "INHERIT", optional=True), ), Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Sequence( "NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True, ), Ref("BracketedColumnReferenceListGrammar"), Ref("IndexParametersSegment", optional=True), ), Sequence( # PRIMARY KEY ( column_name [, ... 
] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref("IndexParametersSegment", optional=True), ), Sequence( "EXCLUDE", Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True), Bracketed(Delimited(Ref("ExclusionConstraintElementSegment"))), Ref("IndexParametersSegment", optional=True), Sequence("WHERE", Bracketed(Ref("ExpressionSegment")), optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] "FOREIGN", "KEY", # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), AnyNumberOf( OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE")), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE") ), Sequence("NOT", "VALID"), Sequence("NO", "INHERIT"), ), ) class TableConstraintUsingIndexSegment(BaseSegment): """table_constraint_using_index. As specified in: https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), Sequence( OneOf("UNIQUE", Ref("PrimaryKeyGrammar")), "USING", "INDEX", Ref("IndexReferenceSegment"), ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class IndexParametersSegment(BaseSegment): """index_parameters. As specified in https://www.postgresql.org/docs/13/sql-altertable.html. """ type = "index_parameters" match_grammar = Sequence( Sequence("INCLUDE", Ref("BracketedColumnReferenceListGrammar"), optional=True), Sequence("WITH", Ref("DefinitionParametersSegment"), optional=True), Sequence( "USING", "INDEX", "TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True, ), ) class ReferentialActionSegment(BaseSegment): """Foreign Key constraints. https://www.postgresql.org/docs/13/infoschema-referential-constraints.html """ type = "referential_action" match_grammar = OneOf( "CASCADE", Sequence("SET", "NULL"), Sequence("SET", "DEFAULT"), "RESTRICT", Sequence("NO", "ACTION"), ) class IndexElementOptionsSegment(BaseSegment): """Index element options segment. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8057 """ type = "index_element_options" match_grammar = Sequence( Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), Sequence( Ref( "OperatorClassReferenceSegment", exclude=Sequence("NULLS", OneOf("FIRST", "LAST")), ), Ref("RelationOptionsSegment", optional=True), # args for opclass optional=True, ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ) class IndexElementSegment(BaseSegment): """Index element segment. As found in https://www.postgresql.org/docs/15/sql-altertable.html. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L8089 """ type = "index_element" match_grammar = Sequence( OneOf( Ref("ColumnReferenceSegment"), # TODO: This is still not perfect. This corresponds to # func_expr_windowless in the grammar and we don't currently # implement everything it provides. 
Ref("FunctionSegment"), Bracketed(Ref("ExpressionSegment")), ), Ref("IndexElementOptionsSegment", optional=True), ) class ExclusionConstraintElementSegment(BaseSegment): """Exclusion constraint element segment. As found in https://www.postgresql.org/docs/15/sql-altertable.html. https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L4277 """ type = "exclusion_constraint_element" match_grammar = Sequence( Ref("IndexElementSegment"), "WITH", Ref("ComparisonOperatorGrammar"), ) class AlterDefaultPrivilegesStatementSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` statement. ``` ALTER DEFAULT PRIVILEGES [ FOR { ROLE | USER } target_role [, ...] ] [ IN SCHEMA schema_name [, ...] ] abbreviated_grant_or_revoke ``` https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_statement" match_grammar = Sequence( "ALTER", "DEFAULT", "PRIVILEGES", Sequence( "FOR", OneOf("ROLE", "USER"), Delimited( Ref("ObjectReferenceSegment"), terminators=["IN", "GRANT", "REVOKE"], ), optional=True, ), Sequence( "IN", "SCHEMA", Delimited( Ref("SchemaReferenceSegment"), terminators=["GRANT", "REVOKE"], ), optional=True, ), OneOf( Ref("AlterDefaultPrivilegesGrantSegment"), Ref("AlterDefaultPrivilegesRevokeSegment"), ), ) class AlterDefaultPrivilegesObjectPrivilegesSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` object privileges. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_object_privilege" match_grammar = OneOf( Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), Delimited( "CREATE", "DELETE", "EXECUTE", "INSERT", "REFERENCES", "SELECT", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", terminators=["ON"], ), ) class AlterDefaultPrivilegesSchemaObjectsSegment(BaseSegment): """`ALTER DEFAULT PRIVILEGES` schema object types. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_schema_object" match_grammar = OneOf( "TABLES", "FUNCTIONS", "ROUTINES", "SEQUENCES", "TYPES", "SCHEMAS", ) class AlterDefaultPrivilegesToFromRolesSegment(BaseSegment): """The segment after `TO` / `FROM` in `ALTER DEFAULT PRIVILEGES`. `{ [ GROUP ] role_name | PUBLIC } [, ...]` https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_to_from_roles" match_grammar = OneOf( Sequence( Ref.keyword("GROUP", optional=True), Ref("RoleReferenceSegment"), ), "PUBLIC", ) class AlterDefaultPrivilegesGrantSegment(BaseSegment): """`GRANT` for `ALTER DEFAULT PRIVILEGES`. https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_grant" match_grammar = Sequence( "GRANT", Ref("AlterDefaultPrivilegesObjectPrivilegesSegment"), "ON", Ref("AlterDefaultPrivilegesSchemaObjectsSegment"), "TO", Delimited( Ref("AlterDefaultPrivilegesToFromRolesSegment"), terminators=["WITH"], ), Sequence("WITH", "GRANT", "OPTION", optional=True), ) class AlterDefaultPrivilegesRevokeSegment(BaseSegment): """`REVOKE` for `ALTER DEFAULT PRIVILEGES`. 
https://www.postgresql.org/docs/13/sql-alterdefaultprivileges.html """ type = "alter_default_privileges_revoke" match_grammar = Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), Ref("AlterDefaultPrivilegesObjectPrivilegesSegment"), "ON", Ref("AlterDefaultPrivilegesSchemaObjectsSegment"), "FROM", Delimited( Ref("AlterDefaultPrivilegesToFromRolesSegment"), terminators=["RESTRICT", "CASCADE"], ), Ref("DropBehaviorGrammar", optional=True), ) class DropOwnedStatementSegment(BaseSegment): """A `DROP OWNED` statement. https://www.postgresql.org/docs/15/sql-drop-owned.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6667 """ type = "drop_owned_statement" match_grammar = Sequence( "DROP", "OWNED", "BY", Delimited( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), Ref("DropBehaviorGrammar", optional=True), ) class ReassignOwnedStatementSegment(BaseSegment): """A `REASSIGN OWNED` statement. https://www.postgresql.org/docs/15/sql-reassign-owned.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6678 """ type = "reassign_owned_statement" match_grammar = Sequence( "REASSIGN", "OWNED", "BY", Delimited( OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ), "TO", OneOf( "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", # must come last; CURRENT_USER isn't reserved: Ref("RoleReferenceSegment"), ), ) class CommentOnStatementSegment(BaseSegment): """`COMMENT ON` statement. https://www.postgresql.org/docs/13/sql-comment.html """ type = "comment_clause" match_grammar = Sequence( "COMMENT", "ON", Sequence( OneOf( Sequence( OneOf( "TABLE", # TODO: Create a ViewReferenceSegment "VIEW", ), Ref("TableReferenceSegment"), ), Sequence( "CAST", Bracketed( Sequence( Ref("ObjectReferenceSegment"), "AS", Ref("ObjectReferenceSegment"), ), ), ), Sequence( "COLUMN", # TODO: Does this correctly emit a Table Reference? Ref("ColumnReferenceSegment"), ), Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), Sequence( "ON", Ref.keyword("DOMAIN", optional=True), Ref("ObjectReferenceSegment"), ), ), Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), Sequence( "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), Sequence(Ref("FunctionParameterListGrammar"), optional=True), ), Sequence( "INDEX", Ref("IndexReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), # TODO: Split out individual items if they have references Sequence( OneOf( "COLLATION", "CONVERSION", "DOMAIN", "LANGUAGE", "POLICY", "PUBLICATION", "ROLE", "RULE", "SEQUENCE", "SERVER", "STATISTICS", "SUBSCRIPTION", "TABLESPACE", "TRIGGER", "TYPE", Sequence("ACCESS", "METHOD"), Sequence("EVENT", "TRIGGER"), Sequence("FOREIGN", "DATA", "WRAPPER"), Sequence("FOREIGN", "TABLE"), Sequence("MATERIALIZED", "VIEW"), Sequence("TEXT", "SEARCH", "CONFIGURATION"), Sequence("TEXT", "SEARCH", "DICTIONARY"), Sequence("TEXT", "SEARCH", "PARSER"), Sequence("TEXT", "SEARCH", "TEMPLATE"), ), Ref("ObjectReferenceSegment"), Sequence("ON", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( OneOf( "AGGREGATE", "PROCEDURE", "ROUTINE", ), Ref("ObjectReferenceSegment"), Bracketed( Sequence( # TODO: Is this too permissive? 
Anything(), optional=True, ), optional=True, ), ), ), Sequence("IS", OneOf(Ref("QuotedLiteralSegment"), "NULL")), ), ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. As specified in https://www.postgresql.org/docs/13/sql-createindex.html """ match_grammar = Sequence( "CREATE", Ref.keyword("UNIQUE", optional=True), "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Sequence( Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), optional=True, ), "ON", Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), Sequence("USING", Ref("IndexAccessMethodSegment"), optional=True), Bracketed(Delimited(Ref("IndexElementSegment"))), Sequence( "INCLUDE", Bracketed(Delimited(Ref("IndexElementSegment"))), optional=True ), Sequence("NULLS", Ref.keyword("NOT", optional=True), "DISTINCT", optional=True), Sequence("WITH", Ref("RelationOptionsSegment"), optional=True), Sequence("TABLESPACE", Ref("TablespaceReferenceSegment"), optional=True), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ) class AlterIndexStatementSegment(BaseSegment): """An ALTER INDEX segment. As per https://www.postgresql.org/docs/14/sql-alterindex.html """ type = "alter_index_statement" match_grammar = Sequence( "ALTER", "INDEX", OneOf( Sequence( Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("IndexReferenceSegment")), Sequence("SET", "TABLESPACE", Ref("TablespaceReferenceSegment")), Sequence("ATTACH", "PARTITION", Ref("IndexReferenceSegment")), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), Sequence( "SET", Bracketed( Delimited( Sequence( Ref("ParameterNameSegment"), Sequence( Ref("EqualsSegment"), Ref("LiteralGrammar"), optional=True, ), ) ) ), ), Sequence( "RESET", Bracketed(Delimited(Ref("ParameterNameSegment"))) ), Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("NumericLiteralSegment"), "SET", "STATISTICS", Ref("NumericLiteralSegment"), ), ), ), Sequence( "ALL", "IN", "TABLESPACE", Ref("TablespaceReferenceSegment"), Sequence( "OWNED", "BY", Delimited(Ref("RoleReferenceSegment")), optional=True ), "SET", "TABLESPACE", Ref("TablespaceReferenceSegment"), Ref.keyword("NOWAIT", optional=True), ), ), ) class ReindexStatementSegment(BaseSegment): """A Reindex Statement Segment. As per https://www.postgresql.org/docs/14/sql-reindex.html """ type = "reindex_statement_segment" match_grammar = Sequence( "REINDEX", Bracketed( Delimited( Sequence("CONCURRENTLY", Ref("BooleanLiteralGrammar", optional=True)), Sequence( "TABLESPACE", Ref("TablespaceReferenceSegment"), ), Sequence("VERBOSE", Ref("BooleanLiteralGrammar", optional=True)), ), optional=True, ), OneOf( Sequence( "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Ref("IndexReferenceSegment"), ), Sequence( "TABLE", Ref.keyword("CONCURRENTLY", optional=True), Ref("TableReferenceSegment"), ), Sequence( "SCHEMA", Ref.keyword("CONCURRENTLY", optional=True), Ref("SchemaReferenceSegment"), ), Sequence( OneOf("DATABASE", "SYSTEM"), Ref.keyword("CONCURRENTLY", optional=True), Ref("DatabaseReferenceSegment"), ), ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. 
https://www.postgresql.org/docs/15/sql-dropindex.html https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6698-L6719 https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L6808-L6829 """ match_grammar: Matchable = Sequence( "DROP", "INDEX", Ref.keyword("CONCURRENTLY", optional=True), Ref("IfExistsGrammar", optional=True), Delimited(Ref("IndexReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. As specified in https://www.postgresql.org/docs/13/sql-expressions.html """ _frame_extent = ansi.FrameClauseSegment._frame_extent _frame_exclusion = Sequence( "EXCLUDE", OneOf(Sequence("CURRENT", "ROW"), "GROUP", "TIES", Sequence("NO", "OTHERS")), optional=True, ) match_grammar = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), _frame_exclusion, ) class CreateSequenceOptionsSegment(ansi.CreateSequenceOptionsSegment): """Options for Create Sequence statement. As specified in https://www.postgresql.org/docs/13/sql-createsequence.html """ match_grammar = OneOf( Sequence("AS", Ref("DatatypeSegment")), Sequence( "INCREMENT", Ref.keyword("BY", optional=True), Ref("NumericLiteralSegment") ), OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), ), OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), ), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment") ), Sequence("CACHE", Ref("NumericLiteralSegment")), OneOf("CYCLE", Sequence("NO", "CYCLE")), Sequence("OWNED", "BY", OneOf("NONE", Ref("ColumnReferenceSegment"))), ) class CreateSequenceStatementSegment(BaseSegment): """Create Sequence Statement. As specified in https://www.postgresql.org/docs/13/sql-createsequence.html """ type = "create_sequence_statement" match_grammar = Sequence( "CREATE", Ref("TemporaryGrammar", optional=True), "SEQUENCE", Ref("IfNotExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), AnyNumberOf(Ref("CreateSequenceOptionsSegment"), optional=True), ) class AlterSequenceOptionsSegment(ansi.AlterSequenceOptionsSegment): """Dialect-specific options for ALTER SEQUENCE statement. As specified in https://www.postgresql.org/docs/13/sql-altersequence.html """ match_grammar = OneOf( Sequence("AS", Ref("DatatypeSegment")), Sequence( "INCREMENT", Ref.keyword("BY", optional=True), Ref("NumericLiteralSegment") ), OneOf( Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), ), OneOf( Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), ), # N.B. The SEQUENCE NAME keywords are undocumented but are produced # by the pg_dump utility. See discussion in issue #1857. Sequence("SEQUENCE", "NAME", Ref("SequenceReferenceSegment")), Sequence( "START", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment") ), Sequence( "RESTART", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment") ), Sequence("CACHE", Ref("NumericLiteralSegment")), Sequence(Ref.keyword("NO", optional=True), "CYCLE"), Sequence("OWNED", "BY", OneOf("NONE", Ref("ColumnReferenceSegment"))), ) class AlterSequenceStatementSegment(ansi.AlterSequenceStatementSegment): """Alter Sequence Statement. 
As specified in https://www.postgresql.org/docs/13/sql-altersequence.html """ match_grammar = Sequence( "ALTER", "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), OneOf( AnyNumberOf(Ref("AlterSequenceOptionsSegment", optional=True)), Sequence( "OWNER", "TO", OneOf(Ref("ParameterNameSegment"), "CURRENT_USER", "SESSION_USER"), ), Sequence("RENAME", "TO", Ref("SequenceReferenceSegment")), Sequence("SET", "SCHEMA", Ref("SchemaReferenceSegment")), ), ) class DropSequenceStatementSegment(ansi.DropSequenceStatementSegment): """Drop Sequence Statement. As specified in https://www.postgresql.org/docs/13/sql-dropsequence.html """ match_grammar = Sequence( "DROP", "SEQUENCE", Ref("IfExistsGrammar", optional=True), Delimited(Ref("SequenceReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class AnalyzeStatementSegment(BaseSegment): """Analyze Statement Segment. As specified in https://www.postgresql.org/docs/13/sql-analyze.html """ type = "analyze_statement" _option = Sequence( OneOf("VERBOSE", "SKIP_LOCKED"), Ref("BooleanLiteralGrammar", optional=True) ) _tables_and_columns = Sequence( Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), ) match_grammar = Sequence( OneOf("ANALYZE", "ANALYSE"), OneOf(Bracketed(Delimited(_option)), "VERBOSE", optional=True), Delimited(_tables_and_columns, optional=True), ) # Adding PostgreSQL specific statements class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AlterDefaultPrivilegesStatementSegment"), Ref("DropOwnedStatementSegment"), Ref("ReassignOwnedStatementSegment"), Ref("CommentOnStatementSegment"), Ref("AnalyzeStatementSegment"), Ref("CreateTableAsStatementSegment"), Ref("AlterTriggerStatementSegment"), Ref("SetStatementSegment"), Ref("AlterPolicyStatementSegment"), Ref("CreatePolicyStatementSegment"), Ref("DropPolicyStatementSegment"), Ref("CreateDomainStatementSegment"), Ref("AlterDomainStatementSegment"), Ref("DropDomainStatementSegment"), Ref("CreateMaterializedViewStatementSegment"), Ref("AlterMaterializedViewStatementSegment"), Ref("DropMaterializedViewStatementSegment"), Ref("RefreshMaterializedViewStatementSegment"), Ref("AlterDatabaseStatementSegment"), Ref("DropDatabaseStatementSegment"), Ref("VacuumStatementSegment"), Ref("AlterFunctionStatementSegment"), Ref("CreateViewStatementSegment"), Ref("AlterViewStatementSegment"), Ref("ListenStatementSegment"), Ref("NotifyStatementSegment"), Ref("UnlistenStatementSegment"), Ref("LoadStatementSegment"), Ref("ResetStatementSegment"), Ref("DiscardStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("CopyStatementSegment"), Ref("DoStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("ReindexStatementSegment"), Ref("AlterRoleStatementSegment"), Ref("CreateExtensionStatementSegment"), Ref("DropExtensionStatementSegment"), Ref("CreatePublicationStatementSegment"), Ref("AlterPublicationStatementSegment"), Ref("DropPublicationStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("AlterTypeStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("LockTableStatementSegment"), Ref("ClusterStatementSegment"), Ref("CreateCollationStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("CreateServerStatementSegment"), Ref("CreateUserMappingStatementSegment"), Ref("ImportForeignSchemaStatementSegment"), 
], ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-createtrigger.html """ match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref.keyword("CONSTRAINT", optional=True), "TRIGGER", Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF")), Delimited( "INSERT", "DELETE", "TRUNCATE", Sequence( "UPDATE", Sequence( "OF", Delimited( Ref("ColumnReferenceSegment"), terminators=["OR", "ON"], ), optional=True, ), ), delimiter="OR", ), "ON", Ref("TableReferenceSegment"), AnyNumberOf( Sequence("FROM", Ref("TableReferenceSegment")), OneOf( Sequence("NOT", "DEFERRABLE"), Sequence( Ref.keyword("DEFERRABLE", optional=True), OneOf( Sequence("INITIALLY", "IMMEDIATE"), Sequence("INITIALLY", "DEFERRED"), ), ), ), Sequence( "REFERENCING", OneOf("OLD", "NEW"), "TABLE", "AS", Ref("TableReferenceSegment"), Sequence( OneOf("OLD", "NEW"), "TABLE", "AS", Ref("TableReferenceSegment"), optional=True, ), ), Sequence( "FOR", Ref.keyword("EACH", optional=True), OneOf("ROW", "STATEMENT") ), Sequence("WHEN", Bracketed(Ref("ExpressionSegment"))), ), Sequence( "EXECUTE", OneOf("FUNCTION", "PROCEDURE"), Ref("FunctionSegment"), ), ) class AlterTriggerStatementSegment(BaseSegment): """Alter Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-altertrigger.html """ type = "alter_trigger" match_grammar = Sequence( "ALTER", "TRIGGER", Ref("TriggerReferenceSegment"), "ON", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TriggerReferenceSegment")), Sequence( Ref.keyword("NO", optional=True), "DEPENDS", "ON", "EXTENSION", Ref("ExtensionReferenceSegment"), ), ), ) class DropTriggerStatementSegment(ansi.DropTriggerStatementSegment): """Drop Trigger Statement. As Specified in https://www.postgresql.org/docs/14/sql-droptrigger.html """ match_grammar = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. The optional AS keyword allows both implicit and explicit aliasing. """ match_grammar = Sequence( Ref.keyword("AS", optional=True), OneOf( Sequence( Ref("SingleIdentifierGrammar"), Bracketed(Ref("SingleIdentifierListSegment"), optional=True), ), Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed( Delimited( Sequence(Ref("ParameterNameSegment"), Ref("DatatypeSegment")) ) ), ), ), ) class AsAliasExpressionSegment(BaseSegment): """A reference to an object with an `AS` clause. This is used in `InsertStatementSegment` in Postgres since the `AS` is not optional in this context. N.B. We keep as a separate segment since the `alias_expression` type is required for rules to interpret the alias. """ type = "alias_expression" match_grammar = Sequence( Indent, "AS", Ref("SingleIdentifierGrammar"), Dedent, ) class OperationClassReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an operation class.""" type = "operation_class_reference" class ConflictActionSegment(BaseSegment): """A Conflict Action Statement used within an INSERT statement. 
As specified in https://www.postgresql.org/docs/14/sql-insert.html """ type = "conflict_action" match_grammar = Sequence( "DO", OneOf( "NOTHING", Sequence( "UPDATE", "SET", Delimited( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), OneOf(Ref("ExpressionSegment"), "DEFAULT"), ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("EqualsSegment"), Ref.keyword("ROW", optional=True), Bracketed( Delimited(OneOf(Ref("ExpressionSegment"), "DEFAULT")) ), ), Sequence( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("EqualsSegment"), Bracketed(Ref("SelectableGrammar")), ), ) ), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ), ), ) class ConflictTargetSegment(BaseSegment): """A Conflict Target Statement used within an INSERT statement. As specified in https://www.postgresql.org/docs/14/sql-insert.html """ type = "conflict_target" match_grammar = OneOf( Sequence( Bracketed( Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), Bracketed(Ref("ExpressionSegment")), Ref("FunctionSegment"), ), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("OperationClassReferenceSegment", optional=True), ) ) ), Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ), Sequence("ON", "CONSTRAINT", Ref("ParameterNameSegment")), ) class InsertStatementSegment(ansi.InsertStatementSegment): """An `INSERT` statement. https://www.postgresql.org/docs/14/sql-insert.html """ match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), Ref("AsAliasExpressionSegment", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence("OVERRIDING", OneOf("SYSTEM", "USER"), "VALUE", optional=True), OneOf( Sequence("DEFAULT", "VALUES"), Ref("SelectableGrammar"), ), Sequence( "ON", "CONFLICT", Ref("ConflictTargetSegment", optional=True), Ref("ConflictActionSegment"), optional=True, ), Sequence( "RETURNING", OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AsAliasExpressionSegment", optional=True), ), ), ), optional=True, ), ) class DropTypeStatementSegment(ansi.DropTypeStatementSegment): """Drop Type Statement. As specified in https://www.postgresql.org/docs/14/sql-droptype.html """ match_grammar = Sequence( "DROP", "TYPE", Ref("IfExistsGrammar", optional=True), Delimited(Ref("DatatypeSegment")), Ref("DropBehaviorGrammar", optional=True), ) class SetStatementSegment(BaseSegment): """Set Statement. As specified in https://www.postgresql.org/docs/14/sql-set.html Also: https://www.postgresql.org/docs/15/sql-set-role.html (still a VariableSetStmt) https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1584 """ type = "set_statement" match_grammar = Sequence( "SET", OneOf("SESSION", "LOCAL", optional=True), OneOf( Sequence( Ref("ParameterNameSegment"), OneOf("TO", Ref("EqualsSegment")), OneOf( "DEFAULT", Delimited( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), # https://github.com/postgres/postgres/blob/4380c2509d51febad34e1fac0cfaeb98aaa716c5/src/backend/parser/gram.y#L1810-L1815 Ref("OnKeywordAsIdentifierSegment"), ), ), ), Sequence( "TIME", "ZONE", OneOf(Ref("QuotedLiteralSegment"), "LOCAL", "DEFAULT") ), Sequence("SCHEMA", Ref("QuotedLiteralSegment")), Sequence("ROLE", OneOf("NONE", Ref("RoleReferenceSegment"))), ), ) class CreatePolicyStatementSegment(BaseSegment): """A `CREATE POLICY` statement. 
As Specified in https://www.postgresql.org/docs/14/sql-createpolicy.html """ type = "create_policy_statement" match_grammar = Sequence( "CREATE", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence("AS", OneOf("PERMISSIVE", "RESTRICTIVE"), optional=True), Sequence( "FOR", OneOf("ALL", "SELECT", "INSERT", "UPDATE", "DELETE"), optional=True ), Sequence( "TO", Delimited( OneOf( Ref("ObjectReferenceSegment"), "PUBLIC", "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ) ), optional=True, ), Sequence("USING", Bracketed(Ref("ExpressionSegment")), optional=True), Sequence("WITH", "CHECK", Bracketed(Ref("ExpressionSegment")), optional=True), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://www.postgresql.org/docs/14/sql-call.html """ type = "call_statement" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class CreateDomainStatementSegment(BaseSegment): """A `CREATE Domain` statement. As Specified in https://www.postgresql.org/docs/current/sql-createdomain.html """ type = "create_domain_statement" match_grammar = Sequence( "CREATE", "DOMAIN", Ref("ObjectReferenceSegment"), Sequence("AS", optional=True), Ref("DatatypeSegment"), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), Sequence("DEFAULT", Ref("ExpressionSegment"), optional=True), AnyNumberOf( Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Ref("ExpressionSegment")), ), ), ), ) class AlterDomainStatementSegment(BaseSegment): """An `ALTER DOMAIN` statement. As Specified in https://www.postgresql.org/docs/current/sql-alterdomain.html """ type = "alter_domain_statement" match_grammar: Matchable = Sequence( "ALTER", "DOMAIN", Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", "DEFAULT", Ref("ExpressionSegment"), ), Sequence( "DROP", "DEFAULT", ), Sequence(OneOf("SET", "DROP"), "NOT", "NULL"), Sequence( "ADD", Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True, ), OneOf( Sequence(Ref.keyword("NOT", optional=True), "NULL"), Sequence("CHECK", Ref("ExpressionSegment")), ), Sequence("NOT", "VALID", optional=True), ), Sequence( "DROP", "CONSTRAINT", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf("RESTRICT", "CASCADE", optional=True), ), Sequence( "RENAME", "CONSTRAINT", Ref("ObjectReferenceSegment"), "TO", Ref("ObjectReferenceSegment"), ), Sequence( "VALIDATE", "CONSTRAINT", Ref("ObjectReferenceSegment"), ), Sequence( "OWNER", "TO", OneOf( Ref("ObjectReferenceSegment"), "CURRENT_ROLE", "CURRENT_USER", "SESSION_USER", ), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "SET", "SCHEMA", Ref("ObjectReferenceSegment"), ), ), ) class DropDomainStatementSegment(BaseSegment): """Drop Domain Statement. As Specified in https://www.postgresql.org/docs/current/sql-dropdomain.html """ type = "drop_domain_statement" match_grammar = Sequence( "DROP", "DOMAIN", Ref("IfExistsGrammar", optional=True), Delimited(Ref("ObjectReferenceSegment")), Ref("DropBehaviorGrammar", optional=True), ) class DropPolicyStatementSegment(BaseSegment): """A `DROP POLICY` statement. 
As Specified in https://www.postgresql.org/docs/14/sql-droppolicy.html """ type = "drop_policy_statement" match_grammar = Sequence( "DROP", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class LoadStatementSegment(BaseSegment): """A `LOAD` statement. As Specified in https://www.postgresql.org/docs/14/sql-load.html """ type = "load_statement" match_grammar = Sequence( "LOAD", Ref("QuotedLiteralSegment"), ) class ResetStatementSegment(BaseSegment): """A `RESET` statement. As Specified in https://www.postgresql.org/docs/14/sql-reset.html Also, RESET ROLE from: https://www.postgresql.org/docs/15/sql-set-role.html """ type = "reset_statement" match_grammar = Sequence( "RESET", OneOf("ALL", "ROLE", Ref("ParameterNameSegment")), ) class DiscardStatementSegment(BaseSegment): """A `DISCARD` statement. As Specified in https://www.postgresql.org/docs/14/sql-discard.html """ type = "discard_statement" match_grammar = Sequence( "DISCARD", OneOf( "ALL", "PLANS", "SEQUENCES", "TEMPORARY", "TEMP", ), ) class ListenStatementSegment(BaseSegment): """A `LISTEN` statement. As Specified in https://www.postgresql.org/docs/14/sql-listen.html """ type = "listen_statement" match_grammar = Sequence("LISTEN", Ref("SingleIdentifierGrammar")) class NotifyStatementSegment(BaseSegment): """A `NOTIFY` statement. As Specified in https://www.postgresql.org/docs/14/sql-notify.html """ type = "notify_statement" match_grammar = Sequence( "NOTIFY", Ref("SingleIdentifierGrammar"), Sequence( Ref("CommaSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ) class UnlistenStatementSegment(BaseSegment): """A `UNLISTEN` statement. As Specified in https://www.postgresql.org/docs/14/sql-unlisten.html """ type = "unlisten_statement" match_grammar = Sequence( "UNLISTEN", OneOf( Ref("SingleIdentifierGrammar"), Ref("StarSegment"), ), ) class TruncateStatementSegment(ansi.TruncateStatementSegment): """`TRUNCATE TABLE` statement. https://www.postgresql.org/docs/14/sql-truncate.html """ match_grammar = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Delimited( OneOf( Sequence( Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), ), Sequence( Ref("TableReferenceSegment"), Ref("StarSegment", optional=True), ), ), ), Sequence( OneOf("RESTART", "CONTINUE"), "IDENTITY", optional=True, ), Ref( "DropBehaviorGrammar", optional=True, ), ) class CopyStatementSegment(BaseSegment): """A `COPY` statement. 
As Specified in https://www.postgresql.org/docs/14/sql-copy.html """ type = "copy_statement" _target_subset = OneOf( Ref("QuotedLiteralSegment"), Sequence("PROGRAM", Ref("QuotedLiteralSegment")) ) _table_definition = Sequence( Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), ) _option = Sequence( Ref.keyword("WITH", optional=True), Bracketed( Delimited( AnySetOf( Sequence("FORMAT", Ref("SingleIdentifierGrammar")), Sequence("FREEZE", Ref("BooleanLiteralGrammar", optional=True)), Sequence("DELIMITER", Ref("QuotedLiteralSegment")), Sequence("NULL", Ref("QuotedLiteralSegment")), Sequence("HEADER", Ref("BooleanLiteralGrammar", optional=True)), Sequence("QUOTE", Ref("QuotedLiteralSegment")), Sequence("ESCAPE", Ref("QuotedLiteralSegment")), Sequence( "FORCE_QUOTE", OneOf( Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("StarSegment"), ), ), Sequence( "FORCE_NOT_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "FORCE_NULL", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence("ENCODING", Ref("QuotedLiteralSegment")), ) ) ), optional=True, ) match_grammar = Sequence( "COPY", OneOf( Sequence( _table_definition, "FROM", OneOf( _target_subset, Sequence("STDIN"), ), _option, Sequence("WHERE", Ref("ExpressionSegment"), optional=True), ), Sequence( OneOf( _table_definition, Bracketed(Ref("UnorderedSelectStatementSegment")) ), "TO", OneOf( _target_subset, Sequence("STDOUT"), ), _option, ), ), ) class LanguageClauseSegment(BaseSegment): """Clause specifying language used for executing anonymous code blocks.""" type = "language_clause" match_grammar = Sequence( "LANGUAGE", OneOf(Ref("NakedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment")), ) class DoStatementSegment(BaseSegment): """A `DO` statement for executing anonymous code blocks. As specified in https://www.postgresql.org/docs/14/sql-do.html """ type = "do_statement" match_grammar = Sequence( "DO", OneOf( Sequence( Ref("LanguageClauseSegment", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( Ref("QuotedLiteralSegment"), Ref("LanguageClauseSegment", optional=True), ), ), ) class CTEDefinitionSegment(ansi.CTEDefinitionSegment): """A CTE Definition from a WITH statement. https://www.postgresql.org/docs/14/queries-with.html TODO: Data-Modifying Statements (INSERT, UPDATE, DELETE) in WITH """ match_grammar = Sequence( Ref("SingleIdentifierGrammar"), Ref("CTEColumnList", optional=True), "AS", Sequence(Ref.keyword("NOT", optional=True), "MATERIALIZED", optional=True), Bracketed( Ref("SelectableGrammar"), parse_mode=ParseMode.GREEDY, ), OneOf( Sequence( "SEARCH", OneOf( "BREADTH", "DEPTH", ), "FIRST", "BY", Ref("ColumnReferenceSegment"), "SET", Ref("ColumnReferenceSegment"), ), Sequence( "CYCLE", Ref("ColumnReferenceSegment"), "SET", Ref("ColumnReferenceSegment"), "USING", Ref("ColumnReferenceSegment"), ), optional=True, ), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause within in `WITH` or `SELECT`.""" match_grammar = Sequence( "VALUES", Delimited( Bracketed( Delimited( Ref("ExpressionSegment"), # DEFAULT keyword used in # INSERT INTO statement. "DEFAULT", ), parse_mode=ParseMode.GREEDY, ), ), Ref("AliasExpressionSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class DeleteStatementSegment(ansi.DeleteStatementSegment): """A `DELETE` statement. 
https://www.postgresql.org/docs/14/sql-delete.html """ match_grammar = Sequence( "DELETE", "FROM", Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), Ref("StarSegment", optional=True), Ref("AliasExpressionSegment", optional=True), Sequence( "USING", Indent, Delimited( Sequence( Ref("TableExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, optional=True, ), OneOf( Sequence("WHERE", "CURRENT", "OF", Ref("ObjectReferenceSegment")), Ref("WhereClauseSegment"), optional=True, ), Sequence( "RETURNING", OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), ), optional=True, ), ) class SetClauseSegment(BaseSegment): """SQL 1992 set clause. ::= ::= | | DEFAULT ::= """ type = "set_clause" match_grammar: Matchable = Sequence( OneOf( Sequence( Ref("ColumnReferenceSegment"), Ref("ArrayAccessorSegment", optional=True), Ref("EqualsSegment"), OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), "DEFAULT", ), AnyNumberOf(Ref("ShorthandCastSegment")), ), Sequence( Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), Ref("EqualsSegment"), Bracketed( OneOf( # Potentially a bracketed SELECT Ref("SelectableGrammar"), # Or a delimited list of literals Delimited( Sequence( OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), "DEFAULT", ), AnyNumberOf(Ref("ShorthandCastSegment")), ), ), ), ), ), ), ) class UpdateStatementSegment(BaseSegment): """An `Update` statement. https://www.postgresql.org/docs/current/sql-update.html """ type = "update_statement" match_grammar: Matchable = Sequence( # TODO add [ WITH [ RECURSIVE ] with_query [, ...] ] "UPDATE", Ref.keyword("ONLY", optional=True), Ref("TableReferenceSegment"), # SET is not a reserved word in all dialects (e.g. RedShift) # So specifically exclude as an allowed implicit alias to avoid parsing errors Ref("AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True), Ref("SetClauseListSegment"), Ref("FromClauseSegment", optional=True), OneOf( Sequence("WHERE", "CURRENT", "OF", Ref("ObjectReferenceSegment")), Ref("WhereClauseSegment"), optional=True, ), Sequence( "RETURNING", OneOf( Ref("StarSegment"), Delimited( Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), ), optional=True, ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement. https://www.postgresql.org/docs/current/sql-createtype.html """ type = "create_type_statement" match_grammar: Matchable = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), Sequence("AS", OneOf("ENUM", "RANGE", optional=True), optional=True), Bracketed(Delimited(Anything(), optional=True), optional=True), ) class AlterTypeStatementSegment(BaseSegment): """An `ALTER TYPE` statement. 
https://www.postgresql.org/docs/current/sql-altertype.html """ type = "alter_type_statement" match_grammar: Matchable = Sequence( "ALTER", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence( "OWNER", "TO", OneOf( "CURRENT_USER", "SESSION_USER", "CURRENT_ROLE", Ref("ObjectReferenceSegment"), ), ), Sequence( "RENAME", "VALUE", Ref("QuotedLiteralSegment"), "TO", Ref("QuotedLiteralSegment"), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "SET", "SCHEMA", Ref("SchemaReferenceSegment"), ), Delimited( Sequence( "ADD", "ATTRIBUTE", Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "ALTER", "ATTRIBUTE", Ref("ColumnReferenceSegment"), Sequence("SET", "DATA", optional=True), "TYPE", Ref("DatatypeSegment"), Sequence( "COLLATE", Ref("CollationReferenceSegment"), optional=True, ), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "DROP", "ATTRIBUTE", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), Ref("CascadeRestrictGrammar", optional=True), ), Sequence( "RENAME", "ATTRIBUTE", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), Ref("CascadeRestrictGrammar", optional=True), ), ), Sequence( "ADD", "VALUE", Ref("IfNotExistsGrammar", optional=True), Ref("QuotedLiteralSegment"), Sequence( OneOf("BEFORE", "AFTER"), Ref("QuotedLiteralSegment"), optional=True ), ), ), ) class CreateCollationStatementSegment(BaseSegment): """A `CREATE COLLATION` statement. https://www.postgresql.org/docs/current/sql-createcollation.html """ type = "create_collation_statement" match_grammar: Matchable = Sequence( "CREATE", "COLLATION", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Bracketed( Delimited( Sequence( "LOCALE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "LC_COLLATE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "LC_CTYPE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "PROVIDER", Ref("EqualsSegment"), OneOf("ICU", "LIBC"), ), Sequence( "DETERMINISTIC", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "VERSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ), Sequence( "FROM", Ref("ObjectReferenceSegment"), ), ), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://www.postgresql.org/docs/current/sql-alterschema.html """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("SchemaReferenceSegment"), ), Sequence( "OWNER", "TO", Ref("RoleReferenceSegment"), ), ), ) class LockTableStatementSegment(BaseSegment): """An `LOCK TABLE` statement. https://www.postgresql.org/docs/14/sql-lock.html """ type = "lock_table_statement" match_grammar: Matchable = Sequence( "LOCK", Ref.keyword("TABLE", optional=True), Ref.keyword("ONLY", optional=True), OneOf( Delimited( Ref("TableReferenceSegment"), ), Ref("StarSegment"), ), Sequence( "IN", OneOf( Sequence("ACCESS", "SHARE"), Sequence("ROW", "SHARE"), Sequence("ROW", "EXCLUSIVE"), Sequence("SHARE", "UPDATE", "EXCLUSIVE"), "SHARE", Sequence("SHARE", "ROW", "EXCLUSIVE"), "EXCLUSIVE", Sequence("ACCESS", "EXCLUSIVE"), ), "MODE", optional=True, ), Ref.keyword("NOWAIT", optional=True), ) class ClusterStatementSegment(BaseSegment): """A `CLUSTER` statement. 
https://www.postgresql.org/docs/current/sql-cluster.html """ type = "cluster_statement" match_grammar = Sequence( "CLUSTER", Ref.keyword("VERBOSE", optional=True), OneOf( Sequence( Ref("TableReferenceSegment"), Sequence("USING", Ref("IndexReferenceSegment"), optional=True), ), Sequence(Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment")), optional=True, ), ) class ColumnReferenceSegment(ansi.ObjectReferenceSegment): """A reference to column, field or alias. We override this for Postgres to allow keywords in fully qualified column names (using Full segments), similar to how this is done in BigQuery. """ type = "column_reference" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), Sequence( OneOf(Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))), Delimited( Ref("SingleIdentifierFullGrammar"), delimiter=OneOf( Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment")) ), terminators=[ "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("ColonSegment"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ], allow_gaps=False, ), allow_gaps=False, optional=True, ), allow_gaps=False, ) class NamedArgumentSegment(BaseSegment): """Named argument to a function. https://www.postgresql.org/docs/current/sql-syntax-calling-funcs.html#SQL-SYNTAX-CALLING-FUNCS-NAMED """ type = "named_argument" match_grammar = Sequence( Ref("NakedIdentifierSegment"), Ref("RightArrowSegment"), Ref("ExpressionSegment"), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Override from ANSI to allow optional WITH ORDINALITY clause """ match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Sequence( Ref("FunctionSegment"), Sequence("WITH", "ORDINALITY", optional=True), ), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), ) class ServerReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a server.""" type = "server_reference" class CreateServerStatementSegment(BaseSegment): """Create server statement. https://www.postgresql.org/docs/15/sql-createserver.html """ type = "create_server_statement" match_grammar: Matchable = Sequence( "CREATE", "SERVER", Ref("IfNotExistsGrammar", optional=True), Ref("ServerReferenceSegment"), Sequence("TYPE", Ref("QuotedLiteralSegment"), optional=True), Sequence("VERSION", Ref("VersionIdentifierSegment"), optional=True), Ref("ForeignDataWrapperGrammar"), Ref("ObjectReferenceSegment"), Ref("OptionsGrammar", optional=True), ) class CreateUserMappingStatementSegment(BaseSegment): """Create user mapping statement. https://www.postgresql.org/docs/15/sql-createusermapping.html """ type = "create_user_mapping_statement" match_grammar: Matchable = Sequence( Ref("CreateUserMappingGrammar"), Ref("IfNotExistsGrammar", optional=True), "FOR", OneOf( Ref("SingleIdentifierGrammar"), Ref("SessionInformationUserFunctionsGrammar"), "PUBLIC", ), "SERVER", Ref("ServerReferenceSegment"), Ref("OptionsGrammar", optional=True), ) class ImportForeignSchemaStatementSegment(BaseSegment): """Import foreign schema statement. 
https://www.postgresql.org/docs/15/sql-importforeignschema.html """ type = "import_foreign_schema_statement" match_grammar: Matchable = Sequence( Ref("ImportForeignSchemaGrammar"), Ref("SchemaReferenceSegment"), Sequence( OneOf(Sequence("LIMIT", "TO"), "EXCEPT"), Bracketed(Delimited(Ref("NakedIdentifierFullSegment"))), optional=True, ), "FROM", "SERVER", Ref("ServerReferenceSegment"), "INTO", Ref("SchemaReferenceSegment"), Ref("OptionsGrammar", optional=True), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_postgres_keywords.py000066400000000000000000001077701451700765000254210ustar00rootroot00000000000000"""Keywords in the Postgres Dialect. Most of the keywords come from https://www.postgresql.org/docs/13/sql-keywords-appendix.html Here, "not-keyword" refers to a word not being a keyword, and will be removed from any default keyword definition, these keywords are, or have been, an ANSI keyword. There are also some keywords that are(n't) supported as types and function, but there isn't support for that distinction at present. """ from typing import List, Tuple def priority_keyword_merge(*args: List[Tuple[str, str]]) -> List[Tuple[str, str]]: """Merge keyword lists, giving priority to entries in later lists. *args is a list of keyword lists, these lists should be of tuples in the form (keyword, type) """ keyword_lists = [*args] base_list = [] if len(keyword_lists) == 1: return keyword_lists[0] while len(keyword_lists) > 1: base_list, priority_list = keyword_lists[0], keyword_lists[1] keyword_set = set([x[0] for x in base_list]) for item in priority_list: if item[0] in keyword_set: for index, keyword in enumerate(base_list): if keyword[0] == item[0]: base_list.pop(index) break base_list.append(item) keyword_lists.pop(1) return base_list def get_keywords(keyword_list: List[Tuple[str, str]], keyword_type: str) -> List[str]: """Get a list of keywords of the required type. 
keyword_type should be one of "not-keyword", "reserved", "non-reserved" """ keywords = [x[0] for x in keyword_list if x[1].startswith(keyword_type)] return keywords postgres_docs_keywords = [ ("A", "not-keyword"), ("ABORT", "non-reserved"), ("ABS", "not-keyword"), ("ABSENT", "not-keyword"), ("ABSOLUTE", "non-reserved"), ("ACCESS", "non-reserved"), ("ACCORDING", "not-keyword"), ("ACOS", "not-keyword"), ("ACTION", "non-reserved"), ("ADA", "not-keyword"), ("ADD", "non-reserved"), ("ADMIN", "non-reserved"), ("AFTER", "non-reserved"), ("AGGREGATE", "non-reserved"), ("ALL", "reserved"), ("ALLOCATE", "not-keyword"), ("ALSO", "non-reserved"), ("ALTER", "non-reserved"), ("ALWAYS", "non-reserved"), ("ANALYSE", "reserved"), ("ANALYZE", "reserved"), ("AND", "reserved"), ("ANY", "reserved"), ("ARE", "not-keyword"), ("ARRAY", "reserved"), ("ARRAY_AGG", "not-keyword"), ("ARRAY_MAX_CARDINALITY", "not-keyword"), ("AS", "reserved"), ("ASC", "reserved"), ("ASENSITIVE", "not-keyword"), ("ASIN", "not-keyword"), ("ASSERTION", "non-reserved"), ("ASSIGNMENT", "non-reserved"), ("ASYMMETRIC", "reserved"), ("AT", "non-reserved"), ("ATAN", "not-keyword"), ("ATOMIC", "non-reserved"), ("ATTACH", "non-reserved"), ("ATTRIBUTE", "non-reserved"), ("ATTRIBUTES", "not-keyword"), ("AUTHORIZATION", "reserved-(can-be-function-or-type)"), ("AVG", "not-keyword"), ("BACKWARD", "non-reserved"), ("BASE64", "not-keyword"), ("BEFORE", "non-reserved"), ("BEGIN", "non-reserved"), ("BEGIN_FRAME", "not-keyword"), ("BEGIN_PARTITION", "not-keyword"), ("BERNOULLI", "non-reserved"), ("BETWEEN", "non-reserved-(cannot-be-function-or-type)"), ("BIGINT", "non-reserved-(cannot-be-function-or-type)"), ("BIGSERIAL", "non-reserved-(cannot-be-function-or-type)"), ("BINARY", "reserved-(can-be-function-or-type)"), ("BIT", "non-reserved-(cannot-be-function-or-type)"), ("BIT_LENGTH", "not-keyword"), ("BLOB", "not-keyword"), ("BLOCKED", "not-keyword"), ("BOM", "not-keyword"), ("BOOLEAN", "non-reserved-(cannot-be-function-or-type)"), ("BOOL", "non-reserved-(cannot-be-function-or-type)"), ("BOTH", "reserved"), ("BOX", "non-reserved-(cannot-be-function-or-type)"), ("BPCHAR", "non-reserved-(cannot-be-function-or-type)"), ("BREADTH", "not-keyword"), ("BY", "non-reserved"), ("BYTEA", "non-reserved-(cannot-be-function-or-type)"), ("C", "not-keyword"), ("CACHE", "non-reserved"), ("CALL", "non-reserved"), ("CALLED", "non-reserved"), ("CARDINALITY", "not-keyword"), ("CASCADE", "non-reserved"), ("CASCADED", "non-reserved"), ("CASE", "reserved"), ("CAST", "reserved"), ("CATALOG", "non-reserved"), ("CATALOG_NAME", "not-keyword"), ("CEIL", "not-keyword"), ("CEILING", "not-keyword"), ("CHAIN", "non-reserved"), ("CHAINING", "not-keyword"), ("CHAR", "non-reserved-(cannot-be-function-or-type)"), ("CHARACTER", "non-reserved-(cannot-be-function-or-type)"), ("CHARACTERISTICS", "non-reserved"), ("CHARACTERS", "not-keyword"), ("CHARACTER_LENGTH", "not-keyword"), ("CHARACTER_SET_CATALOG", "not-keyword"), ("CHARACTER_SET_NAME", "not-keyword"), ("CHARACTER_SET_SCHEMA", "not-keyword"), ("CHAR_LENGTH", "not-keyword"), ("CHECK", "reserved"), ("CHECKPOINT", "non-reserved"), ("CIDR", "non-reserved-(cannot-be-function-or-type)"), ("CIRCLE", "non-reserved-(cannot-be-function-or-type)"), ("CLASS", "non-reserved"), ("CLASSIFIER", "not-keyword"), ("CLASS_ORIGIN", "not-keyword"), ("CLOB", "not-keyword"), ("CLOSE", "non-reserved"), ("CLUSTER", "non-reserved"), ("COALESCE", "non-reserved-(cannot-be-function-or-type)"), ("COBOL", "not-keyword"), ("COLLATE", "reserved"), ("COLLATION", 
"non-reserved"), ("COLLATION_CATALOG", "not-keyword"), ("COLLATION_NAME", "not-keyword"), ("COLLATION_SCHEMA", "not-keyword"), ("COLLECT", "not-keyword"), ("COLUMN", "reserved"), ("COLUMNS", "non-reserved"), ("COLUMN_NAME", "not-keyword"), ("COMMAND_FUNCTION", "not-keyword"), ("COMMAND_FUNCTION_CODE", "not-keyword"), ("COMMENT", "non-reserved"), ("COMMENTS", "non-reserved"), ("COMMIT", "non-reserved"), ("COMMITTED", "non-reserved"), ("COMPRESSION", "non-reserved"), ("CONCURRENTLY", "reserved-(can-be-function-or-type)"), ("CONDITION", "not-keyword"), ("CONDITIONAL", "not-keyword"), ("CONDITION_NUMBER", "not-keyword"), ("CONFIGURATION", "non-reserved"), ("CONFLICT", "non-reserved"), ("CONNECT", "not-keyword"), ("CONNECTION", "non-reserved"), ("CONNECTION_NAME", "not-keyword"), ("CONSTRAINT", "reserved"), ("CONSTRAINTS", "non-reserved"), ("CONSTRAINT_CATALOG", "not-keyword"), ("CONSTRAINT_NAME", "not-keyword"), ("CONSTRAINT_SCHEMA", "not-keyword"), ("CONSTRUCTOR", "not-keyword"), ("CONTAINS", "not-keyword"), ("CONTENT", "non-reserved"), ("CONTINUE", "non-reserved"), ("CONTROL", "not-keyword"), ("CONVERSION", "non-reserved"), ("CONVERT", "not-keyword"), ("COPY", "non-reserved"), ("CORR", "not-keyword"), ("CORRESPONDING", "not-keyword"), ("COS", "not-keyword"), ("COSH", "not-keyword"), ("COST", "non-reserved"), ("COUNT", "not-keyword"), ("COVAR_POP", "not-keyword"), ("COVAR_SAMP", "not-keyword"), ("CREATE", "reserved"), ("CROSS", "reserved-(can-be-function-or-type)"), ("CSV", "non-reserved"), ("CUBE", "non-reserved"), ("CUME_DIST", "not-keyword"), ("CURRENT", "non-reserved"), ("CURRENT_CATALOG", "reserved"), ("CURRENT_DATE", "reserved"), ("CURRENT_DEFAULT_TRANSFORM_GROUP", "not-keyword"), ("CURRENT_PATH", "not-keyword"), ("CURRENT_ROLE", "reserved"), ("CURRENT_ROW", "not-keyword"), ("CURRENT_SCHEMA", "reserved-(can-be-function-or-type)"), ("CURRENT_TIME", "reserved"), ("CURRENT_TIMESTAMP", "reserved"), ("CURRENT_TRANSFORM_GROUP_FOR_TYPE", "not-keyword"), ("CURRENT_USER", "reserved"), ("CURSOR", "non-reserved"), ("CURSOR_NAME", "not-keyword"), ("CYCLE", "non-reserved"), ("DATA", "non-reserved"), ("DATABASE", "non-reserved"), ("DATALINK", "not-keyword"), ("DATE", "not-keyword"), ("DATERANGE", "non-reserved-(cannot-be-function-or-type)"), ("DATETIME_INTERVAL_CODE", "not-keyword"), ("DATETIME_INTERVAL_PRECISION", "not-keyword"), ("DAY", "non-reserved"), ("DB", "not-keyword"), ("DEALLOCATE", "non-reserved"), ("DEC", "non-reserved-(cannot-be-function-or-type)"), ("DECFLOAT", "not-keyword"), ("DECIMAL", "non-reserved-(cannot-be-function-or-type)"), ("DECLARE", "non-reserved"), ("DEFAULT", "reserved"), ("DEFAULTS", "non-reserved"), ("DEFERRABLE", "reserved"), ("DEFERRED", "non-reserved"), ("DEFINE", "not-keyword"), ("DEFINED", "not-keyword"), ("DEFINER", "non-reserved"), ("DEGREE", "not-keyword"), ("DELETE", "non-reserved"), ("DELIMITER", "non-reserved"), ("DELIMITERS", "non-reserved"), ("DENSE_RANK", "not-keyword"), ("DEPENDS", "non-reserved"), ("DEPTH", "not-keyword"), ("DEREF", "not-keyword"), ("DERIVED", "not-keyword"), ("DESC", "reserved"), ("DESCRIBE", "not-keyword"), ("DESCRIPTOR", "not-keyword"), ("DETACH", "non-reserved"), ("DETERMINISTIC", "not-keyword"), ("DIAGNOSTICS", "not-keyword"), ("DICTIONARY", "non-reserved"), ("DISABLE", "non-reserved"), ("DISCARD", "non-reserved"), ("DISCONNECT", "not-keyword"), ("DISPATCH", "not-keyword"), ("DISTINCT", "reserved"), ("DLNEWCOPY", "not-keyword"), ("DLPREVIOUSCOPY", "not-keyword"), ("DLURLCOMPLETE", "not-keyword"), ("DLURLCOMPLETEONLY", 
"not-keyword"), ("DLURLCOMPLETEWRITE", "not-keyword"), ("DLURLPATH", "not-keyword"), ("DLURLPATHONLY", "not-keyword"), ("DLURLPATHWRITE", "not-keyword"), ("DLURLSCHEME", "not-keyword"), ("DLURLSERVER", "not-keyword"), ("DLVALUE", "not-keyword"), ("DO", "reserved"), ("DOCUMENT", "non-reserved"), ("DOMAIN", "non-reserved"), ("DOUBLE", "non-reserved"), ("DROP", "non-reserved"), ("DYNAMIC", "not-keyword"), ("DYNAMIC_FUNCTION", "not-keyword"), ("DYNAMIC_FUNCTION_CODE", "not-keyword"), ("EACH", "non-reserved"), ("ELEMENT", "not-keyword"), ("ELSE", "reserved"), ("EMPTY", "not-keyword"), ("ENABLE", "non-reserved"), ("ENCODING", "non-reserved"), ("ENCRYPTED", "non-reserved"), ("END", "reserved"), ("END-EXEC", "not-keyword"), ("END_FRAME", "not-keyword"), ("END_PARTITION", "not-keyword"), ("ENFORCED", "not-keyword"), ("ENUM", "non-reserved"), ("EQUALS", "not-keyword"), ("ERROR", "not-keyword"), ("ESCAPE", "non-reserved"), ("EVENT", "non-reserved"), ("EVERY", "not-keyword"), ("EXCEPT", "reserved"), ("EXCEPTION", "not-keyword"), ("EXCLUDE", "non-reserved"), ("EXCLUDING", "non-reserved"), ("EXCLUSIVE", "non-reserved"), ("EXEC", "not-keyword"), ("EXECUTE", "non-reserved"), ("EXISTS", "non-reserved-(cannot-be-function-or-type)"), ("EXP", "not-keyword"), ("EXPLAIN", "non-reserved"), ("EXPRESSION", "non-reserved"), ("EXTENSION", "non-reserved"), ("EXTERNAL", "non-reserved"), ("EXTRACT", "non-reserved-(cannot-be-function-or-type)"), ("FALSE", "reserved"), ("FAMILY", "non-reserved"), ("FETCH", "reserved"), ("FILE", "not-keyword"), ("FILTER", "non-reserved"), ("FINAL", "not-keyword"), ("FINALIZE", "non-reserved"), ("FINISH", "not-keyword"), ("FIRST", "non-reserved"), ("FIRST_VALUE", "not-keyword"), ("FLAG", "not-keyword"), ("FLOAT", "non-reserved-(cannot-be-function-or-type)"), ("FLOOR", "not-keyword"), ("FOLLOWING", "non-reserved"), ("FOR", "reserved"), ("FORCE", "non-reserved"), ("FOREIGN", "reserved"), ("FORMAT", "not-keyword"), ("FORTRAN", "not-keyword"), ("FORWARD", "non-reserved"), ("FOUND", "not-keyword"), ("FRAME_ROW", "not-keyword"), ("FREE", "not-keyword"), ("FREEZE", "reserved-(can-be-function-or-type)"), ("FROM", "reserved"), ("FS", "not-keyword"), ("FULFILL", "not-keyword"), ("FULL", "reserved-(can-be-function-or-type)"), ("FUNCTION", "non-reserved"), ("FUNCTIONS", "non-reserved"), ("FUSION", "not-keyword"), ("G", "not-keyword"), ("GENERAL", "not-keyword"), ("GENERATED", "non-reserved"), ("GET", "not-keyword"), ("GLOBAL", "non-reserved"), ("GO", "not-keyword"), ("GOTO", "not-keyword"), ("GRANT", "reserved"), ("GRANTED", "non-reserved"), ("GREATEST", "non-reserved-(cannot-be-function-or-type)"), ("GROUP", "reserved"), ("GROUPING", "non-reserved-(cannot-be-function-or-type)"), ("GROUPS", "non-reserved"), ("HANDLER", "non-reserved"), ("HAVING", "reserved"), ("HEADER", "non-reserved"), ("HEX", "not-keyword"), ("HIERARCHY", "not-keyword"), ("HOLD", "non-reserved"), ("HOUR", "non-reserved"), ("ID", "not-keyword"), ("IDENTITY", "non-reserved"), ("IF", "non-reserved"), ("IGNORE", "not-keyword"), ("ILIKE", "reserved-(can-be-function-or-type)"), ("IMMEDIATE", "non-reserved"), ("IMMEDIATELY", "not-keyword"), ("IMMUTABLE", "non-reserved"), ("IMPLEMENTATION", "not-keyword"), ("IMPLICIT", "non-reserved"), ("IMPORT", "non-reserved"), ("IN", "reserved"), ("INCLUDE", "non-reserved"), ("INCLUDING", "non-reserved"), ("INCREMENT", "non-reserved"), ("INDENT", "not-keyword"), ("INDEX", "non-reserved"), ("INDEXES", "non-reserved"), ("INET", "non-reserved-(cannot-be-function-or-type)"), ("INDICATOR", "not-keyword"), 
("INHERIT", "non-reserved"), ("INHERITS", "non-reserved"), ("INITIAL", "not-keyword"), ("INITIALLY", "reserved"), ("INLINE", "non-reserved"), ("INNER", "reserved-(can-be-function-or-type)"), ("INOUT", "non-reserved-(cannot-be-function-or-type)"), ("INPUT", "non-reserved"), ("INSENSITIVE", "non-reserved"), ("INSERT", "non-reserved"), ("INSTANCE", "not-keyword"), ("INSTANTIABLE", "not-keyword"), ("INSTEAD", "non-reserved"), ("INT", "non-reserved-(cannot-be-function-or-type)"), ("INT2", "non-reserved-(cannot-be-function-or-type)"), ("INT4", "non-reserved-(cannot-be-function-or-type)"), ("INT4RANGE", "non-reserved-(cannot-be-function-or-type)"), ("INT8", "non-reserved-(cannot-be-function-or-type)"), ("INT8RANGE", "non-reserved-(cannot-be-function-or-type)"), ("INTEGER", "non-reserved-(cannot-be-function-or-type)"), ("INTEGRITY", "not-keyword"), ("INTERSECT", "reserved"), ("INTERSECTION", "not-keyword"), ("INTERVAL", "non-reserved-(cannot-be-function-or-type)"), ("INTO", "reserved"), ("INVOKER", "non-reserved"), ("IS", "reserved-(can-be-function-or-type)"), ("ISNULL", "reserved-(can-be-function-or-type)"), ("ISOLATION", "non-reserved"), ("JOIN", "reserved-(can-be-function-or-type)"), ("JSON", "not-keyword"), ("JSON_ARRAY", "not-keyword"), ("JSON_ARRAYAGG", "not-keyword"), ("JSON_EXISTS", "not-keyword"), ("JSON_OBJECT", "not-keyword"), ("JSON_OBJECTAGG", "not-keyword"), ("JSON_QUERY", "not-keyword"), ("JSON_TABLE", "not-keyword"), ("JSON_TABLE_PRIMITIVE", "not-keyword"), ("JSON_VALUE", "not-keyword"), ("JSONB", "non-reserved-(cannot-be-function-or-type)"), ("K", "not-keyword"), ("KEEP", "not-keyword"), ("KEY", "non-reserved"), ("KEYS", "not-keyword"), ("KEY_MEMBER", "not-keyword"), ("KEY_TYPE", "not-keyword"), ("LABEL", "non-reserved"), ("LAG", "not-keyword"), ("LANGUAGE", "non-reserved"), ("LARGE", "non-reserved"), ("LAST", "non-reserved"), ("LAST_VALUE", "not-keyword"), ("LATERAL", "reserved"), ("LEAD", "not-keyword"), ("LEADING", "reserved"), ("LEAKPROOF", "non-reserved"), ("LEAST", "non-reserved-(cannot-be-function-or-type)"), ("LEFT", "reserved-(can-be-function-or-type)"), ("LENGTH", "not-keyword"), ("LEVEL", "non-reserved"), ("LIBRARY", "not-keyword"), ("LIKE", "reserved-(can-be-function-or-type)"), ("LIKE_REGEX", "not-keyword"), ("LIMIT", "reserved"), ("LINE", "non-reserved-(cannot-be-function-or-type)"), ("LINK", "not-keyword"), ("LISTAGG", "not-keyword"), ("LISTEN", "non-reserved"), ("LN", "not-keyword"), ("LOAD", "non-reserved"), ("LOCAL", "non-reserved"), ("LOCALTIME", "reserved"), ("LOCALTIMESTAMP", "reserved"), ("LOCATION", "non-reserved"), ("LOCATOR", "not-keyword"), ("LOCK", "non-reserved"), ("LOCKED", "non-reserved"), ("LOG", "not-keyword"), ("LOG10", "not-keyword"), ("LOGGED", "non-reserved"), ("LOWER", "not-keyword"), ("LSEG", "non-reserved-(cannot-be-function-or-type)"), ("M", "not-keyword"), ("MACADDR", "non-reserved-(cannot-be-function-or-type)"), ("MACADDR8", "non-reserved-(cannot-be-function-or-type)"), ("MAP", "not-keyword"), ("MAPPING", "non-reserved"), ("MATCH", "non-reserved"), ("MATCHED", "non-reserved"), ("MATCHES", "not-keyword"), ("MATCH_NUMBER", "not-keyword"), ("MATCH_RECOGNIZE", "not-keyword"), ("MATERIALIZED", "non-reserved"), ("MAX", "not-keyword"), ("MAXVALUE", "non-reserved"), ("MEASURES", "not-keyword"), ("MEMBER", "not-keyword"), ("MERGE", "non-reserved"), ("MESSAGE_LENGTH", "not-keyword"), ("MESSAGE_OCTET_LENGTH", "not-keyword"), ("MESSAGE_TEXT", "not-keyword"), ("METHOD", "non-reserved"), ("MIN", "not-keyword"), ("MINUTE", "non-reserved"), ("MINVALUE", 
"non-reserved"), ("MOD", "not-keyword"), ("MODE", "non-reserved"), ("MODIFIES", "not-keyword"), ("MODULE", "not-keyword"), ("MONEY", "non-reserved-(cannot-be-function-or-type)"), ("MONTH", "non-reserved"), ("MORE", "not-keyword"), ("MOVE", "non-reserved"), ("MULTISET", "not-keyword"), ("MUMPS", "not-keyword"), ("NAME", "non-reserved"), ("NAMES", "non-reserved"), ("NAMESPACE", "not-keyword"), ("NATIONAL", "non-reserved-(cannot-be-function-or-type)"), ("NATURAL", "reserved-(can-be-function-or-type)"), ("NCHAR", "non-reserved-(cannot-be-function-or-type)"), ("NCLOB", "not-keyword"), ("NESTED", "not-keyword"), ("NESTING", "not-keyword"), ("NEW", "non-reserved"), ("NEXT", "non-reserved"), ("NFC", "non-reserved"), ("NFD", "non-reserved"), ("NFKC", "non-reserved"), ("NFKD", "non-reserved"), ("NIL", "not-keyword"), ("NO", "non-reserved"), ("NONE", "non-reserved-(cannot-be-function-or-type)"), ("NORMALIZE", "non-reserved-(cannot-be-function-or-type)"), ("NORMALIZED", "non-reserved"), ("NOT", "reserved"), ("NOTHING", "non-reserved"), ("NOTIFY", "non-reserved"), ("NOTNULL", "reserved-(can-be-function-or-type)"), ("NOWAIT", "non-reserved"), ("NTH_VALUE", "not-keyword"), ("NTILE", "not-keyword"), ("NULL", "reserved"), ("NULLABLE", "not-keyword"), ("NULLIF", "non-reserved-(cannot-be-function-or-type)"), ("NULLS", "non-reserved"), ("NUMBER", "not-keyword"), ("NUMERIC", "non-reserved-(cannot-be-function-or-type)"), ("NUMRANGE", "non-reserved-(cannot-be-function-or-type)"), ("OBJECT", "non-reserved"), ("OCCURRENCES_REGEX", "not-keyword"), ("OCTETS", "not-keyword"), ("OCTET_LENGTH", "not-keyword"), ("OF", "non-reserved"), ("OFF", "non-reserved"), ("OFFSET", "reserved"), ("OIDS", "non-reserved"), ("OLD", "non-reserved"), ("OMIT", "not-keyword"), ("ON", "reserved"), ("ONE", "not-keyword"), ("ONLY", "reserved"), ("OPEN", "not-keyword"), ("OPERATOR", "non-reserved"), ("OPTION", "non-reserved"), ("OPTIONS", "non-reserved"), ("OR", "reserved"), ("ORDER", "reserved"), ("ORDERING", "not-keyword"), ("ORDINALITY", "non-reserved"), ("OTHERS", "non-reserved"), ("OUT", "non-reserved-(cannot-be-function-or-type)"), ("OUTER", "reserved-(can-be-function-or-type)"), ("OUTPUT", "not-keyword"), ("OVER", "non-reserved"), ("OVERFLOW", "not-keyword"), ("OVERLAPS", "reserved-(can-be-function-or-type)"), ("OVERLAY", "non-reserved-(cannot-be-function-or-type)"), ("OVERRIDING", "non-reserved"), ("OWNED", "non-reserved"), ("OWNER", "non-reserved"), ("P", "not-keyword"), ("PAD", "not-keyword"), ("PARALLEL", "non-reserved"), ("PARAMETER", "not-keyword"), ("PARAMETER_MODE", "not-keyword"), ("PARAMETER_NAME", "not-keyword"), ("PARAMETER_ORDINAL_POSITION", "not-keyword"), ("PARAMETER_SPECIFIC_CATALOG", "not-keyword"), ("PARAMETER_SPECIFIC_NAME", "not-keyword"), ("PARAMETER_SPECIFIC_SCHEMA", "not-keyword"), ("PARSER", "non-reserved"), ("PARTIAL", "non-reserved"), ("PARTITION", "non-reserved"), ("PASCAL", "not-keyword"), ("PASS", "not-keyword"), ("PASSING", "non-reserved"), ("PASSTHROUGH", "not-keyword"), ("PASSWORD", "non-reserved"), ("PAST", "not-keyword"), ("PATH", "non-reserved-(cannot-be-function-or-type)"), ("PATTERN", "not-keyword"), ("PER", "not-keyword"), ("PERCENT", "not-keyword"), ("PERCENTILE_CONT", "not-keyword"), ("PERCENTILE_DISC", "not-keyword"), ("PERCENT_RANK", "not-keyword"), ("PERIOD", "not-keyword"), ("PERMISSION", "not-keyword"), ("PERMISSIVE", "non-reserved"), ("PERMUTE", "not-keyword"), ("PG_LSN", "non-reserved-(cannot-be-function-or-type)"), ("PLACING", "reserved"), ("PLAN", "not-keyword"), ("PLANS", 
"non-reserved"), ("PLI", "not-keyword"), ("POINT", "non-reserved-(cannot-be-function-or-type)"), ("POLICY", "non-reserved"), ("POLYGON", "non-reserved-(cannot-be-function-or-type)"), ("PORTION", "not-keyword"), ("POSITION", "non-reserved-(cannot-be-function-or-type)"), ("POSITION_REGEX", "not-keyword"), ("POWER", "not-keyword"), ("PRECEDES", "not-keyword"), ("PRECEDING", "non-reserved"), ("PRECISION", "non-reserved-(cannot-be-function-or-type)"), ("PREPARE", "non-reserved"), ("PREPARED", "non-reserved"), ("PRESERVE", "non-reserved"), ("PRIMARY", "reserved"), ("PRIOR", "non-reserved"), ("PRIVATE", "not-keyword"), ("PRIVILEGES", "non-reserved"), ("PROCEDURAL", "non-reserved"), ("PROCEDURE", "non-reserved"), ("PROCEDURES", "non-reserved"), ("PROGRAM", "non-reserved"), ("PRUNE", "not-keyword"), ("PTF", "not-keyword"), ("PUBLIC", "not-keyword"), ("PUBLICATION", "non-reserved"), ("QUOTE", "non-reserved"), ("QUOTES", "not-keyword"), ("RANGE", "non-reserved"), ("RANK", "not-keyword"), ("READ", "non-reserved"), ("READS", "not-keyword"), ("REAL", "non-reserved-(cannot-be-function-or-type)"), ("REASSIGN", "non-reserved"), ("RECHECK", "non-reserved"), ("RECOVERY", "not-keyword"), ("RECURSIVE", "non-reserved"), ("REF", "non-reserved"), ("REFERENCES", "reserved"), ("REFERENCING", "non-reserved"), ("REFRESH", "non-reserved"), ("REGR_AVGX", "not-keyword"), ("REGR_AVGY", "not-keyword"), ("REGR_COUNT", "not-keyword"), ("REGR_INTERCEPT", "not-keyword"), ("REGR_R2", "not-keyword"), ("REGR_SLOPE", "not-keyword"), ("REGR_SXX", "not-keyword"), ("REGR_SXY", "not-keyword"), ("REGR_SYY", "not-keyword"), ("REINDEX", "non-reserved"), ("RELATIVE", "non-reserved"), ("RELEASE", "non-reserved"), ("RENAME", "non-reserved"), ("REPEATABLE", "non-reserved"), ("REPLACE", "non-reserved"), ("REPLICA", "non-reserved"), ("REQUIRING", "not-keyword"), ("RESET", "non-reserved"), ("RESPECT", "not-keyword"), ("RESTART", "non-reserved"), ("RESTORE", "not-keyword"), ("RESTRICT", "non-reserved"), ("RESTRICTIVE", "non-reserved"), ("RESULT", "not-keyword"), ("RETURN", "not-keyword"), ("RETURNED_CARDINALITY", "not-keyword"), ("RETURNED_LENGTH", "not-keyword"), ("RETURNED_OCTET_LENGTH", "not-keyword"), ("RETURNED_SQLSTATE", "not-keyword"), ("RETURNING", "reserved"), ("RETURNS", "non-reserved"), ("REVOKE", "non-reserved"), ("RIGHT", "reserved-(can-be-function-or-type)"), ("ROLE", "non-reserved"), ("ROLLBACK", "non-reserved"), ("ROLLUP", "non-reserved"), ("ROUTINE", "non-reserved"), ("ROUTINES", "non-reserved"), ("ROUTINE_CATALOG", "not-keyword"), ("ROUTINE_NAME", "not-keyword"), ("ROUTINE_SCHEMA", "not-keyword"), ("ROW", "non-reserved-(cannot-be-function-or-type)"), ("ROWS", "non-reserved"), ("ROW_COUNT", "not-keyword"), ("ROW_NUMBER", "not-keyword"), ("RULE", "non-reserved"), ("RUNNING", "not-keyword"), ("SAVEPOINT", "non-reserved"), ("SCALAR", "not-keyword"), ("SCALE", "not-keyword"), ("SCHEMA", "non-reserved"), ("SCHEMAS", "non-reserved"), ("SCHEMA_NAME", "not-keyword"), ("SCOPE", "not-keyword"), ("SCOPE_CATALOG", "not-keyword"), ("SCOPE_NAME", "not-keyword"), ("SCOPE_SCHEMA", "not-keyword"), ("SCROLL", "non-reserved"), ("SEARCH", "non-reserved"), ("SECOND", "non-reserved"), ("SECTION", "not-keyword"), ("SECURITY", "non-reserved"), ("SEEK", "not-keyword"), ("SELECT", "reserved"), ("SELECTIVE", "not-keyword"), ("SELF", "not-keyword"), ("SENSITIVE", "not-keyword"), ("SEQUENCE", "non-reserved"), ("SEQUENCES", "non-reserved"), ("SERIAL", "non-reserved-(cannot-be-function-or-type)"), ("SERIAL2", "non-reserved-(cannot-be-function-or-type)"), 
("SERIAL4", "non-reserved-(cannot-be-function-or-type)"), ("SERIAL8", "non-reserved-(cannot-be-function-or-type)"), ("SERIALIZABLE", "non-reserved"), ("SERVER", "non-reserved"), ("SERVER_NAME", "not-keyword"), ("SESSION", "non-reserved"), ("SESSION_USER", "reserved"), ("SET", "non-reserved"), ("SETOF", "non-reserved-(cannot-be-function-or-type)"), ("SETS", "non-reserved"), ("SHARE", "non-reserved"), ("SHOW", "non-reserved"), ("SIMILAR", "reserved-(can-be-function-or-type)"), ("SIMPLE", "non-reserved"), ("SIN", "not-keyword"), ("SINH", "not-keyword"), ("SIZE", "not-keyword"), ("SKIP", "non-reserved"), ("SMALLINT", "non-reserved-(cannot-be-function-or-type)"), ("SMALLSERIAL", "non-reserved-(cannot-be-function-or-type)"), ("SNAPSHOT", "non-reserved"), ("SOME", "reserved"), ("SOURCE", "not-keyword"), ("SPACE", "not-keyword"), ("SPECIFIC", "not-keyword"), ("SPECIFICTYPE", "not-keyword"), ("SPECIFIC_NAME", "not-keyword"), ("SQL", "non-reserved"), ("SQLCODE", "not-keyword"), ("SQLERROR", "not-keyword"), ("SQLEXCEPTION", "not-keyword"), ("SQLSTATE", "not-keyword"), ("SQLWARNING", "not-keyword"), ("SQRT", "not-keyword"), ("STABLE", "non-reserved"), ("STANDALONE", "non-reserved"), ("START", "non-reserved"), ("STATE", "not-keyword"), ("STATEMENT", "non-reserved"), ("STATIC", "not-keyword"), ("STATISTICS", "non-reserved"), ("STDDEV_POP", "not-keyword"), ("STDDEV_SAMP", "not-keyword"), ("STDIN", "non-reserved"), ("STDOUT", "non-reserved"), ("STORAGE", "non-reserved"), ("STORED", "non-reserved"), ("STRICT", "non-reserved"), ("STRING", "not-keyword"), ("STRIP", "non-reserved"), ("STRUCTURE", "not-keyword"), ("STYLE", "not-keyword"), ("SUBCLASS_ORIGIN", "not-keyword"), ("SUBMULTISET", "not-keyword"), ("SUBSCRIPTION", "non-reserved"), ("SUBSET", "not-keyword"), ("SUBSTRING", "non-reserved-(cannot-be-function-or-type)"), ("SUBSTRING_REGEX", "not-keyword"), ("SUCCEEDS", "not-keyword"), ("SUM", "not-keyword"), ("SUPPORT", "non-reserved"), ("SYMMETRIC", "reserved"), ("SYSID", "non-reserved"), ("SYSTEM", "non-reserved"), ("SYSTEM_TIME", "not-keyword"), ("SYSTEM_USER", "not-keyword"), ("T", "not-keyword"), ("TABLE", "non-reserved"), ("TABLES", "non-reserved"), ("TABLESAMPLE", "reserved-(can-be-function-or-type)"), ("TABLESPACE", "non-reserved"), ("TABLE_NAME", "not-keyword"), ("TAN", "not-keyword"), ("TANH", "not-keyword"), ("TEMP", "non-reserved"), ("TEMPLATE", "non-reserved"), ("TEMPORARY", "non-reserved"), ("TEXT", "non-reserved"), ("THEN", "reserved"), ("THROUGH", "not-keyword"), ("TIES", "non-reserved"), ("TIME", "non-reserved-(cannot-be-function-or-type)"), ("TIMESTAMP", "non-reserved-(cannot-be-function-or-type)"), ("TIMEZONE_HOUR", "not-keyword"), ("TIMEZONE_MINUTE", "not-keyword"), ("TO", "reserved"), ("TOKEN", "not-keyword"), ("TOP_LEVEL_COUNT", "not-keyword"), ("TRAILING", "reserved"), ("TRANSACTION", "non-reserved"), ("TRANSACTIONS_COMMITTED", "not-keyword"), ("TRANSACTIONS_ROLLED_BACK", "not-keyword"), ("TRANSACTION_ACTIVE", "not-keyword"), ("TRANSFORM", "non-reserved"), ("TRANSFORMS", "not-keyword"), ("TRANSLATE", "not-keyword"), ("TRANSLATE_REGEX", "not-keyword"), ("TRANSLATION", "not-keyword"), ("TREAT", "non-reserved-(cannot-be-function-or-type)"), ("TRIGGER", "non-reserved"), ("TRIGGER_CATALOG", "not-keyword"), ("TRIGGER_NAME", "not-keyword"), ("TRIGGER_SCHEMA", "not-keyword"), ("TRIM", "non-reserved-(cannot-be-function-or-type)"), ("TRIM_ARRAY", "not-keyword"), ("TRUE", "reserved"), ("TRUNCATE", "non-reserved"), ("TRUSTED", "non-reserved"), ("TSQUERY", 
"non-reserved-(cannot-be-function-or-type)"), ("TSRANGE", "non-reserved-(cannot-be-function-or-type)"), ("TSTZRANGE", "non-reserved-(cannot-be-function-or-type)"), ("TSVECTOR", "non-reserved-(cannot-be-function-or-type)"), ("TYPE", "non-reserved"), ("TYPES", "non-reserved"), ("UESCAPE", "non-reserved"), ("UNBOUNDED", "non-reserved"), ("UNCOMMITTED", "non-reserved"), ("UNCONDITIONAL", "not-keyword"), ("UNDER", "not-keyword"), ("UNENCRYPTED", "non-reserved"), ("UNION", "reserved"), ("UNIQUE", "reserved"), ("UNKNOWN", "non-reserved"), ("UNLINK", "not-keyword"), ("UNLISTEN", "non-reserved"), ("UNLOGGED", "non-reserved"), ("UNMATCHED", "not-keyword"), ("UNNAMED", "not-keyword"), ("UNNEST", "not-keyword"), ("UNTIL", "non-reserved"), ("UNTYPED", "not-keyword"), ("UPDATE", "non-reserved"), ("UPPER", "not-keyword"), ("URI", "not-keyword"), ("USAGE", "not-keyword"), ("USER", "reserved"), ("USER_DEFINED_TYPE_CATALOG", "not-keyword"), ("USER_DEFINED_TYPE_CODE", "not-keyword"), ("USER_DEFINED_TYPE_NAME", "not-keyword"), ("USER_DEFINED_TYPE_SCHEMA", "not-keyword"), ("USING", "reserved"), ("UTF16", "not-keyword"), ("UTF32", "not-keyword"), ("UTF8", "not-keyword"), ("UUID", "non-reserved-(cannot-be-function-or-type)"), ("VACUUM", "non-reserved"), ("VALID", "non-reserved"), ("VALIDATE", "non-reserved"), ("VALIDATOR", "non-reserved"), ("VALUE", "non-reserved"), ("VALUES", "non-reserved-(cannot-be-function-or-type)"), ("VALUE_OF", "not-keyword"), ("VARBINARY", "not-keyword"), ("VARCHAR", "non-reserved-(cannot-be-function-or-type)"), ("VARIADIC", "reserved"), ("VARYING", "non-reserved"), ("VAR_POP", "not-keyword"), ("VAR_SAMP", "not-keyword"), ("VERBOSE", "reserved-(can-be-function-or-type)"), ("VERSION", "non-reserved"), ("VERSIONING", "not-keyword"), ("VIEW", "non-reserved"), ("VIEWS", "non-reserved"), ("VOLATILE", "non-reserved"), ("WHEN", "reserved"), ("WHENEVER", "not-keyword"), ("WHERE", "reserved"), ("WHITESPACE", "non-reserved"), ("WIDTH_BUCKET", "not-keyword"), ("WINDOW", "reserved"), ("WITH", "reserved"), ("WITHIN", "non-reserved"), ("WITHOUT", "non-reserved"), ("WORK", "non-reserved"), ("WRAPPER", "non-reserved"), ("WRITE", "non-reserved"), ("XML", "non-reserved"), ("XMLAGG", "not-keyword"), ("XMLATTRIBUTES", "non-reserved-(cannot-be-function-or-type)"), ("XMLBINARY", "not-keyword"), ("XMLCAST", "not-keyword"), ("XMLCOMMENT", "not-keyword"), ("XMLCONCAT", "non-reserved-(cannot-be-function-or-type)"), ("XMLDECLARATION", "not-keyword"), ("XMLDOCUMENT", "not-keyword"), ("XMLELEMENT", "non-reserved-(cannot-be-function-or-type)"), ("XMLEXISTS", "non-reserved-(cannot-be-function-or-type)"), ("XMLFOREST", "non-reserved-(cannot-be-function-or-type)"), ("XMLITERATE", "not-keyword"), ("XMLNAMESPACES", "non-reserved-(cannot-be-function-or-type)"), ("XMLPARSE", "non-reserved-(cannot-be-function-or-type)"), ("XMLPI", "non-reserved-(cannot-be-function-or-type)"), ("XMLQUERY", "not-keyword"), ("XMLROOT", "non-reserved-(cannot-be-function-or-type)"), ("XMLSCHEMA", "not-keyword"), ("XMLSERIALIZE", "non-reserved-(cannot-be-function-or-type)"), ("XMLTABLE", "non-reserved-(cannot-be-function-or-type)"), ("XMLTEXT", "not-keyword"), ("XMLVALIDATE", "not-keyword"), ("YEAR", "non-reserved"), ("YES", "non-reserved"), ("ZONE", "non-reserved"), ] postgres_nondocs_keywords = [ ("ALLOW_CONNECTIONS", "non-reserved"), ("BREADTH", "non-reserved"), ("BUFFERS", "non-reserved"), ("BYPASSRLS", "non-reserved"), ("CONNECT", "reserved"), ("COSTS", "non-reserved"), ("CURRENT_USER", "non-reserved"), ("CREATEDB", "non-reserved"), 
("CREATEROLE", "non-reserved"), ("DATE", "non-reserved"), ("DEPTH", "non-reserved"), ("DESCRIBE", "non-reserved"), ("DETERMINISTIC", "non-reserved"), ("DISABLE_PAGE_SKIPPING", "non-reserved"), ("EXECUTION", "not-keyword"), ("EXTENDED", "non-reserved"), ("FILE", "non-reserved"), ("FORCE_NOT_NULL", "non-reserved"), ("FORCE_NULL", "non-reserved"), ("FORCE_QUOTE", "non-reserved"), ("FORMAT", "non-reserved"), ("HASH", "non-reserved"), ("ICU", "non-reserved"), ("IGNORE", "non-reserved"), ("INDEX_CLEANUP", "non-reserved"), ("IS_TEMPLATE", "non-reserved"), ("JSON", "non-reserved"), ("LC_COLLATE", "non-reserved"), ("LC_CTYPE", "non-reserved"), ("LIBC", "non-reserved"), ("LIST", "non-reserved"), ("LOGIN", "non-reserved"), ("LOCALE", "non-reserved"), ("MAIN", "non-reserved"), ("MODULUS", "non-reserved"), ("NOBYPASSRLS", "non-reserved"), ("NOCREATEDB", "non-reserved"), ("NOCREATEROLE", "non-reserved"), ("NOINHERIT", "non-reserved"), ("NOLOGIN", "non-reserved"), ("NOREPLICATION", "non-reserved"), ("NOSUPERUSER", "non-reserved"), ("PLAIN", "non-reserved"), ("PROCESS_TOAST", "non-reserved"), ("PROVIDER", "non-reserved"), ("PUBLIC", "non-reserved"), ("REMAINDER", "non-reserved"), ("REPLICATION", "non-reserved"), ("RESPECT", "non-reserved"), ("RESTRICTED", "non-reserved"), ("SAFE", "non-reserved"), ("SETTINGS", "non-reserved"), ("SKIP_LOCKED", "non-reserved"), ("SUMMARY", "non-reserved"), ("SUPERUSER", "non-reserved"), ("TIMETZ", "non-reserved"), ("TIMESTAMPTZ", "non-reserved"), ("TIMING", "non-reserved"), ("UNSAFE", "non-reserved"), ("USAGE", "non-reserved"), ("WAL", "non-reserved"), ] postgres_postgis_datatype_keywords = [ ("POINT", "non-reserved"), ("LINESTRING", "non-reserved"), ("POLYGON", "non-reserved"), ("MULTIPOINT", "non-reserved"), ("MULTILINESTRING", "non-reserved"), ("MULTIPOLYGON", "non-reserved"), ("GEOMETRYCOLLECTION", "non-reserved"), ("POINTZ", "non-reserved"), ("LINESTRINGZ", "non-reserved"), ("POLYGONZ", "non-reserved"), ("MULTIPOINTZ", "non-reserved"), ("MULTILINESTRINGZ", "non-reserved"), ("MULTIPOLYGONZ", "non-reserved"), ("GEOMETRYCOLLECTIONZ", "non-reserved"), ("POINTM", "non-reserved"), ("LINESTRINGM", "non-reserved"), ("POLYGONM", "non-reserved"), ("MULTIPOINTM", "non-reserved"), ("MULTILINESTRINGM", "non-reserved"), ("MULTIPOLYGONM", "non-reserved"), ("GEOMETRYCOLLECTIONM", "non-reserved"), ("POINTZM", "non-reserved"), ("LINESTRINGZM", "non-reserved"), ("POLYGONZM", "non-reserved"), ("MULTIPOINTZM", "non-reserved"), ("MULTILINESTRINGZM", "non-reserved"), ("MULTIPOLYGONZM", "non-reserved"), ("GEOMETRYCOLLECTIONZM", "non-reserved"), ("CIRCULARSTRING", "non-reserved"), ("COMPOUNDCURVE", "non-reserved"), ("CURVEPOLYGON", "non-reserved"), ("MULTICURVE", "non-reserved"), ("MULTISURFACE", "non-reserved"), ("POLYHEDRALSURFACE", "non-reserved"), ("TRIANGLE", "non-reserved"), ("TIN", "non-reserved"), ] postgres_postgis_other_keywords = [ ("GEOMETRY", "non-reserved"), ("GEOGRAPHY", "non-reserved"), ("EMPTY", "non-reserved"), ] postgres_keywords = priority_keyword_merge( postgres_docs_keywords, postgres_nondocs_keywords, postgres_postgis_datatype_keywords, postgres_postgis_other_keywords, ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_redshift.py000066400000000000000000002160741451700765000234320ustar00rootroot00000000000000"""The Amazon Redshift dialect. This is based on postgres dialect, since it was initially based off of Postgres 8. 
We should monitor this in future and see if it should be rebased off of ANSI. """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, Dedent, Delimited, IdentifierSegment, Indent, Matchable, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_postgres as postgres from sqlfluff.dialects.dialect_redshift_keywords import ( redshift_reserved_keywords, redshift_unreserved_keywords, ) postgres_dialect = load_raw_dialect("postgres") ansi_dialect = load_raw_dialect("ansi") redshift_dialect = postgres_dialect.copy_as("redshift") # Set Keywords redshift_dialect.sets("unreserved_keywords").clear() redshift_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", redshift_unreserved_keywords ) redshift_dialect.sets("reserved_keywords").clear() redshift_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", redshift_reserved_keywords ) redshift_dialect.sets("bare_functions").clear() redshift_dialect.sets("bare_functions").update( [ "current_date", "sysdate", "current_time", "current_timestamp", "user", "current_user", "current_aws_account", "current_namespace", "current_user_id", ] ) redshift_dialect.sets("date_part_function_name").update( ["DATEADD", "DATEDIFF", "EXTRACT", "DATE_PART"] ) # Add datetime units # https://docs.aws.amazon.com/redshift/latest/dg/r_Dateparts_for_datetime_functions.html redshift_dialect.sets("datetime_units").update( [ # millennium "MILLENNIUM", "MILLENNIA", "MIL", "MILS", # century "CENTURY", "CENTURIES", "C", "CENT", "CENTS", # decade "DECADE", "DECADES", "DEC", "DECS", # epoch "EPOCH", # year "YEAR", "YEARS", "Y", "YR", "YRS", # quarter "QUARTER", "QUARTERS", "QTR", "QTRS", # month "MONTH", "MONTHS", "MON", "MONS", # week "WEEK", "WEEKS", "W", # day of week "DAYOFWEEK", "DOW", "DW", "WEEKDAY", # day of year "DAYOFYEAR", "DOY", "DY", "YEARDAY", # day "DAY", "DAYS", "D", # hour "HOUR", "HOURS", "H", "HR", "HRS", # minute "MINUTE", "MINUTES", "M", "MIN", "MINS", # second "SECOND", "SECONDS", "S", "SEC", "SECS", # millisec "MILLISECOND", "MILLISECONDS", "MS", "MSEC", "MSECS", "MSECOND", "MSECONDS", "MILLISEC", "MILLISECS", "MILLISECON", # microsec "MICROSECOND", "MICROSECONDS", "MICROSEC", "MICROSECS", "USECOND", "USECONDS", "US", "USEC", "USECS", # timezone "TIMEZONE", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", ] ) redshift_dialect.replace( WellKnownTextGeometrySegment=Nothing(), JoinLikeClauseGrammar=Sequence( AnySetOf( Ref("FromPivotExpressionSegment"), Ref("FromUnpivotExpressionSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), NakedIdentifierSegment=SegmentGenerator( lambda dialect: RegexParser( # Optionally begins with # for temporary tables. Otherwise # must only contain digits, letters, underscore, and $ but # can't be all digits.
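# For example (illustrative names, not from the docs): "my_table", "col_1" or # "#temp_staging" would match, but a purely numeric name like "123" would not.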
r"#?([A-Z_]+|[0-9]+[A-Z_$])[A-Z0-9_$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), ) redshift_dialect.patch_lexer_matchers( [ # add optional leading # to word for temporary tables RegexLexer( "word", r"#?[0-9a-zA-Z_]+[0-9a-zA-Z_$]*", WordSegment, ), ] ) redshift_dialect.add( CompressionTypeGrammar=OneOf( "BZIP2", "GZIP", "LZOP", "ZSTD", ), ArgModeGrammar=OneOf( "IN", "OUT", "INOUT", ), ColumnEncodingGrammar=OneOf( "RAW", "AZ64", "BYTEDICT", "DELTA", "DELTA32K", "LZO", "MOSTLY8", "MOSTLY16", "MOSTLY32", "RUNLENGTH", "TEXT255", "TEXT32K", "ZSTD", ), QuotaGrammar=Sequence( "QUOTA", OneOf( Sequence( Ref("NumericLiteralSegment"), OneOf( "MB", "GB", "TB", ), ), "UNLIMITED", ), ), ) class FromUnpivotExpressionSegment(BaseSegment): """An UNPIVOT expression. See https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html for details. """ type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Sequence( OneOf("INCLUDE", "EXCLUDE"), "NULLS", optional=True, ), Bracketed( Sequence( Ref("ColumnReferenceSegment"), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("AliasExpressionSegment", optional=True), ) ), ), ), ), ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression. See https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html for details. """ type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Sequence( OptionallyBracketed(Ref("FunctionSegment")), Ref("AliasExpressionSegment", optional=True), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed( Delimited( Sequence( Ref("ExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), ), ), ), ) class DateTimeTypeIdentifier(BaseSegment): """A Date Time type.""" type = "datetime_type_identifier" match_grammar = OneOf( "DATE", "DATETIME", Sequence( OneOf("TIME", "TIMESTAMP"), Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True), ), OneOf("TIMETZ", "TIMESTAMPTZ"), # INTERVAL types are not Datetime types under Redshift: # https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html ) class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( # The brackets might be empty for some cases... Delimited( OneOf( Ref("LiteralGrammar"), # In redshift, character types offer on optional MAX # keyword in their parameters. "MAX", ), optional=True, ), ) class DatatypeSegment(BaseSegment): """A data type segment. Indicates a data type. 
https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html """ type = "data_type" match_grammar = OneOf( # numeric types "SMALLINT", "INT2", "INTEGER", "INT", "INT4", "BIGINT", "INT8", "REAL", "FLOAT4", Sequence("DOUBLE", "PRECISION"), "FLOAT8", "FLOAT", # numeric types with optional (precision [, scale]) arguments Sequence( OneOf("DECIMAL", "NUMERIC"), Ref("BracketedArguments", optional=True), ), # character types OneOf( Sequence( OneOf( "CHAR", "CHARACTER", "NCHAR", "VARCHAR", Sequence("CHARACTER", "VARYING"), "NVARCHAR", ), Ref("BracketedArguments", optional=True), ), "BPCHAR", "TEXT", ), Ref("DateTimeTypeIdentifier"), # INTERVAL is a data type *only* for conversion operations "INTERVAL", # boolean types OneOf("BOOLEAN", "BOOL"), # hllsketch type "HLLSKETCH", # super type "SUPER", # spatial data "GEOMETRY", "GEOGRAPHY", # binary type Sequence( OneOf( "VARBYTE", "VARBINARY", Sequence("BINARY", "VARYING"), ), Ref("BracketedArguments", optional=True), ), "ANYELEMENT", ) class DataFormatSegment(BaseSegment): """DataFormat segment. Indicates the data format available for COPY commands. https://docs.aws.amazon.com/redshift/latest/dg/c_Compression_encodings.html """ type = "data_format_segment" match_grammar = Sequence( Sequence( "FORMAT", Ref.keyword("AS", optional=True), optional=True, ), OneOf( Sequence( "CSV", Sequence( "QUOTE", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence( "SHAPEFILE", Sequence( "SIMPLIFY", Ref.keyword("AUTO", optional=True), Ref("NumericLiteralSegment", optional=True), optional=True, ), ), Sequence( OneOf("AVRO", "JSON"), Sequence( Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), "PARQUET", "ORC", "RCFILE", "SEQUENCEFILE", ), ) class AuthorizationSegment(BaseSegment): """Authorization segment. Specifies authorization to access data in another AWS resource. https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html """ type = "authorization_segment" match_grammar = AnySetOf( OneOf( Sequence( "IAM_ROLE", OneOf( "DEFAULT", Ref("QuotedLiteralSegment"), ), ), Sequence( Ref.keyword("WITH", optional=True), "CREDENTIALS", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "ACCESS_KEY_ID", Ref("QuotedLiteralSegment"), "SECRET_ACCESS_KEY", Ref("QuotedLiteralSegment"), Sequence( "SESSION_TOKEN", Ref("QuotedLiteralSegment"), optional=True, ), ), optional=False, ), Sequence( "KMS_KEY_ID", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "MASTER_SYMMETRIC_KEY", Ref("QuotedLiteralSegment"), optional=True, ), ) class ColumnAttributeSegment(BaseSegment): """Redshift specific column attributes. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "column_attribute_segment" match_grammar = AnySetOf( Sequence("DEFAULT", Ref("ExpressionSegment")), Sequence( "IDENTITY", Bracketed(Delimited(Ref("NumericLiteralSegment")), optional=True), ), Sequence( "GENERATED", "BY", "DEFAULT", "AS", "IDENTITY", Bracketed(Delimited(Ref("NumericLiteralSegment")), optional=True), ), Sequence("ENCODE", Ref("ColumnEncodingGrammar")), "DISTKEY", "SORTKEY", Sequence("COLLATE", OneOf("CASE_SENSITIVE", "CASE_INSENSITIVE")), ) class ColumnConstraintSegment(BaseSegment): """Redshift specific column constraints.
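For example `NOT NULL`, `UNIQUE` or `REFERENCES other_table (other_col)` (illustrative; `other_table` and `other_col` are placeholder names).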
As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "column_constraint_segment" match_grammar = AnySetOf( OneOf(Sequence("NOT", "NULL"), "NULL"), OneOf("UNIQUE", Ref("PrimaryKeyGrammar")), Sequence( "REFERENCES", Ref("TableReferenceSegment"), Bracketed(Ref("ColumnReferenceSegment"), optional=True), ), ) class AlterTableActionSegment(BaseSegment): """Alter Table Action Segment. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_TABLE.html """ type = "alter_table_action_segment" match_grammar = OneOf( Sequence( "ADD", Ref("TableConstraintSegment"), Sequence("NOT", "VALID", optional=True), ), Sequence("VALIDATE", "CONSTRAINT", Ref("ParameterNameSegment")), Sequence( "DROP", "CONSTRAINT", Ref("ParameterNameSegment"), Ref("DropBehaviorGrammar", optional=True), ), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), ), ), Sequence( "RENAME", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), ), ), Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), Sequence( "ALTER", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( "TYPE", Ref("DatatypeSegment"), ), Sequence( "ENCODE", Delimited( Ref("ColumnEncodingGrammar"), ), ), ), ), Sequence( "ALTER", "DISTKEY", Ref("ColumnReferenceSegment"), ), Sequence( "ALTER", "DISTSTYLE", OneOf( "ALL", "EVEN", Sequence("KEY", "DISTKEY", Ref("ColumnReferenceSegment")), "AUTO", ), ), Sequence( "ALTER", Ref.keyword("COMPOUND", optional=True), "SORTKEY", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), Sequence( "ALTER", "SORTKEY", OneOf( "AUTO", "NONE", ), ), Sequence( "ALTER", "ENCODE", "AUTO", ), Sequence( "ADD", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Sequence("DEFAULT", Ref("ExpressionSegment"), optional=True), Sequence("COLLATE", Ref("CollationReferenceSegment"), optional=True), AnyNumberOf(Ref("ColumnConstraintSegment")), ), Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), ) class TableAttributeSegment(BaseSegment): """Redshift specific table attributes. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "table_constraint" match_grammar = AnySetOf( Sequence("DISTSTYLE", OneOf("AUTO", "EVEN", "KEY", "ALL"), optional=True), Sequence("DISTKEY", Bracketed(Ref("ColumnReferenceSegment")), optional=True), OneOf( Sequence( OneOf("COMPOUND", "INTERLEAVED", optional=True), "SORTKEY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence("SORTKEY", "AUTO"), optional=True, ), Sequence("ENCODE", "AUTO", optional=True), ) class TableConstraintSegment(BaseSegment): """Redshift specific table constraints. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence("UNIQUE", Bracketed(Delimited(Ref("ColumnReferenceSegment")))), Sequence( "PRIMARY", "KEY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "FOREIGN", "KEY", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), "REFERENCES", Ref("TableReferenceSegment"), Sequence(Bracketed(Ref("ColumnReferenceSegment"))), ), ), ) class LikeOptionSegment(BaseSegment): """Like Option Segment.
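For example, it matches the trailing `INCLUDING DEFAULTS` in `CREATE TABLE t2 (LIKE t1 INCLUDING DEFAULTS)` (an illustrative sketch; `t1` and `t2` are placeholder names).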
As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "like_option_segment" match_grammar = Sequence(OneOf("INCLUDING", "EXCLUDING"), "DEFAULTS") class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html """ type = "create_table_statement" match_grammar = Sequence( "CREATE", Ref.keyword("LOCAL", optional=True), Ref("TemporaryGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), Bracketed( Delimited( # Columns and comment syntax: AnyNumberOf( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), AnyNumberOf( Ref("ColumnAttributeSegment"), Ref("ColumnConstraintSegment"), optional=True, ), ), Ref("TableConstraintSegment"), Sequence( "LIKE", Ref("TableReferenceSegment"), AnyNumberOf(Ref("LikeOptionSegment"), optional=True), ), ), ) ), Sequence("BACKUP", OneOf("YES", "NO", optional=True), optional=True), AnyNumberOf(Ref("TableAttributeSegment"), optional=True), ) class CreateTableAsStatementSegment(BaseSegment): """A `CREATE TABLE AS` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_AS.html """ type = "create_table_as_statement" match_grammar = Sequence( "CREATE", Sequence( Ref.keyword("LOCAL", optional=True), OneOf("TEMPORARY", "TEMP"), optional=True, ), "TABLE", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence("BACKUP", OneOf("YES", "NO"), optional=True), Ref("TableAttributeSegment", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class CreateModelStatementSegment(BaseSegment): """A `CREATE MODEL` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_MODEL.html NB: the order of keywords matters """ type = "create_model_statement" match_grammar = Sequence( "CREATE", "MODEL", Ref("ObjectReferenceSegment"), Sequence( "FROM", OneOf( Ref("QuotedLiteralSegment"), Bracketed(Ref("SelectableGrammar")), Ref("ObjectReferenceSegment"), ), optional=True, ), Sequence( "TARGET", Ref("ColumnReferenceSegment"), optional=True, ), Sequence( "FUNCTION", Ref("ObjectReferenceSegment"), Bracketed( Delimited(Ref("DatatypeSegment")), optional=True, ), ), Sequence( "RETURNS", Ref("DatatypeSegment"), optional=True, ), Sequence( "SAGEMAKER", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "IAM_ROLE", OneOf( "DEFAULT", Ref("QuotedLiteralSegment"), ), ), Sequence( "AUTO", OneOf( "ON", "OFF", ), optional=True, ), Sequence( "MODEL_TYPE", OneOf( "XGBOOST", "MLP", "KMEANS", ), optional=True, ), Sequence( "PROBLEM_TYPE", OneOf( "REGRESSION", "BINARY_CLASSIFICATION", "MULTICLASS_CLASSIFICATION", ), optional=True, ), Sequence( "OBJECTIVE", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "PREPROCESSORS", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "HYPERPARAMETERS", "DEFAULT", Sequence( "EXCEPT", Bracketed( Delimited( Anything(), ), ), optional=True, ), optional=True, ), Sequence( "SETTINGS", Bracketed( Sequence( "S3_BUCKET", Ref("QuotedLiteralSegment"), Sequence( "KMS_KEY_ID", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "S3_GARBAGE_COLLECT", OneOf( "ON", "OFF", ), optional=True, ), Sequence( "MAX_CELLS", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAX_RUNTIME", Ref("NumericLiteralSegment"), optional=True, ), ), ), optional=True, ), ) class ShowModelStatementSegment(BaseSegment): """A `SHOW MODEL` statement.
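For example `SHOW MODEL ALL`, or `SHOW MODEL my_schema.my_model` with a placeholder model reference.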
As specified in: https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_MODEL.html """ type = "show_model_statement" match_grammar = Sequence( "SHOW", "MODEL", OneOf( "ALL", Ref("ObjectReferenceSegment"), ), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Bracketed( # Columns and comment syntax: Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ), Ref("PartitionedBySegment", optional=True), Sequence( "ROW", "FORMAT", OneOf( Sequence( "DELIMITED", Ref("RowFormatDelimitedSegment"), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Sequence( "WITH", "SERDEPROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ), ), optional=True, ), "STORED", "AS", OneOf( "PARQUET", "RCFILE", "SEQUENCEFILE", "TEXTFILE", "ORC", "AVRO", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), "LOCATION", Ref("QuotedLiteralSegment"), Sequence( "TABLE", "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), ) class CreateExternalTableAsStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE AS` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionedBySegment", optional=True), Sequence( "ROW", "FORMAT", "DELIMITED", Ref("RowFormatDelimitedSegment"), optional=True, ), "STORED", "AS", OneOf( "PARQUET", "TEXTFILE", ), "LOCATION", Ref("QuotedLiteralSegment"), Sequence( "TABLE", "PROPERTIES", Bracketed( Delimited( Sequence( Ref("QuotedLiteralSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class CreateExternalSchemaStatementSegment(BaseSegment): """A `CREATE EXTERNAL SCHEMA` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_SCHEMA.html """ type = "create_external_schema_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), "FROM", OneOf( Sequence("DATA", "CATALOG"), Sequence("HIVE", "METASTORE"), "POSTGRES", "MYSQL", "KINESIS", "REDSHIFT", ), AnySetOf( Sequence("DATABASE", Ref("QuotedLiteralSegment")), Sequence("REGION", Ref("QuotedLiteralSegment")), Sequence("SCHEMA", Ref("QuotedLiteralSegment")), Sequence( "URI", Ref("QuotedLiteralSegment"), Sequence("PORT", Ref("NumericLiteralSegment"), optional=True), ), Sequence( "IAM_ROLE", OneOf( "DEFAULT", Ref("QuotedLiteralSegment"), ), ), Sequence("SECRET_ARN", Ref("QuotedLiteralSegment")), Sequence("CATALOG_ROLE", Ref("QuotedLiteralSegment")), Sequence("CREATE", "EXTERNAL", "DATABASE", "IF", "NOT", "EXISTS"), optional=True, ), ) class CreateLibraryStatementSegment(BaseSegment): """A `CREATE LIBRARY` statement. 
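For example (an illustrative sketch with placeholder names): `CREATE LIBRARY my_lib LANGUAGE plpythonu FROM 's3://my-bucket/my_lib.zip' IAM_ROLE default`.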
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_LIBRARY.html """ type = "create_library_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "LIBRARY", Ref("ObjectReferenceSegment"), "LANGUAGE", "PLPYTHONU", "FROM", Ref("QuotedLiteralSegment"), AnySetOf( Ref("AuthorizationSegment", optional=False), Sequence( "REGION", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ), ) class UnloadStatementSegment(BaseSegment): """An `UNLOAD` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html """ type = "unload_statement" match_grammar = Sequence( "UNLOAD", Bracketed(Ref("QuotedLiteralSegment")), "TO", Ref("QuotedLiteralSegment"), AnySetOf( Ref("AuthorizationSegment", optional=False), Sequence( "REGION", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CompressionTypeGrammar", optional=True), Sequence( Sequence( "FORMAT", Ref.keyword("AS", optional=True), optional=True, ), OneOf( "CSV", "JSON", "PARQUET", ), optional=True, ), Sequence( "PARTITION", "BY", Ref("BracketedColumnReferenceListGrammar"), Ref.keyword("INCLUDE", optional=True), ), Sequence( "PARALLEL", OneOf( "PRESET", "ON", "OFF", "TRUE", "FALSE", optional=True, ), optional=True, ), OneOf( Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FIXEDWIDTH", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), optional=True, ), Sequence( "MANIFEST", Ref.keyword("VERBOSE", optional=True), optional=True, ), Sequence( "NULL", "AS", Ref("QuotedLiteralSegment"), optional=True, ), AnySetOf( OneOf( "MAXFILESIZE", "ROWGROUPSIZE", ), Ref.keyword("AS", optional=True), Ref("NumericLiteralSegment"), OneOf( "MB", "GB", ), optional=True, ), Sequence( "ENCRYPTED", Ref.keyword("AUTO", optional=True), optional=True, ), Ref.keyword("ALLOWOVERWRITE", optional=True), Ref.keyword("CLEANPATH", optional=True), Ref.keyword("ESCAPE", optional=True), Ref.keyword("ADDQUOTES", optional=True), Ref.keyword("HEADER", optional=True), ), ) class CopyStatementSegment(postgres.CopyStatementSegment): """A `COPY` statement.
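For example (an illustrative sketch with placeholder names): `COPY my_table FROM 's3://my-bucket/data/' IAM_ROLE default FORMAT AS CSV`.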
As specified in: - https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html - https://docs.aws.amazon.com/redshift/latest/dg/r_COPY-parameters.html """ type = "copy_statement" match_grammar = Sequence( "COPY", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), "FROM", Ref("QuotedLiteralSegment"), AnySetOf( Ref("AuthorizationSegment", optional=False), Sequence( "REGION", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CompressionTypeGrammar", optional=True), Ref("DataFormatSegment", optional=True), OneOf( Sequence( "DELIMITER", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), Sequence( "FIXEDWIDTH", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment"), ), optional=True, ), Sequence( "ENCRYPTED", Ref.keyword("AUTO", optional=True), optional=True, ), Ref.keyword("MANIFEST", optional=True), Sequence( "COMPROWS", Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAXERROR", Ref.keyword("AS", optional=True), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "COMPUPDATE", OneOf( "PRESET", "ON", "OFF", "TRUE", "FALSE", optional=True, ), optional=True, ), Sequence( "STATUPDATE", OneOf( "ON", "OFF", "TRUE", "FALSE", optional=True, ), optional=True, ), Ref.keyword("NOLOAD", optional=True), Ref.keyword("ACCEPTANYDATE", optional=True), Sequence( "ACCEPTINVCHARS", Ref.keyword("AS", optional=True), Ref("QuotedLiteralSegment", optional=True), optional=True, ), Ref.keyword("BLANKSASNULL", optional=True), Sequence( "DATEFORMAT", Ref.keyword("AS", optional=True), OneOf( "AUTO", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref.keyword("EMPTYASNULL", optional=True), Sequence( "ENCODING", Ref.keyword("AS", optional=True), OneOf( "UTF8", "UTF16", "UTF16BE", "UTF16LE", ), optional=True, ), Ref.keyword("ESCAPE", optional=True), Ref.keyword("EXPLICIT_IDS", optional=True), Ref.keyword("FILLRECORD", optional=True), Ref.keyword("IGNOREBLANKLINES", optional=True), Sequence( "IGNOREHEADER", Ref.keyword("AS", optional=True), Ref("LiteralGrammar"), optional=True, ), Sequence( "NULL", "AS", Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "READRATIO", Ref("NumericLiteralSegment"), optional=True, ), Ref.keyword("REMOVEQUOTES", optional=True), Ref.keyword("ROUNDEC", optional=True), Sequence( "TIMEFORMAT", Ref.keyword("AS", optional=True), OneOf( "AUTO", "EPOCHSECS", "EPOCHMILLISECS", Ref("QuotedLiteralSegment"), ), optional=True, ), Ref.keyword("TRIMBLANKS", optional=True), Ref.keyword("TRUNCATECOLUMNS", optional=True), ), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. Redshift has two versions of insert statements: - https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_30.html - https://docs.aws.amazon.com/redshift/latest/dg/r_INSERT_external_table.html """ # TODO: This logic can be streamlined. However, there are some odd parsing issues. # See https://github.com/sqlfluff/sqlfluff/pull/1896 type = "insert_statement" match_grammar = Sequence( "INSERT", "INTO", Ref("TableReferenceSegment"), OneOf( OptionallyBracketed(Ref("SelectableGrammar")), Sequence("DEFAULT", "VALUES"), Sequence( Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectableGrammar")), ), ), ), ) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement.
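For example `CREATE SCHEMA IF NOT EXISTS my_schema QUOTA 50 GB` (illustrative; `my_schema` is a placeholder name).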
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_SCHEMA.html TODO: support optional SCHEMA_ELEMENT (should mostly be provided by ansi) """ type = "create_schema_statement" match_grammar = Sequence( "CREATE", "SCHEMA", OneOf( Sequence( Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), ), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), ), ), Ref("QuotaGrammar", optional=True), ) class ProcedureParameterListSegment(BaseSegment): """The parameters for a procedure. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_PROCEDURE.html """ type = "procedure_parameter_list" # Odd syntax, but prevents parameter names being eagerly matched as data types _param_type = OneOf("REFCURSOR", Ref("DatatypeSegment")) match_grammar = Bracketed( Delimited( Sequence( AnyNumberOf( Ref( "ParameterNameSegment", exclude=OneOf(_param_type, Ref("ArgModeGrammar")), optional=True, ), Ref("ArgModeGrammar", optional=True), max_times_per_element=1, ), _param_type, ), optional=True, ), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE PROCEDURE` statement. https://www.postgresql.org/docs/14/sql-createprocedure.html TODO: Just a basic statement for now, without full syntax. Based on CreateFunctionStatementSegment without a return type. """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment"), Ref("FunctionDefinitionGrammar"), ) class AlterProcedureStatementSegment(BaseSegment): """An `ALTER PROCEDURE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_PROCEDURE.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment", optional=True), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence( "OWNER", "TO", OneOf( OneOf(Ref("ParameterNameSegment"), Ref("QuotedIdentifierSegment")), "CURRENT_USER", "SESSION_USER", ), ), ), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_PROCEDURE.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Delimited( Sequence( Ref("FunctionNameSegment"), Ref("ProcedureParameterListSegment", optional=True), ), ), ) class AlterDefaultPrivilegesSchemaObjectsSegment( postgres.AlterDefaultPrivilegesSchemaObjectsSegment ): """`ALTER DEFAULT PRIVILEGES` schema object types. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DEFAULT_PRIVILEGES.html """ match_grammar = ( postgres.AlterDefaultPrivilegesSchemaObjectsSegment.match_grammar.copy( insert=[Sequence("PROCEDURES")] ) ) class DeclareStatementSegment(BaseSegment): """A `DECLARE` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/declare.html """ type = "declare_statement" match_grammar = Sequence( "DECLARE", Ref("ObjectReferenceSegment"), "CURSOR", "FOR", Ref("SelectableGrammar"), ) class FetchStatementSegment(BaseSegment): """A `FETCH` statement. As specified in https://docs.aws.amazon.com/redshift/latest/dg/fetch.html """ type = "fetch_statement" match_grammar = Sequence( "FETCH", OneOf( "NEXT", "ALL", Sequence( "FORWARD", OneOf( "ALL", Ref("NumericLiteralSegment"), ), ), ), "FROM", Ref("ObjectReferenceSegment"), ) class CloseStatementSegment(BaseSegment): """A `CLOSE` statement.
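For example `CLOSE my_cursor` (illustrative; `my_cursor` is a placeholder cursor name).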
As specified in https://docs.aws.amazon.com/redshift/latest/dg/close.html """ type = "close_statement" match_grammar = Sequence( "CLOSE", Ref("ObjectReferenceSegment"), ) class AlterDatashareStatementSegment(BaseSegment): """An `ALTER DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_DATASHARE.html """ type = "alter_datashare_statement" match_grammar = Sequence( "ALTER", "DATASHARE", Ref("ObjectReferenceSegment"), OneOf( # add or remove objects to the datashare Sequence( OneOf( "ADD", "REMOVE", ), OneOf( Sequence( "TABLE", Delimited(Ref("TableReferenceSegment")), ), Sequence( "SCHEMA", Delimited(Ref("SchemaReferenceSegment")), ), Sequence( "FUNCTION", Delimited(Ref("FunctionNameSegment")), ), Sequence( "ALL", OneOf("TABLES", "FUNCTIONS"), "IN", "SCHEMA", Delimited(Ref("SchemaReferenceSegment")), ), ), ), # configure the properties of the datashare Sequence( "SET", OneOf( Sequence( "PUBLICACCESSIBLE", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), ), Sequence( "INCLUDENEW", Ref("EqualsSegment", optional=True), Ref("BooleanLiteralGrammar"), "FOR", "SCHEMA", Ref("SchemaReferenceSegment"), ), ), ), ), ) class CreateDatashareStatementSegment(BaseSegment): """A `CREATE DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_DATASHARE.html """ type = "create_datashare_statement" match_grammar = Sequence( "CREATE", "DATASHARE", Ref("ObjectReferenceSegment"), Sequence( Ref.keyword("SET", optional=True), "PUBLICACCESSIBLE", Ref("EqualsSegment", optional=True), OneOf( "TRUE", "FALSE", ), optional=True, ), ) class DescDatashareStatementSegment(BaseSegment): """A `DESC DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DESC_DATASHARE.html """ type = "desc_datashare_statement" match_grammar = Sequence( "DESC", "DATASHARE", Ref("ObjectReferenceSegment"), Sequence( "OF", Sequence( "ACCOUNT", Ref("QuotedLiteralSegment"), optional=True, ), "NAMESPACE", Ref("QuotedLiteralSegment"), optional=True, ), ) class DropDatashareStatementSegment(BaseSegment): """A `DROP DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_DATASHARE.html """ type = "drop_datashare_statement" match_grammar = Sequence( "DROP", "DATASHARE", Ref("ObjectReferenceSegment"), ) class ShowDatasharesStatementSegment(BaseSegment): """A `SHOW DATASHARES` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_SHOW_DATASHARES.html """ type = "show_datashares_statement" match_grammar = Sequence( "SHOW", "DATASHARES", Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ) class GrantUsageDatashareStatementSegment(BaseSegment): """A `GRANT USAGE ON DATASHARE` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_GRANT.html section "Granting datashare permissions" Note: According to the documentation, multiple accounts and namespaces can be specified. However, tests using a Redshift instance showed that this causes a syntax error. """ type = "grant_datashare_statement" match_grammar = Sequence( OneOf("GRANT", "REVOKE"), "USAGE", "ON", "DATASHARE", Ref("ObjectReferenceSegment"), OneOf("TO", "FROM"), OneOf( Sequence("NAMESPACE", Ref("QuotedLiteralSegment")), Sequence( "ACCOUNT", Sequence( Ref("QuotedLiteralSegment"), Sequence("VIA", "DATA", "CATALOG", optional=True), ), ), ), ) class CreateRlsPolicyStatementSegment(BaseSegment): """A `CREATE RLS POLICY` statement.
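For example (an illustrative sketch with placeholder names): `CREATE RLS POLICY policy_tenant WITH (tenant_id INT) USING (tenant_id = current_user_id)`.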
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_RLS_POLICY.html """ type = "create_rls_policy_statement" match_grammar = Sequence( "CREATE", "RLS", "POLICY", Ref("ObjectReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), ), ), ), Sequence( Ref.keyword("AS", optional=True), Ref("AliasExpressionSegment"), optional=True, ), optional=True, ), Sequence( "USING", Bracketed(Ref("ExpressionSegment")), ), ) class ManageRlsPolicyStatementSegment(BaseSegment): """An `ATTACH/DETACH RLS POLICY` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ATTACH_RLS_POLICY.html https://docs.aws.amazon.com/redshift/latest/dg/r_DETACH_RLS_POLICY.html """ # 1 statement for both ATTACH and DETACH since same syntax type = "manage_rls_policy_statement" match_grammar = Sequence( OneOf("ATTACH", "DETACH"), "RLS", "POLICY", Ref("ObjectReferenceSegment"), "ON", Ref.keyword("TABLE", optional=True), Delimited( Ref("TableReferenceSegment"), ), OneOf("TO", "FROM"), Delimited( OneOf( Sequence( Ref.keyword("ROLE", optional=True), Ref("RoleReferenceSegment"), ), "PUBLIC", ), ), ) class DropRlsPolicyStatementSegment(BaseSegment): """A `DROP RLS POLICY` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_RLS_POLICY.html """ type = "drop_rls_policy_statement" match_grammar = Sequence( "DROP", "RLS", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( "CASCADE", "RESTRICT", optional=True, ), ) class AnalyzeCompressionStatementSegment(BaseSegment): """An `ANALYZE COMPRESSION` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ANALYZE_COMPRESSION.html """ type = "analyze_compression_statement" match_grammar = Sequence( OneOf("ANALYZE", "ANALYSE"), "COMPRESSION", Sequence( Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence( "COMPROWS", Ref("NumericLiteralSegment"), optional=True, ), optional=True, ), ) class VacuumStatementSegment(postgres.VacuumStatementSegment): """A `VACUUM` statement. 
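For example `VACUUM SORT ONLY my_table TO 75 PERCENT` (illustrative; `my_table` is a placeholder name).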
https://docs.aws.amazon.com/redshift/latest/dg/r_VACUUM_command.html """ match_grammar = Sequence( "VACUUM", OneOf( "FULL", "REINDEX", "RECLUSTER", Sequence( OneOf( "SORT", "DELETE", ), "ONLY", ), optional=True, ), Ref("TableReferenceSegment", optional=True), Sequence( "TO", Ref("NumericLiteralSegment"), "PERCENT", optional=True, ), Ref.keyword("BOOST", optional=True), ) # Adding Redshift-specific statements class StatementSegment(postgres.StatementSegment): """A generic statement segment, matching any of its child statement segments.""" type = "statement" match_grammar = postgres.StatementSegment.match_grammar.copy( insert=[ Ref("CreateLibraryStatementSegment"), Ref("CreateGroupStatementSegment"), Ref("AlterUserStatementSegment"), Ref("AlterGroupStatementSegment"), Ref("CreateExternalTableAsStatementSegment"), Ref("CreateExternalTableStatementSegment"), Ref("CreateExternalSchemaStatementSegment"), Ref("DataFormatSegment"), Ref("UnloadStatementSegment"), Ref("CopyStatementSegment"), Ref("ShowModelStatementSegment"), Ref("CreateDatashareStatementSegment"), Ref("DescDatashareStatementSegment"), Ref("DropDatashareStatementSegment"), Ref("ShowDatasharesStatementSegment"), Ref("AlterDatashareStatementSegment"), Ref("DeclareStatementSegment"), Ref("FetchStatementSegment"), Ref("CloseStatementSegment"), Ref("AnalyzeCompressionStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("CallStatementSegment"), Ref("CreateRlsPolicyStatementSegment"), Ref("ManageRlsPolicyStatementSegment"), Ref("DropRlsPolicyStatementSegment"), Ref("CreateExternalFunctionStatementSegment"), Ref("GrantUsageDatashareStatementSegment"), ], ) class PartitionedBySegment(BaseSegment): """Partitioned By Segment. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "partitioned_by_segment" match_grammar = Sequence( Ref.keyword("PARTITIONED"), "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment", optional=True), ), ), ), ) class RowFormatDelimitedSegment(BaseSegment): """Row Format Delimited Segment. As specified in https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html """ type = "row_format_delimited_segment" match_grammar = AnySetOf( Sequence( "FIELDS", "TERMINATED", "BY", Ref("QuotedLiteralSegment"), ), Sequence( "LINES", "TERMINATED", "BY", Ref("QuotedLiteralSegment"), ), optional=True, ) class CreateUserStatementSegment(ansi.CreateUserStatementSegment): """`CREATE USER` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html """ match_grammar = Sequence( "CREATE", "USER", Ref("RoleReferenceSegment"), Ref.keyword("WITH", optional=True), "PASSWORD", OneOf(Ref("QuotedLiteralSegment"), "DISABLE"), AnySetOf( OneOf( "CREATEDB", "NOCREATEDB", ), OneOf( "CREATEUSER", "NOCREATEUSER", ), Sequence( "SYSLOG", "ACCESS", OneOf( "RESTRICTED", "UNRESTRICTED", ), ), Sequence("IN", "GROUP", Delimited(Ref("ObjectReferenceSegment"))), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment")), Sequence( "CONNECTION", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), ), Sequence( "SESSION", "TIMEOUT", Ref("NumericLiteralSegment"), ), ), ) class CreateGroupStatementSegment(BaseSegment): """`CREATE GROUP` statement.
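For example `CREATE GROUP admin_group WITH USER alice, bob` (illustrative; the group and user names are placeholders).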
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_GROUP.html """ type = "create_group" match_grammar = Sequence( "CREATE", "GROUP", Ref("ObjectReferenceSegment"), Sequence( Ref.keyword("WITH", optional=True), "USER", Delimited( Ref("ObjectReferenceSegment"), ), optional=True, ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_USER.html """ type = "alter_user_statement" match_grammar = Sequence( "ALTER", "USER", Ref("RoleReferenceSegment"), Ref.keyword("WITH", optional=True), AnySetOf( OneOf( "CREATEDB", "NOCREATEDB", ), OneOf( "CREATEUSER", "NOCREATEUSER", ), Sequence( "SYSLOG", "ACCESS", OneOf( "RESTRICTED", "UNRESTRICTED", ), ), Sequence( "PASSWORD", OneOf( Ref("QuotedLiteralSegment"), "DISABLE", ), Sequence("VALID", "UNTIL", Ref("QuotedLiteralSegment"), optional=True), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( "CONNECTION", "LIMIT", OneOf( Ref("NumericLiteralSegment"), "UNLIMITED", ), ), OneOf( Sequence( "SESSION", "TIMEOUT", Ref("NumericLiteralSegment"), ), Sequence( "RESET", "SESSION", "TIMEOUT", ), ), OneOf( Sequence( "SET", Ref("ObjectReferenceSegment"), OneOf( "TO", Ref("EqualsSegment"), ), OneOf( "DEFAULT", Ref("LiteralGrammar"), ), ), Sequence( "RESET", Ref("ObjectReferenceSegment"), ), ), min_times=1, ), ) class AlterGroupStatementSegment(BaseSegment): """`ALTER GROUP` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_GROUP.html """ type = "alter_group" match_grammar = Sequence( "ALTER", "GROUP", Ref("ObjectReferenceSegment"), OneOf( Sequence( OneOf("ADD", "DROP"), "USER", Delimited( Ref("ObjectReferenceSegment"), ), ), Sequence( "RENAME", "TO", Ref("ObjectReferenceSegment"), ), ), ) class TransactionStatementSegment(BaseSegment): """A `BEGIN|START`, `COMMIT|END` or `ROLLBACK|ABORT` transaction statement. https://docs.aws.amazon.com/redshift/latest/dg/r_BEGIN.html """ type = "transaction_statement" match_grammar = Sequence( OneOf("BEGIN", "START", "COMMIT", "END", "ROLLBACK", "ABORT"), OneOf("TRANSACTION", "WORK", optional=True), Sequence( "ISOLATION", "LEVEL", OneOf( "SERIALIZABLE", Sequence("READ", "COMMITTED"), Sequence("READ", "UNCOMMITTED"), Sequence("REPEATABLE", "READ"), ), optional=True, ), OneOf( Sequence("READ", "ONLY"), Sequence("READ", "WRITE"), optional=True, ), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_SCHEMA.html """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Ref("SchemaReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("SchemaReferenceSegment"), ), Sequence( "OWNER", "TO", Ref("RoleReferenceSegment"), ), Ref("QuotaGrammar"), ), ) class LockTableStatementSegment(BaseSegment): """A `LOCK TABLE` statement. https://www.postgresql.org/docs/14/sql-lock.html """ type = "lock_table_statement" match_grammar: Matchable = Sequence( "LOCK", Ref.keyword("TABLE", optional=True), Delimited( Ref("TableReferenceSegment"), ), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Override to add Object unpivoting. """ match_grammar = ansi.TableExpressionSegment.match_grammar.copy( insert=[ Ref("ObjectUnpivotSegment", optional=True), Ref("ArrayUnnestSegment", optional=True), ], before=Ref("TableReferenceSegment"), ) class ObjectUnpivotSegment(BaseSegment): """Object unpivoting.
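e.g. (illustrative) the `UNPIVOT t.attrs AS val AT key` in a FROM clause.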
https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unpivoting """ type = "object_unpivoting" match_grammar: Matchable = Sequence( "UNPIVOT", Ref("ObjectReferenceSegment"), "AS", Ref("SingleIdentifierGrammar"), "AT", Ref("SingleIdentifierGrammar"), ) class ArrayAccessorSegment(ansi.ArrayAccessorSegment): """Array element accessor. Redshift allows multiple levels of array access, like Postgres, but it * doesn't allow ranges like `myarray[1:2]` * does allow function or column expressions `myarray[idx]` """ match_grammar = Sequence( AnyNumberOf( Bracketed( OneOf(Ref("NumericLiteralSegment"), Ref("ExpressionSegment")), bracket_type="square", ) ) ) class ArrayUnnestSegment(BaseSegment): """Array unnesting. https://docs.aws.amazon.com/redshift/latest/dg/query-super.html """ type = "array_unnesting" match_grammar: Matchable = Sequence( Ref("ObjectReferenceSegment"), "AS", Ref("SingleIdentifierGrammar"), "AT", Ref("SingleIdentifierGrammar"), ) class CallStatementSegment(BaseSegment): """A `CALL` statement. https://docs.aws.amazon.com/redshift/latest/dg/r_CALL_procedure.html """ type = "call_statement" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class SelectClauseModifierSegment(postgres.SelectClauseModifierSegment): """Things that come after SELECT but before the columns.""" match_grammar = postgres.SelectClauseModifierSegment.match_grammar.copy( insert=[Sequence("TOP", Ref("NumericLiteralSegment"))], ) class ConvertFunctionNameSegment(BaseSegment): """CONVERT function name segment. Function taking a data type identifier and an expression. An alternative to CAST. """ type = "function_name" match_grammar = Sequence("CONVERT") class FunctionSegment(ansi.FunctionSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now we treat them the same because they look the same for our purposes. """ type = "function" match_grammar: Matchable = OneOf( Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Sequence( Ref("DatePartFunctionNameSegment"), Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), parse_mode=ParseMode.GREEDY, ), ), ), Sequence( Sequence( OneOf( Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), Ref("ConvertFunctionNameSegment"), ), ), Sequence( Ref.keyword("APPROXIMATE"), Ref( "FunctionNameSegment", exclude=OneOf( Ref("DatePartFunctionNameSegment"), Ref("ValuesClauseSegment"), Ref("ConvertFunctionNameSegment"), ), ), ), ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... 
optional=True, ), parse_mode=ParseMode.GREEDY, ), ), Ref("PostFunctionGrammar", optional=True), ), Sequence( Ref("ConvertFunctionNameSegment"), Bracketed( Ref("DatatypeSegment"), Ref("CommaSegment"), Ref("ExpressionSegment"), ), ), ) class FromClauseSegment(ansi.FromClauseSegment): """Slightly modified version which allows for using brackets for content of FROM.""" match_grammar = Sequence( "FROM", Delimited( OptionallyBracketed(Ref("FromExpressionSegment")), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement.""" type = "create_view_statement" # https://crate.io/docs/sql-99/en/latest/chapters/18.html#create-view-statement # https://dev.mysql.com/doc/refman/8.0/en/create-view.html # https://www.postgresql.org/docs/12/sql-createview.html match_grammar: Matchable = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Optional list of column names Ref("BracketedColumnReferenceListGrammar", optional=True), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class CreateMaterializedViewStatementSegment( postgres.CreateMaterializedViewStatementSegment ): """A `CREATE MATERIALIZED VIEW` statement. https://docs.aws.amazon.com/redshift/latest/dg/materialized-view-create-sql-command.html """ type = "create_materialized_view_statement" match_grammar = Sequence( "CREATE", "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), Sequence("BACKUP", OneOf("YES", "NO"), optional=True), Ref("TableAttributeSegment", optional=True), Sequence("AUTO", "REFRESH", OneOf("YES", "NO"), optional=True), "AS", OneOf( OptionallyBracketed(Ref("SelectableGrammar")), OptionallyBracketed(Sequence("TABLE", Ref("TableReferenceSegment"))), Ref("ValuesClauseSegment"), OptionallyBracketed(Sequence("EXECUTE", Ref("FunctionSegment"))), ), Ref("WithDataClauseSegment", optional=True), ) class CreateExternalFunctionStatementSegment(BaseSegment): """A `CREATE EXTERNAL FUNCTION` segment. https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_FUNCTION.html """ type = "create_external_function_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "EXTERNAL", "FUNCTION", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), "RETURNS", Ref("DatatypeSegment"), OneOf("VOLATILE", "STABLE", "IMMUTABLE"), OneOf("LAMBDA", "SAGEMAKER"), Ref("QuotedLiteralSegment"), "IAM_ROLE", OneOf("DEFAULT", Ref("QuotedLiteralSegment")), Sequence( "RETRY_TIMEOUT", Ref("NumericLiteralSegment"), optional=True, ), ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`. https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html """ type = "qualify_clause" match_grammar = Sequence( "QUALIFY", Indent, Ref("ExpressionSegment"), Dedent, ) class SelectStatementSegment(postgres.SelectStatementSegment): """A Redshift `SELECT` statement including optional Qualify. https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html """ type = "select_statement" match_grammar = postgres.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), terminators=[Ref("SetOperatorSegment")], ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A Redshift unordered `SELECT` statement including optional Qualify.
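e.g. (illustrative): `SELECT * FROM t QUALIFY ROW_NUMBER() OVER (PARTITION BY id ORDER BY ts) = 1`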
https://docs.aws.amazon.com/redshift/latest/dg/r_QUALIFY_clause.html """ type = "select_statement" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_redshift_keywords.py000066400000000000000000000226471451700765000253620ustar00rootroot00000000000000"""A list of all SQL key words.""" redshift_reserved_keywords = """AES128 AES256 ALL ALLOWOVERWRITE ANALYSE ANALYZE AND ANY ARRAY AS ASC AUTHORIZATION AZ64 BETWEEN BINARY BLANKSASNULL BOTH BYTEDICT CASE CAST CHECK COLLATE COLUMN COMPROWS COMPUPDATE CONSTRAINT CREATE CREDENTIALS CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER CURRENT_USER_ID DATETIME DEFAULT DEFERRABLE DEFRAG DELIMITERS DELTA DELTA32K DESC DISABLE DISTINCT DO ELSE EMPTYASNULL ENABLE ENCRYPT ENCRYPTION END EXCEPT EXPLICIT_IDS FALSE FILLRECORD FOR FOREIGN FREEZE FROM FULL GLOBALDICT256 GLOBALDICT64K GRANT GROUP HAVING IDENTITY IGNORE IGNOREBLANKLINES IGNOREHEADER ILIKE IN INITIALLY INNER INTERSECT INTO IS ISNULL JOIN LEADING LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP LUN LUNS LZO MINUS MOSTLY16 MOSTLY32 MOSTLY8 NATURAL NEW NOT NOTNULL NULL NULLS OFF OFFSET OID OLD ON ONLY OPEN OR ORDER OUTER OVERLAPS PARALLEL PARTITION PERCENT PERMISSIONS PIVOT PLACING PRIMARY RAW READRATIO RECOVER REFERENCES RESPECT REJECTLOG RESORT RESTORE RIGHT RUNLENGTH SELECT SESSION_USER SIMILAR SNAPSHOT SOME SYSDATE SYSTEM TABLE TAG TDES TEXT255 TEXT32K THEN TIMESTAMP TO TOP TRAILING TRUE TRUNCATECOLUMNS UNION UNIQUE UNNEST UNPIVOT USER USING VERBOSE WHEN WHERE WITH WITHIN WITHOUT""" redshift_unreserved_keywords = """A ABORT ABS ABSENT ABSOLUTE ACCEPTANYDATE ACCEPTINVCHARS ACCESS ACCESS_KEY_ID ACCORDING ACCOUNT ACOS ACTION ADA ADD ADDQUOTES ADMIN AFTER AGGREGATE ALLOCATE ALSO ALTER ALWAYS ANYELEMENT APPLY APPROXIMATE ARE ARRAY_AGG ARRAY_MAX_CARDINALITY ASENSITIVE ASIN ASSERTION ASSIGNMENT ASYMMETRIC AT ATAN ATOMIC ATTACH ATTRIBUTE ATTRIBUTES AUTO AUTO_INCREMENT AVG AVRO BACKUP BACKWARD BASE64 BEFORE BEGIN BEGIN_FRAME BEGIN_PARTITION BERNOULLI BIGINT BINARY_CLASSIFICATION BINDING BIT BIT_LENGTH BLANKSASNULL BLOB BLOCKED BOM BOOL BOOLEAN BOOST BPCHAR BREADTH BUFFERS BY BYPASSRLS BZIP2 C CACHE CALL CALLED CARDINALITY CASCADE CASCADED CASE_INSENSITIVE CASE_SENSITIVE CATALOG CATALOG_NAME CATALOG_ROLE CEIL CEILING CHAIN CHAINING CHAR CHARACTER CHARACTERISTICS CHARACTERS CHARACTER_LENGTH CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA CHAR_LENGTH CHECKPOINT CLASS CLASSIFIER CLASS_ORIGIN CLEANPATH CLOB CLOSE CLUSTER COALESCE COBOL COLLATION COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA COLLECT COLUMNS COLUMN_NAME COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT COMMENTS COMMIT COMMITTED COMPOUND COMPRESSION CONCURRENTLY CONDITION CONDITIONAL CONDITION_NUMBER CONFIGURATION CONFLICT CONNECT CONNECTION CONNECTION_NAME CONSTRAINTS CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CONSTRUCTOR CONTAINS CONTENT CONTINUE CONTROL CONVERSION CONVERT COPY CORR CORRESPONDING COS COSH COST COSTS COUNT COVAR_POP COVAR_SAMP CREATEDB CREATEUSER CREATEROLE CSV CUBE CUME_DIST CURRENT CURRENT_CATALOG CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_ROW CURRENT_SCHEMA CURRENT_TRANSFORM_GROUP_FOR_TYPE CURSOR CURSOR_NAME CYCLE DATA DATABASE DATALINK DATASHARE DATASHARES DATE DATEFORMAT DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DAY DAYOFYEAR DB DEALLOCATE DEC DECFLOAT DECIMAL DECLARE DEFAULTS DEFERRED DEFINE 
DEFINED DEFINER DEFLATE DEGREE DELETE DELIMITED DELIMITER DENSE_RANK DEPENDS DEPTH DEREF DERIVED DESCRIBE DESCRIPTOR DETACH DETERMINISTIC DIAGNOSTICS DICTIONARY DISCARD DISCONNECT DISPATCH DISTKEY DISTSTYLE DLNEWCOPY DLPREVIOUSCOPY DLURLCOMPLETE DLURLCOMPLETEONLY DLURLCOMPLETEWRITE DLURLPATH DLURLPATHONLY DLURLPATHWRITE DLURLSCHEME DLURLSERVER DLVALUE DOCUMENT DOMAIN DOUBLE DROP DYNAMIC DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE EACH ELEMENT EMPTY ENCODE ENCODING ENCRYPTED END-EXEC END_FRAME END_PARTITION ENFORCED ENUM EPOCH EPOCHSECS EPOCHMILLISECS EQUALS ERROR ESCAPE EVEN EVENT EVERY EXCEPTION EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXECUTION EXISTS EXP EXPLAIN EXPLICIT EXPRESSION EXTENDED EXTENSION EXTERNAL EXTRACT FAMILY FETCH FIELDS FILE FILTER FINAL FINALIZE FINISH FIRST FIRST_VALUE FIXEDWIDTH FLAG FLOAT FLOAT4 FLOAT8 FLOOR FOLLOWING FORCE FORMAT FORTRAN FORWARD FOUND FRAME_ROW FREE FS FULFILL FUNCTION FUNCTIONS FUSION FUTURE G GB GENERAL GENERATED GEOGRAPHY GEOMETRY GET GLOBAL GO GOTO GRANTED GRANTS GREATEST GROUPING GROUPS GZIP HANDLER HASH HEADER HEX HIERARCHY HIVE HLLSKETCH HOLD HOUR HYPERPARAMETERS IAM_ROLE ID IF IMMEDIATE IMMEDIATELY IMMUTABLE IMPLEMENTATION IMPLICIT IMPORT IMPORTED INCLUDE INCLUDENEW INCLUDING INCREMENT INDENT INDEX INDEXES INDICATOR INHERIT INHERITS INITIAL INLINE INOUT INPUT INPUTFORMAT INSENSITIVE INSERT INSTANCE INSTANTIABLE INSTEAD INT INT2 INT4 INT8 INTEGER INTEGRATION INTEGRITY INTERLEAVED INTERSECTION INTERVAL INVOKER ISOLATION JSON JSON_ARRAY JSON_ARRAYAGG JSON_EXISTS JSON_OBJECT JSON_OBJECTAGG JSON_QUERY JSON_TABLE JSON_TABLE_PRIMITIVE JSON_VALUE K KEEP KEY KEYS KEY_MEMBER KEY_TYPE KINESIS KMEANS KMS_KEY_ID LABEL LAG LAMBDA LANGUAGE LARGE LAST LAST_VALUE LATERAL LEAD LEAKPROOF LEAST LENGTH LEVEL LIBRARY LIKE_REGEX LINES LINK LIST LISTAGG LISTEN LN LOAD LOCAL LOCATION LOCATOR LOCK LOCKED LOG LOG10 LOGGED LOGIN LOWER LZOP M MAIN MANAGE MANIFEST MAP MAPPING MASKING MASTER_SYMMETRIC_KEY MATCH MATCHED MATCHES MATCH_NUMBER MATCH_RECOGNIZE MATERIALIZED MAX MAXERROR MAXFILESIZE MAXVALUE MAX_CELLS MAX_RUNTIME MB MEASURES MEMBER MERGE MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METASTORE METHOD MILLISECOND MIN MINUTE MINVALUE ML MLP MOD MODE MODEL MODEL_TYPE MODIFIES MODIFY MODULE MODULUS MONITOR MONTH MORE MOVE MULTICLASS_CLASSIFICATION MULTISET MYSQL MUMPS NAME NAMES NAMESPACE NAN NATIONAL NCHAR NCLOB NESTED NESTING NEXT NFC NFD NFKC NFKD NIL NO NOBYPASSRLS NOCACHE NOCREATEDB NOCREATEROLE NOCREATEUSER NOCYCLE NOINHERIT NOLOAD NOLOGIN NOREPLICATION NOSUPERUSER NONE NOORDER NORMALIZE OUTPUTFORMAT NORMALIZED NOTHING NOTIFY NOWAIT NTH_VALUE NTILE NULLABLE NULLIF NUMBER NUMERIC NVARCHAR OBJECT OBJECTIVE OCCURRENCES_REGEX OCTET_LENGTH OCTETS OF OFFLINE OIDS OMIT ONE OPERATE OPERATOR OPTION OPTIONS ORC ORDERING ORDINALITY OTHERS OUT OUTPUT OVER OVERFLOW OVERLAY OVERRIDING OVERWRITE OWNED OWNER OWNERSHIP P PAD PARAMETER PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PARQUET PARSER PARTIAL PARTITIONED PASCAL PASS PASSING PASSTHROUGH PASSWORD PAST PATH PATTERN PER PERCENT_RANK PERCENTILE_CONT PERCENTILE_DISC PERIOD PERMISSION PERMUTE PIPE PLAIN PLAN PLANS PLI POLICY PORT PORTION POSITION POSITION_REGEX POSTGRES POWER PRECEDES PRECEDING PRECISION PREPARE PREPARED PREPROCESSORS PRESERVE PRESET PRIOR PRIVATE PRIVILEGES PROBLEM_TYPE PROCEDURAL PROCEDURE PROCEDURES PROGRAM PROPERTIES PRUNE PTF PUBLIC PUBLICACCESSIBLE PUBLICATION PLPYTHONU QUALIFY QUARTER QUOTA QUOTE QUOTES RANGE RANK 
RCFILE READ READRATIO READS REAL REASSIGN RECHECK RECLUSTER RECOVERY RECURSIVE REDSHIFT REF REFCURSOR REFERENCE_USAGE REFERENCING REFRESH REGION REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY REGRESSION REINDEX RELATIVE RELEASE REMAINDER REMOVE REMOVEQUOTES RENAME REPEATABLE REPLACE REPLICA REPLICATION REQUIRING RESET RESOURCE RESTART RESTRICT RESTRICTED RESULT RETRY_TIMEOUT RETURN RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH RETURNED_SQLSTATE RETURNING RETURNS REVOKE RLIKE RLS ROLE ROLLBACK ROLLUP ROUNDEC ROUTINE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA ROUTINES ROW ROW_COUNT ROW_NUMBER ROWGROUPSIZE ROWS RULE RUNNING S3_BUCKET S3_GARBAGE_COLLECT SAFE SAGEMAKER SAVEPOINT SCALAR SCALE SCHEMA SCHEMA_NAME SCHEMAS SCOPE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA SCROLL SEARCH SECOND SECRET_ACCESS_KEY SECRET_ARN SECTION SECURITY SEEK SELECTIVE SELF SENSITIVE SEPARATOR SEQUENCE SEQUENCEFILE SEQUENCES SERDE SERDEPROPERTIES SERIALIZABLE SERVER SERVER_NAME SESSION SESSION_TOKEN SET SETTINGS SETOF SETS SHAPEFILE SHARE SHOW SIMPLE SIMPLIFY SIN SINH SIZE SKIP SMALLINT SORT SORTKEY SOURCE SPACE SPECIFIC SPECIFIC_NAME SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING SQRT STABLE STAGE STAGES STANDALONE START STATE STATEMENT STATIC STATISTICS STATUPDATE STDDEV_POP STDDEV_SAMP STDIN STDOUT STORAGE STORED STREAM STREAMS STRICT STRING STRIP STRUCTURE STYLE SUBCLASS_ORIGIN SUBMULTISET SUBSCRIPTION SUBSET SUBSTRING SUBSTRING_REGEX SUCCEEDS SUM SUPER SUPERUSER SUPPORT SYMMETRIC SYSID SYSLOG SYSTEM_TIME SYSTEM_USER T TABLE_NAME TABLES TABLESAMPLE TABLESPACE TAN TANH TARGET TASK TASKS TB TEMP TEMPLATE TEMPORARY TERMINATED TEXT TEXTFILE THROUGH TIES TIME TIMEFORMAT TIMEOUT TIMETZ TIMESTAMPTZ TIMEZONE_HOUR TIMEZONE_MINUTE TOKEN TOP_LEVEL_COUNT TRANSACTION TRANSACTION_ACTIVE TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSFORM TRANSFORMS TRANSIENT TRANSLATE TRANSLATE_REGEX TRANSLATION TREAT TRIGGER TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA TRIM TRIMBLANKS TRIM_ARRAY TRUNCATE TRUNCATECOLUMNS TRUSTED TYPE TYPES UESCAPE UNBOUNDED UNCOMMITTED UNCONDITIONAL UNDER UNENCRYPTED UNKNOWN UNLIMITED UNLINK UNLISTEN UNLOAD UNLOGGED UNMATCHED UNNAMED UNRESTRICTED UNSAFE UNSIGNED UNTIL UNTYPED UPDATE UPPER URI USE_ANY_ROLE USAGE USE USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA UTF16 UTF16BE UTF16LE UTF32 UTF8 VACUUM VALID VALIDATE VALIDATOR VALUE VALUE_OF VALUES VAR_POP VAR_SAMP VARBINARY VARBYTE VARCHAR VARIADIC VARYING VERSION VERSIONING VIA VIEW VIEWS VOLATILE WALLET WAREHOUSE WEEK WEEKDAY WHENEVER WHITESPACE WIDTH_BUCKET WINDOW WORK WRAPPER WRITE XGBOOST XML XMLAGG XMLATTRIBUTES XMLBINARY XMLCAST XMLCOMMENT XMLCONCAT XMLDECLARATION XMLDOCUMENT XMLELEMENT XMLEXISTS XMLFOREST XMLITERATE XMLNAMESPACES XMLPARSE XMLPI XMLQUERY XMLROOT XMLSCHEMA XMLSERIALIZE XMLTABLE XMLTEXT XMLVALIDATE YAML YEAR YES ZONE ZSTD""" sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_snowflake.py000066400000000000000000006563121451700765000236160ustar00rootroot00000000000000"""The Snowflake dialect. Inherits from ANSI. 
Based on https://docs.snowflake.com/en/sql-reference-commands.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseSegment, Bracketed, CodeSegment, CommentSegment, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, MultiStringParser, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_snowflake_keywords import ( snowflake_reserved_keywords, snowflake_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") snowflake_dialect = ansi_dialect.copy_as("snowflake") snowflake_dialect.patch_lexer_matchers( [ # In snowflake, a double single quote resolves as a single quote in the string. # https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants RegexLexer( "single_quote", r"'([^'\\]|\\.|'')*'", CodeSegment, ), RegexLexer( "inline_comment", r"(--|#|//)[^\n]*", CommentSegment, segment_kwargs={"trim_start": ("--", "#", "//")}, ), ] ) snowflake_dialect.insert_lexer_matchers( [ # Keyword assigner needed for keyword functions. StringLexer("parameter_assigner", "=>", CodeSegment), StringLexer("function_assigner", "->", CodeSegment), RegexLexer("stage_path", r"(?:@[^\s;)]+|'@[^']+')", CodeSegment), # Column selector # https://docs.snowflake.com/en/sql-reference/sql/select.html#parameters RegexLexer("column_selector", r"\$[0-9]+", CodeSegment), RegexLexer( "dollar_quote", r"\$\$.*\$\$", CodeSegment, ), RegexLexer( "dollar_literal", r"[$][a-zA-Z0-9_.]*", CodeSegment, ), RegexLexer( "inline_dollar_sign", r"[a-zA-Z_][a-zA-Z0-9_$]*\$[a-zA-Z0-9_$]*", CodeSegment, ), RegexLexer( # For use with https://docs.snowflake.com/en/sql-reference/sql/get.html # Accepts unquoted file paths that begin file://. # Unquoted file paths cannot include special characters. 
"unquoted_file_path", r"file://(?:[a-zA-Z]+:|/)+(?:[0-9a-zA-Z\\/_*?-]+)(?:\.[0-9a-zA-Z]+)?", CodeSegment, ), StringLexer("question_mark", "?", CodeSegment), StringLexer("exclude_bracket_open", "{-", CodeSegment), StringLexer("exclude_bracket_close", "-}", CodeSegment), ], before="like_operator", ) # Check for ":=" operator before the equals operator to correctly parse walrus operator # for Snowflake scripting block statements # https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables snowflake_dialect.insert_lexer_matchers( [ StringLexer("walrus_operator", ":=", CodeSegment), ], before="equals", ) snowflake_dialect.bracket_sets("bracket_pairs").add( ("exclude", "StartExcludeBracketSegment", "EndExcludeBracketSegment", True) ) # Set the bare functions snowflake_dialect.sets("bare_functions").clear() snowflake_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "LOCALTIME", "LOCALTIMESTAMP", ] ) # Add all Snowflake compression types snowflake_dialect.sets("compression_types").clear() snowflake_dialect.sets("compression_types").update( [ "AUTO", "AUTO_DETECT", "GZIP", "BZ2", "BROTLI", "ZSTD", "DEFLATE", "RAW_DEFLATE", "LZO", "NONE", "SNAPPY", ], ) # Add all Snowflake supported file types snowflake_dialect.sets("files_types").clear() snowflake_dialect.sets("files_types").update( ["CSV", "JSON", "AVRO", "ORC" "PARQUET", "XML"], ) snowflake_dialect.sets("warehouse_types").clear() snowflake_dialect.sets("warehouse_types").update( [ "STANDARD", "SNOWPARK-OPTIMIZED", ], ) snowflake_dialect.sets("warehouse_sizes").clear() snowflake_dialect.sets("warehouse_sizes").update( [ "XSMALL", "SMALL", "MEDIUM", "LARGE", "XLARGE", "XXLARGE", "X2LARGE", "XXXLARGE", "X3LARGE", "X4LARGE", "X5LARGE", "X6LARGE", "X-SMALL", "X-LARGE", "2X-LARGE", "3X-LARGE", "4X-LARGE", "5X-LARGE", "6X-LARGE", ], ) snowflake_dialect.sets("warehouse_scaling_policies").clear() snowflake_dialect.sets("warehouse_scaling_policies").update( [ "STANDARD", "ECONOMY", ], ) snowflake_dialect.add( # In snowflake, these are case sensitive even though they're not quoted # so they need a different `name` and `type` so they're not picked up # by other rules. 
ParameterAssignerSegment=StringParser( "=>", SymbolSegment, type="parameter_assigner" ), FunctionAssignerSegment=StringParser("->", SymbolSegment, type="function_assigner"), # Walrus operator for Snowflake scripting block statements WalrusOperatorSegment=StringParser(":=", SymbolSegment, type="assignment_operator"), QuotedStarSegment=StringParser( "'*'", IdentifierSegment, type="quoted_star", trim_chars=("'",), ), NakedSemiStructuredElementSegment=RegexParser( r"[A-Z0-9_]*", CodeSegment, type="semi_structured_element", ), QuotedSemiStructuredElementSegment=TypedParser( "double_quote", CodeSegment, type="semi_structured_element", ), ColumnIndexIdentifierSegment=RegexParser( r"\$[0-9]+", IdentifierSegment, type="column_index_identifier_segment", ), LocalVariableNameSegment=RegexParser( r"[a-zA-Z0-9_]*", CodeSegment, type="variable", ), ReferencedVariableNameSegment=RegexParser( r"\$[A-Z_][A-Z0-9_]*", CodeSegment, type="variable", trim_chars=("$",), ), # We use a RegexParser instead of keywords as some (those with dashes) require # quotes: WarehouseType=OneOf( MultiStringParser( [ type for type in snowflake_dialect.sets("warehouse_types") if "-" not in type ], CodeSegment, type="warehouse_type", ), MultiStringParser( [f"'{type}'" for type in snowflake_dialect.sets("warehouse_types")], CodeSegment, type="warehouse_type", ), ), WarehouseSize=OneOf( MultiStringParser( [ size for size in snowflake_dialect.sets("warehouse_sizes") if "-" not in size ], CodeSegment, type="warehouse_size", ), MultiStringParser( [f"'{size}'" for size in snowflake_dialect.sets("warehouse_sizes")], CodeSegment, type="warehouse_size", ), ), CompressionType=OneOf( MultiStringParser( snowflake_dialect.sets("compression_types"), KeywordSegment, type="compression_type", ), MultiStringParser( [ f"'{compression}'" for compression in snowflake_dialect.sets("compression_types") ], KeywordSegment, type="compression_type", ), ), ScalingPolicy=OneOf( MultiStringParser( snowflake_dialect.sets("warehouse_scaling_policies"), KeywordSegment, type="scaling_policy", ), MultiStringParser( [ f"'{scaling_policy}'" for scaling_policy in snowflake_dialect.sets( "warehouse_scaling_policies" ) ], KeywordSegment, type="scaling_policy", ), ), ValidationModeOptionSegment=RegexParser( r"'?RETURN_(?:\d+_ROWS|ERRORS|ALL_ERRORS)'?", CodeSegment, type="validation_mode_option", ), CopyOptionOnErrorSegment=RegexParser( r"'?CONTINUE'?|'?SKIP_FILE(?:_[0-9]+%?)?'?|'?ABORT_STATEMENT'?", LiteralSegment, type="copy_on_error_option", ), DoubleQuotedUDFBody=TypedParser( "double_quote", CodeSegment, type="udf_body", trim_chars=('"',), ), SingleQuotedUDFBody=TypedParser( "single_quote", CodeSegment, type="udf_body", trim_chars=("'",), ), DollarQuotedUDFBody=TypedParser( "dollar_quote", CodeSegment, type="udf_body", trim_chars=("$",), ), StagePath=RegexParser( r"(?:@[^\s;)]+|'@[^']+')", IdentifierSegment, type="stage_path", ), S3Path=RegexParser( # https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html r"'s3://[a-z0-9][a-z0-9\.-]{1,61}[a-z0-9](?:/.*)?'", CodeSegment, type="bucket_path", ), GCSPath=RegexParser( # https://cloud.google.com/storage/docs/naming-buckets r"'gcs://[a-z0-9][\w\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="bucket_path", ), AzureBlobStoragePath=RegexParser( # https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/resource-name-rules#microsoftstorage r"'azure://[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\.blob\.core\.windows\.net/[a-z0-9]" r"[a-z0-9\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="bucket_path", ),
UnquotedFilePath=TypedParser( "unquoted_file_path", CodeSegment, type="unquoted_file_path", ), SnowflakeEncryptionOption=MultiStringParser( ["'SNOWFLAKE_FULL'", "'SNOWFLAKE_SSE'"], CodeSegment, type="stage_encryption_option", ), S3EncryptionOption=MultiStringParser( ["'AWS_CSE'", "'AWS_SSE_S3'", "'AWS_SSE_KMS'"], CodeSegment, type="stage_encryption_option", ), GCSEncryptionOption=StringParser( "'GCS_SSE_KMS'", CodeSegment, type="stage_encryption_option", ), AzureBlobStorageEncryptionOption=StringParser( "'AZURE_CSE'", CodeSegment, type="stage_encryption_option", ), FileType=OneOf( MultiStringParser( snowflake_dialect.sets("file_types"), CodeSegment, type="file_type", ), MultiStringParser( [f"'{file_type}'" for file_type in snowflake_dialect.sets("file_types")], CodeSegment, type="file_type", ), ), IntegerSegment=RegexParser( # An unquoted integer that can be passed as an argument to Snowflake functions. r"[0-9]+", LiteralSegment, type="integer_literal", ), SystemFunctionName=RegexParser( r"SYSTEM\$([A-Za-z0-9_]*)", CodeSegment, type="system_function_name", ), GroupByContentsGrammar=Delimited( OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), terminators=[ "ORDER", "LIMIT", "FETCH", "OFFSET", "HAVING", "QUALIFY", "WINDOW", ], ), LimitLiteralGrammar=OneOf( Ref("NumericLiteralSegment"), "NULL", # '' and $$$$ are allowed as alternatives to NULL. Ref("QuotedLiteralSegment"), ), StartExcludeBracketSegment=StringParser( "{-", SymbolSegment, type="start_exclude_bracket" ), EndExcludeBracketSegment=StringParser( "-}", SymbolSegment, type="end_exclude_bracket" ), QuestionMarkSegment=StringParser("?", SymbolSegment, type="question_mark"), CaretSegment=StringParser("^", SymbolSegment, type="caret"), DollarSegment=StringParser("$", SymbolSegment, type="dollar"), PatternQuantifierGrammar=Sequence( OneOf( Ref("PositiveSegment"), Ref("StarSegment"), Ref("QuestionMarkSegment"), Bracketed( OneOf( Ref("NumericLiteralSegment"), Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), ), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), ), bracket_type="curly", bracket_pairs_set="bracket_pairs", ), ), # To put a quantifier into “reluctant mode”. Ref("QuestionMarkSegment", optional=True), allow_gaps=False, ), PatternSymbolGrammar=Sequence( Ref("SingleIdentifierGrammar"), Ref("PatternQuantifierGrammar", optional=True), allow_gaps=False, ), PatternOperatorGrammar=OneOf( Ref("PatternSymbolGrammar"), Sequence( OneOf( Bracketed( OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), bracket_type="exclude", bracket_pairs_set="bracket_pairs", ), Bracketed( OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), ), Sequence( "PERMUTE", Bracketed( Delimited( Ref("PatternSymbolGrammar"), ), ), ), ), # Operators can also be followed by a quantifier. 
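# e.g. (illustrative) `(A | B)+` or `PERMUTE(A, B)?` in a MATCH_RECOGNIZE PATTERN.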
Ref("PatternQuantifierGrammar", optional=True), allow_gaps=False, ), ), ContextHeadersGrammar=OneOf( "CURRENT_ACCOUNT", "CURRENT_CLIENT", "CURRENT_DATABASE", "CURRENT_DATE", "CURRENT_IP_ADDRESS", "CURRENT_REGION", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_SCHEMAS", "CURRENT_SESSION", "CURRENT_STATEMENT", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSACTION", "CURRENT_USER", "CURRENT_VERSION", "CURRENT_WAREHOUSE", "LAST_QUERY_ID", "LAST_TRANSACTION", "LOCALTIME", "LOCALTIMESTAMP", ), ) snowflake_dialect.replace( NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( # See https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html r"[a-zA-Z_][a-zA-Z0-9_$]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$", ) ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("ReferencedVariableNameSegment"), ] ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), PreTableFunctionKeywordsGrammar=OneOf(Ref("LateralKeywordSegment")), FunctionContentsExpressionGrammar=OneOf( Ref("DatetimeUnitSegment"), Ref("NamedParameterExpressionSegment"), Ref("ReferencedVariableNameSegment"), Sequence( Ref("ExpressionSegment"), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), ), ), JoinLikeClauseGrammar=Sequence( AnySetOf( Ref("MatchRecognizeClauseSegment"), Ref("ChangesClauseSegment"), Ref("ConnectByClauseSegment"), Ref("FromBeforeExpressionSegment"), Ref("FromPivotExpressionSegment"), AnyNumberOf(Ref("FromUnpivotExpressionSegment")), Ref("SamplingExpressionSegment"), min_times=1, ), Ref("AliasExpressionSegment", optional=True), ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("ColumnIndexIdentifierSegment"), Ref("ReferencedVariableNameSegment"), Ref("StagePath"), Sequence( "IDENTIFIER", Bracketed( OneOf( Ref("SingleQuotedIdentifierSegment"), Ref("ReferencedVariableNameSegment"), ), ), ), ), PostFunctionGrammar=Sequence( Ref("WithinGroupClauseSegment", optional=True), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS", optional=True), Ref("OverClauseSegment", optional=True), ), TemporaryGrammar=Sequence( OneOf("LOCAL", "GLOBAL", optional=True), OneOf("TEMP", "TEMPORARY", optional=True), Sequence("VOLATILE", optional=True), optional=True, ), TemporaryTransientGrammar=OneOf(Ref("TemporaryGrammar"), "TRANSIENT"), BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( insert=[ # Allow use of CONNECT_BY_ROOT pseudo-columns. # https://docs.snowflake.com/en/sql-reference/constructs/connect-by.html#:~:text=Snowflake%20supports%20the%20CONNECT_BY_ROOT,the%20Examples%20section%20below. 
Sequence("CONNECT_BY_ROOT", Ref("ColumnReferenceSegment")), ], before=Ref("LiteralGrammar"), ), QuotedLiteralSegment=OneOf( # https://docs.snowflake.com/en/sql-reference/data-types-text.html#string-constants TypedParser( "single_quote", LiteralSegment, type="quoted_literal", ), TypedParser( "dollar_quote", LiteralSegment, type="quoted_literal", ), ), LikeGrammar=OneOf( # https://docs.snowflake.com/en/sql-reference/functions/like.html Sequence("LIKE", OneOf("ALL", "ANY", optional=True)), "RLIKE", Sequence("ILIKE", Ref.keyword("ANY", optional=True)), "REGEXP", ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", "FETCH", "OFFSET", Ref("SetOperatorSegment"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", "FETCH", "OFFSET", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", "FETCH", "OFFSET", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", "FETCH", "OFFSET", "MEASURES", ), TrimParametersGrammar=Nothing(), GroupByClauseTerminatorGrammar=OneOf( "ORDER", "LIMIT", "FETCH", "OFFSET", "HAVING", "QUALIFY", "WINDOW" ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "QUALIFY", "WINDOW", "FETCH", "OFFSET", ), ) # Add all Snowflake keywords snowflake_dialect.sets("unreserved_keywords").clear() snowflake_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", snowflake_unreserved_keywords ) snowflake_dialect.sets("reserved_keywords").clear() snowflake_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", snowflake_reserved_keywords ) # Add datetime units and their aliases from # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts snowflake_dialect.sets("datetime_units").clear() snowflake_dialect.sets("datetime_units").update( [ "YEAR", "Y", "YY", "YYY", "YYYY", "YR", "YEARS", "YRS", "MONTH", "MM", "MON", "MONS", "MONTHS", "DAY", "D", "DD", "DAYS", "DAYOFMONTH", "DAYOFWEEK", "WEEKDAY", "DOW", "DW", "DAYOFWEEKISO", "WEEKDAY_ISO", "DOW_ISO", "DW_ISO", "DAYOFYEAR", "YEARDAY", "DOY", "DY", "WEEK", "W", "WK", "WEEKOFYEAR", "WOY", "WY", "WEEKISO", "WEEK_ISO", "WEEKOFYEARISO", "WEEKOFYEAR_ISO", "QUARTER", "Q", "QTR", "QTRS", "QUARTERS", "YEAROFWEEK", "YEAROFWEEKISO", "HOUR", "H", "HH", "HR", "HOURS", "HRS", "MINUTE", "M", "MI", "MIN", "MINUTES", "MINS", "SECOND", "S", "SEC", "SECONDS", "SECS", "MILLISECOND", "MS", "MSEC", "MILLISECONDS", "MICROSECOND", "US", "USEC", "MICROSECONDS", "NANOSECOND", "NS", "NSEC", "NANOSEC", "NSECOND", "NANOSECONDS", "NANOSECS", "NSECONDS", "EPOCH_SECOND", "EPOCH", "EPOCH_SECONDS", "EPOCH_MILLISECOND", "EPOCH_MILLISECONDS", "EPOCH_MICROSECOND", "EPOCH_MICROSECONDS", "EPOCH_NANOSECOND", "EPOCH_NANOSECONDS", "TIMEZONE_HOUR", "TZH", "TIMEZONE_MINUTE", "TZM", ] ) class FunctionNameSegment(ansi.FunctionNameSegment): """Function name, including any prefix bits, e.g. project or schema. Overriding FunctionNameSegment to support Snowflake's IDENTIFIER pseudo-function. """ type = "function_name" match_grammar: Matchable = Sequence( # Project name, schema identifier, etc. 
AnyNumberOf( Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), ), terminators=[Ref("BracketedSegment")], ), # Base function name OneOf( Ref("FunctionNameIdentifierSegment"), Ref("QuotedIdentifierSegment"), # Snowflake's IDENTIFIER pseudo-function # https://docs.snowflake.com/en/sql-reference/identifier-literal.html Sequence( "IDENTIFIER", Bracketed( OneOf( Ref("SingleQuotedIdentifierSegment"), Ref("ReferencedVariableNameSegment"), ), ), ), ), allow_gaps=False, ) class ConnectByClauseSegment(BaseSegment): """A `CONNECT BY` clause. https://docs.snowflake.com/en/sql-reference/constructs/connect-by.html """ type = "connectby_clause" match_grammar = Sequence( "START", "WITH", Ref("ExpressionSegment"), "CONNECT", "BY", Delimited( Sequence( Ref.keyword("PRIOR", optional=True), Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref.keyword("PRIOR", optional=True), Ref("ColumnReferenceSegment"), ), ), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """A `GROUP BY` clause like in `SELECT`. Snowflake supports Cube, Rollup, and Grouping Sets https://docs.snowflake.com/en/sql-reference/constructs/group-by.html """ match_grammar: Matchable = Sequence( "GROUP", "BY", Indent, OneOf( Sequence( OneOf("CUBE", "ROLLUP", Sequence("GROUPING", "SETS")), Bracketed( Ref("GroupByContentsGrammar"), ), ), "ALL", Ref("GroupByContentsGrammar"), ), Dedent, ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause like in `INSERT`.""" match_grammar = Sequence( "VALUES", Delimited( Bracketed( Delimited( # DEFAULT and NULL keywords used in # INSERT INTO statement. "DEFAULT", "NULL", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), ), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. https://docs.snowflake.com/en/sql-reference/sql/insert.html https://docs.snowflake.com/en/sql-reference/sql/insert-multi-table.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", Ref.keyword("OVERWRITE", optional=True), OneOf( # Single table INSERT INTO. Sequence( "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("SelectableGrammar"), ), # Unconditional multi-table INSERT INTO. Sequence( "ALL", AnyNumberOf( Sequence( "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment", optional=True), ), min_times=1, ), Ref("SelectStatementSegment"), ), # Conditional multi-table INSERT INTO. 
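# e.g. (illustrative): INSERT FIRST WHEN c1 > 10 THEN INTO t1 ELSE INTO t2 SELECT c1 FROM src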
Sequence( OneOf( "FIRST", "ALL", ), AnyNumberOf( Sequence( "WHEN", Ref("ExpressionSegment"), "THEN", AnyNumberOf( Sequence( "INTO", Ref("TableReferenceSegment"), Ref( "BracketedColumnReferenceListGrammar", optional=True ), Ref("ValuesClauseSegment", optional=True), ), min_times=1, ), ), min_times=1, ), Sequence( "ELSE", "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("ValuesClauseSegment", optional=True), optional=True, ), Ref("SelectStatementSegment"), ), ), ) class FunctionDefinitionGrammar(ansi.FunctionDefinitionGrammar): """This is the body of a `CREATE FUNCTION AS` statement.""" match_grammar = Sequence( "AS", Ref("QuotedLiteralSegment"), Sequence( "LANGUAGE", Ref("NakedIdentifierSegment"), optional=True, ), ) class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("AccessStatementSegment"), Ref("CreateStatementSegment"), Ref("CreateTaskSegment"), Ref("CreateUserSegment"), Ref("CreateCloneStatementSegment"), Ref("CreateProcedureStatementSegment"), Ref("AlterProcedureStatementSegment"), Ref("ScriptingBlockStatementSegment"), Ref("ScriptingLetStatementSegment"), Ref("ReturnStatementSegment"), Ref("ShowStatementSegment"), Ref("AlterAccountStatementSegment"), Ref("AlterUserStatementSegment"), Ref("AlterSessionStatementSegment"), Ref("AlterTaskStatementSegment"), Ref("SetAssignmentStatementSegment"), Ref("CallStoredProcedureSegment"), Ref("MergeStatementSegment"), Ref("CopyIntoTableStatementSegment"), Ref("CopyIntoLocationStatementSegment"), Ref("FormatTypeOptions"), Ref("AlterWarehouseStatementSegment"), Ref("AlterShareStatementSegment"), Ref("CreateExternalTableSegment"), Ref("AlterExternalTableStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("AlterSchemaStatementSegment"), Ref("CreateFunctionStatementSegment"), Ref("AlterFunctionStatementSegment"), Ref("CreateExternalFunctionStatementSegment"), Ref("CreateStageSegment"), Ref("AlterStageSegment"), Ref("CreateStreamStatementSegment"), Ref("AlterStreamStatementSegment"), Ref("UnsetStatementSegment"), Ref("UndropStatementSegment"), Ref("CommentStatementSegment"), Ref("CallStatementSegment"), Ref("AlterViewStatementSegment"), Ref("AlterMaterializedViewStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("DropExternalTableStatementSegment"), Ref("DropMaterializedViewStatementSegment"), Ref("DropObjectStatementSegment"), Ref("CreateFileFormatSegment"), Ref("AlterFileFormatSegment"), Ref("AlterPipeSegment"), Ref("ListStatementSegment"), Ref("GetStatementSegment"), Ref("PutStatementSegment"), Ref("RemoveStatementSegment"), Ref("CreateDatabaseFromShareStatementSegment"), Ref("AlterRoleStatementSegment"), Ref("AlterStorageIntegrationSegment"), Ref("ExecuteImmediateClauseSegment"), Ref("ExecuteTaskClauseSegment"), Ref("CreateResourceMonitorStatementSegment"), Ref("AlterResourceMonitorStatementSegment"), Ref("CreateSequenceStatementSegment"), Ref("AlterSequenceStatementSegment"), Ref("AlterDatabaseSegment"), Ref("AlterMaskingPolicySegment"), ], remove=[ Ref("CreateIndexStatementSegment"), Ref("DropIndexStatementSegment"), ], ) class SetAssignmentStatementSegment(BaseSegment): """A `SET` statement. 
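e.g. (illustrative): `SET my_var = 10` or `SET (v1, v2) = (10, 'example')`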
https://docs.snowflake.com/en/sql-reference/sql/set.html """ type = "set_statement" match_grammar = OneOf( Sequence( "SET", Ref("LocalVariableNameSegment"), Ref("EqualsSegment"), Ref("ExpressionSegment"), ), Sequence( "SET", Bracketed(Delimited(Ref("LocalVariableNameSegment"))), Ref("EqualsSegment"), Bracketed( Delimited( Ref("ExpressionSegment"), ), ), ), ) class CallStoredProcedureSegment(BaseSegment): """This is a CALL statement used to execute a stored procedure. https://docs.snowflake.com/en/sql-reference/sql/call.html """ type = "call_segment" match_grammar = Sequence( "CALL", Ref("FunctionSegment"), ) class WithinGroupClauseSegment(BaseSegment): """A `WITHIN GROUP` clause for window functions. https://docs.snowflake.com/en/sql-reference/functions/listagg.html. https://docs.snowflake.com/en/sql-reference/functions/array_agg.html. """ type = "withingroup_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed( Ref("OrderByClauseSegment", optional=True), parse_mode=ParseMode.GREEDY ), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression.""" type = "from_expression_element" match_grammar = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), Ref("ChangesClauseSegment"), Ref("JoinLikeClauseGrammar"), "CROSS", ), optional=True, ), # https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True), Ref("SamplingExpressionSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) class PatternSegment(BaseSegment): """A `PATTERN` expression. https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html """ type = "pattern_expression" match_grammar = Sequence( # https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html#pattern-specifying-the-pattern-to-match Ref("CaretSegment", optional=True), OneOf( AnyNumberOf( Ref("PatternOperatorGrammar"), ), Delimited( Ref("PatternOperatorGrammar"), delimiter=Ref("BitwiseOrSegment"), ), ), Ref("DollarSegment", optional=True), ) class MatchRecognizeClauseSegment(BaseSegment): """A `MATCH_RECOGNIZE` clause. https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html """ type = "match_recognize_clause" match_grammar = Sequence( "MATCH_RECOGNIZE", Bracketed( Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Sequence( "MEASURES", Delimited( Sequence( # The edges of the window frame can be specified # by using either RUNNING or FINAL semantics.
# https://docs.snowflake.com/en/sql-reference/constructs/match_recognize.html#expressions-in-define-and-measures-clauses OneOf( "FINAL", "RUNNING", optional=True, ), Ref("ExpressionSegment"), Ref("AliasExpressionSegment"), ), ), optional=True, ), OneOf( Sequence( "ONE", "ROW", "PER", "MATCH", ), Sequence( "ALL", "ROWS", "PER", "MATCH", OneOf( Sequence( "SHOW", "EMPTY", "MATCHES", ), Sequence( "OMIT", "EMPTY", "MATCHES", ), Sequence( "WITH", "UNMATCHED", "ROWS", ), optional=True, ), ), optional=True, ), Sequence( "AFTER", "MATCH", "SKIP", OneOf( Sequence( "PAST", "LAST", "ROW", ), Sequence( "TO", "NEXT", "ROW", ), Sequence( "TO", OneOf("FIRST", "LAST", optional=True), Ref("SingleIdentifierGrammar"), ), ), optional=True, ), "PATTERN", Bracketed( Ref("PatternSegment"), ), "DEFINE", Delimited( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("ExpressionSegment"), ), ), ), ) class ChangesClauseSegment(BaseSegment): """A `CHANGES` clause. https://docs.snowflake.com/en/sql-reference/constructs/changes.html """ type = "changes_clause" match_grammar = Sequence( "CHANGES", Bracketed( "INFORMATION", Ref("ParameterAssignerSegment"), OneOf("DEFAULT", "APPEND_ONLY"), ), OneOf( Sequence( "AT", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), ), ), Sequence( "BEFORE", Bracketed( "STATEMENT", Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), ), ), ), Sequence( "END", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), ), optional=True, ), ) class FromAtExpressionSegment(BaseSegment): """An AT expression.""" type = "from_at_expression" match_grammar = Sequence( "AT", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), ), ) class FromBeforeExpressionSegment(BaseSegment): """A BEFORE expression.""" type = "from_before_expression" match_grammar = Sequence( "BEFORE", Bracketed( OneOf("TIMESTAMP", "OFFSET", "STATEMENT"), Ref("ParameterAssignerSegment"), Ref("ExpressionSegment"), parse_mode=ParseMode.GREEDY, ), ) class FromPivotExpressionSegment(BaseSegment): """A PIVOT expression.""" type = "from_pivot_expression" match_grammar = Sequence( "PIVOT", Bracketed( Ref("FunctionSegment"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed(Delimited(Ref("LiteralGrammar"))), ), ) class FromUnpivotExpressionSegment(BaseSegment): """An UNPIVOT expression.""" type = "from_unpivot_expression" match_grammar = Sequence( "UNPIVOT", Bracketed( Ref("SingleIdentifierGrammar"), "FOR", Ref("SingleIdentifierGrammar"), "IN", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ), ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A sampling expression.""" match_grammar = Sequence( OneOf("SAMPLE", "TABLESAMPLE"), OneOf("BERNOULLI", "ROW", "SYSTEM", "BLOCK", optional=True), Bracketed( OneOf(Ref("NumericLiteralSegment"), Ref("ReferencedVariableNameSegment")), Ref.keyword("ROWS", optional=True), ), Sequence( OneOf("REPEATABLE", "SEED"), Bracketed(Ref("NumericLiteralSegment")), optional=True, ), ) class NamedParameterExpressionSegment(BaseSegment): """A keyword expression. e.g. 'input => custom_fields' """ type = "snowflake_keyword_expression" match_grammar = Sequence( Ref("ParameterNameSegment"), Ref("ParameterAssignerSegment"), OneOf( Ref("LiteralGrammar"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ), ) class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment. 
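e.g. (illustrative) the `:salesperson.name` in `src:salesperson.name`.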
https://docs.snowflake.com/en/user-guide/semistructured-considerations.html """ type = "semi_structured_expression" match_grammar = Sequence( OneOf( # If a field is already a VARIANT, this could # be initiated by a colon or a dot. This is particularly # useful when a field is an ARRAY of objects. Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Ref("QuotedSemiStructuredElementSegment"), ), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( OneOf( # Can be delimited by dots or colons Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Ref("QuotedSemiStructuredElementSegment"), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "qualify_clause" match_grammar = Sequence( "QUALIFY", Indent, OneOf( Bracketed( Ref("ExpressionSegment"), ), Ref("ExpressionSegment"), ), Dedent, ) class SelectStatementSegment(ansi.SelectStatementSegment): """A snowflake `SELECT` statement including optional Qualify. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "select_statement" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) class SelectClauseElementSegment(ansi.SelectClauseElementSegment): """Inherit from ansi but also allow for Snowflake System Functions. https://docs.snowflake.com/en/sql-reference/functions-system """ match_grammar = ansi.SelectClauseElementSegment.match_grammar.copy( insert=[ Sequence( Ref("SystemFunctionName"), Bracketed(Ref("QuotedLiteralSegment")), ) ], before=Ref("WildcardExpressionSegment"), ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Snowflake.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional Exclude or Rename clause Ref("ExcludeClauseSegment", optional=True), Ref("ReplaceClauseSegment", optional=True), Ref("RenameClauseSegment", optional=True), ] ) class ExcludeClauseSegment(BaseSegment): """A snowflake SELECT EXCLUDE clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_exclude_clause" match_grammar = Sequence( "EXCLUDE", OneOf( Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), Ref("SingleIdentifierGrammar"), ), ) class RenameClauseSegment(BaseSegment): """A snowflake SELECT RENAME clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_rename_clause" match_grammar = Sequence( "RENAME", OneOf( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("SingleIdentifierGrammar"), ), Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), "AS", Ref("SingleIdentifierGrammar"), ) ) ), ), ) class ReplaceClauseSegment(BaseSegment): """A snowflake SELECT REPLACE clause. https://docs.snowflake.com/en/sql-reference/sql/select.html """ type = "select_replace_clause" match_grammar = Sequence( "REPLACE", Bracketed( Delimited( Sequence( Ref("ExpressionSegment"), "AS", Ref("SingleIdentifierGrammar"), ) ) ), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns, specifically for Snowflake. 
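e.g. (illustrative) the `DISTINCT TOP 10` in `SELECT DISTINCT TOP 10 c1 FROM t`.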
https://docs.snowflake.com/en/sql-reference/constructs.html """ match_grammar = Sequence( OneOf("DISTINCT", "ALL", optional=True), # TOP N is unique to Snowflake, and we can optionally add DISTINCT/ALL in front # of it. Sequence("TOP", Ref("NumericLiteralSegment"), optional=True), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """An `ALTER TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html If possible, please keep the order below the same as Snowflake's doc: """ match_grammar = Sequence( "ALTER", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( # Rename Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # Swap With Sequence( "SWAP", "WITH", Ref("TableReferenceSegment"), ), # searchOptimizationAction # N.B. Since SEARCH and OPTIMIZATION are unreserved keywords # we move this above AlterTableTableColumnActionSegment # in order to avoid matching these as columns. Sequence( OneOf( "ADD", "DROP", ), "SEARCH", "OPTIMIZATION", ), Ref("AlterTableClusteringActionSegment"), Ref("AlterTableConstraintActionSegment"), # @TODO: constraintAction # @TODO: extTableColumnAction # SET Table options # @TODO: Restrict the list of parameters supported per Snowflake doc. Sequence( Ref.keyword("SET"), OneOf( Ref("ParameterNameSegment"), Ref.keyword("COMMENT"), ), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), ), # @TODO: add more constraint actions Sequence( "DROP", Ref("PrimaryKeyGrammar"), ), Sequence( "ADD", Ref("PrimaryKeyGrammar"), Bracketed(Delimited(Ref("ColumnReferenceSegment"), optional=True)), ), Ref("AlterTableTableColumnActionSegment"), # @TODO: Set/unset TAG # @TODO: Unset table options # @TODO: Add/drop row access policies ), ) class AlterTableTableColumnActionSegment(BaseSegment): """ALTER TABLE `tableColumnAction` per defined in Snowflake's grammar. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html https://docs.snowflake.com/en/sql-reference/sql/alter-table-column.html If possible, please match the order of this sequence with what's defined in Snowflake's tableColumnAction grammar. 
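e.g. (illustrative) the `ADD COLUMN c1 NUMBER DEFAULT 0` part of an ALTER TABLE statement.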
""" type = "alter_table_table_column_action" match_grammar = OneOf( # Add Column Sequence( "ADD", Ref.keyword("COLUMN", optional=True), # Handle Multiple Columns Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), OneOf( # Default Sequence( "DEFAULT", Ref("ExpressionSegment"), ), # Auto-increment/identity column Sequence( OneOf( "AUTOINCREMENT", "IDENTITY", ), OneOf( # ( , ) Bracketed( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), # START INCREMENT Sequence( "START", Ref("NumericLiteralSegment"), "INCREMENT", Ref("NumericLiteralSegment"), ), optional=True, ), ), optional=True, ), # @TODO: Add support for `inlineConstraint` Sequence( Ref.keyword("WITH", optional=True), "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), optional=True, ), Ref("CommentClauseSegment", optional=True), ), ), ), # Rename column Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), # Alter/Modify column(s) Sequence( OneOf("ALTER", "MODIFY"), OptionallyBracketed( Delimited( OneOf( # Add things Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence("DROP", "DEFAULT"), Sequence( "SET", "DEFAULT", Ref("NakedIdentifierSegment"), Ref("DotSegment"), "NEXTVAL", ), Sequence( OneOf("SET", "DROP", optional=True), "NOT", "NULL", ), Sequence( Sequence( Sequence("SET", "DATA", optional=True), "TYPE", optional=True, ), Ref("DatatypeSegment"), ), Ref("CommentClauseSegment"), ), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "SET", "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "MASKING", "POLICY", ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "SET", "TAG", Ref("TagReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "TAG", Ref("TagReferenceSegment"), ), ), ), ), ), # Drop column Sequence( "DROP", Ref.keyword("COLUMN", optional=True), Delimited(Ref("ColumnReferenceSegment")), ), # @TODO: Drop columns # vvvvv COPIED FROM ANSI vvvvv # @TODO: Removed these once `tableColumnAction` is properly supported. Sequence( OneOf("ADD", "MODIFY"), Ref.keyword("COLUMN", optional=True), Ref("ColumnDefinitionSegment"), OneOf( Sequence(OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment")), # Bracketed Version of the same Ref("BracketedColumnReferenceListGrammar"), optional=True, ), ), ) class AlterTableClusteringActionSegment(BaseSegment): """ALTER TABLE `clusteringAction` per defined in Snowflake's grammar. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html#clustering-actions-clusteringaction """ type = "alter_table_clustering_action" match_grammar = OneOf( Sequence( "CLUSTER", "BY", OneOf( Ref("FunctionSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), ), # N.B. 
RECLUSTER is deprecated: # https://docs.snowflake.com/en/user-guide/tables-clustering-manual.html Sequence( "RECLUSTER", Sequence( "MAX_SIZE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Ref("WhereClauseSegment", optional=True), ), Sequence( OneOf( "SUSPEND", "RESUME", ), "RECLUSTER", ), Sequence( "DROP", "CLUSTERING", "KEY", ), ) class AlterTableConstraintActionSegment(BaseSegment): """ALTER TABLE `constraintAction` as defined in Snowflake's grammar. https://docs.snowflake.com/en/sql-reference/sql/alter-table.html#constraint-actions-constraintaction """ type = "alter_table_constraint_action" match_grammar = OneOf( # Add constraint Sequence( "ADD", Sequence( "CONSTRAINT", OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), ), optional=True, ), OneOf( Sequence( Ref("PrimaryKeyGrammar"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), ), Sequence( Sequence( Ref("ForeignKeyGrammar"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ) ), ), "REFERENCES", Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), ), Sequence( "UNIQUE", Bracketed(Ref("ColumnReferenceSegment"), optional=True) ), ), ), Sequence( "DROP", Sequence("CONSTRAINT", Ref("NakedIdentifierSegment"), optional=True), OneOf( Ref("PrimaryKeyGrammar"), Ref("ForeignKeyGrammar"), "UNIQUE", ), Delimited(Ref("ColumnReferenceSegment")), ), Sequence( "RENAME", "CONSTRAINT", Ref("NakedIdentifierSegment"), "TO", Ref("NakedIdentifierSegment"), ), ) class AlterWarehouseStatementSegment(BaseSegment): """An `ALTER WAREHOUSE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html """ type = "alter_warehouse_statement" match_grammar = Sequence( "ALTER", "WAREHOUSE", Sequence("IF", "EXISTS", optional=True), OneOf( Sequence( Ref("ObjectReferenceSegment", optional=True), OneOf( "SUSPEND", Sequence( "RESUME", Sequence("IF", "SUSPENDED", optional=True), ), ), ), Sequence( Ref("ObjectReferenceSegment", optional=True), Sequence( "ABORT", "ALL", "QUERIES", ), ), Sequence( Ref("ObjectReferenceSegment"), "RENAME", "TO", Ref("ObjectReferenceSegment"), ), Sequence( Ref("ObjectReferenceSegment", optional=True), "SET", OneOf( AnyNumberOf( Ref("CommaSegment", optional=True), Ref("WarehouseObjectPropertiesSegment"), Ref("CommentEqualsClauseSegment"), Ref("WarehouseObjectParamsSegment"), ), Ref("TagEqualsSegment"), ), ), Sequence( Ref("ObjectReferenceSegment"), "UNSET", OneOf( Delimited(Ref("NakedIdentifierSegment")), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), ), ), ) class AlterShareStatementSegment(BaseSegment): """An `ALTER SHARE` statement.
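e.g. an illustrative sketch (the share and account names below are
placeholders):

    ALTER SHARE my_share ADD ACCOUNTS = account1, account2;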
https://docs.snowflake.com/en/sql-reference/sql/alter-share.html """ type = "alter_share_statement" match_grammar = Sequence( "ALTER", "SHARE", Sequence("IF", "EXISTS", optional=True), Ref("NakedIdentifierSegment"), OneOf( Sequence( OneOf( "ADD", "REMOVE", ), "ACCOUNTS", Ref("EqualsSegment"), Delimited(Ref("NakedIdentifierSegment")), Sequence( "SHARE_RESTRICTIONS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), Sequence( "SET", "ACCOUNTS", Ref("EqualsSegment"), Delimited(Ref("NakedIdentifierSegment")), Ref("CommentEqualsClauseSegment", optional=True), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "UNSET", "TAG", Ref("TagReferenceSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TagReferenceSegment"), optional=True ), ), Sequence("UNSET", "COMMENT"), ), ) class AlterStorageIntegrationSegment(BaseSegment): """An `ALTER STORAGE INTEGRATION` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-storage-integration """ type = "alter_storage_integration_statement" match_grammar = Sequence( "ALTER", Ref.keyword("STORAGE", optional=True), "INTEGRATION", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", OneOf( Ref("TagEqualsSegment", optional=True), AnySetOf( Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence( "ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), OneOf( AnySetOf( Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_OBJECT_ACL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), AnySetOf( Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), Sequence( "STORAGE_ALLOWED_LOCATIONS", Ref("EqualsSegment"), OneOf( Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), Bracketed( Ref("QuotedStarSegment"), ), ), ), Sequence( "STORAGE_BLOCKED_LOCATIONS", Ref("EqualsSegment"), Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), ), ), ), ), Sequence( "UNSET", OneOf( Sequence( "TAG", Delimited(Ref("TagReferenceSegment")), optional=True ), "COMMENT", "ENABLED", "STORAGE_BLOCKED_LOCATIONS", ), ), ), ) class AlterExternalTableStatementSegment(BaseSegment): """An `ALTER EXTERNAL TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-external-table.html """ type = "alter_external_table_statement" match_grammar = Sequence( "ALTER", "EXTERNAL", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence("REFRESH", Ref("QuotedLiteralSegment", optional=True)), Sequence( OneOf("ADD", "REMOVE"), "FILES", Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "SET", Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Ref("TagEqualsSegment", optional=True), ), Sequence("UNSET", Ref("TagEqualsSegment")), Sequence("DROP", "PARTITION", "LOCATION", Ref("QuotedLiteralSegment")), Sequence( "ADD", "PARTITION", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), "LOCATION", Ref("QuotedLiteralSegment"), ), ), ) class CommentEqualsClauseSegment(BaseSegment): """A comment clause. e.g. COMMENT = 'view/table description' """ type = "comment_equals_clause" match_grammar = Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ) class TagBracketedEqualsSegment(BaseSegment): """A tag clause. e.g. 
TAG (tag1 = 'value1', tag2 = 'value2') """ type = "tag_bracketed_equals" match_grammar = Sequence( Sequence("WITH", optional=True), "TAG", Bracketed( Delimited( Sequence( Ref("TagReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ), ), ) class TagEqualsSegment(BaseSegment): """A tag clause. e.g. TAG tag1 = 'value1', tag2 = 'value2' """ type = "tag_equals" match_grammar = Sequence( "TAG", Delimited( Sequence( Ref("TagReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ) ), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A snowflake unordered `SELECT` statement including optional Qualify. https://docs.snowflake.com/en/sql-reference/constructs/qualify.html """ type = "select_statement" match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), ) class AccessStatementSegment(BaseSegment): """A `GRANT` or `REVOKE` statement. Grant specific information: * https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Revoke specific information: * https://docs.snowflake.com/en/sql-reference/sql/revoke-role.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html * https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege-share.html """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ACCOUNT", "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", "SHARE", Sequence("DATA", "EXCHANGE", "LISTING"), Sequence("NETWORK", "POLICY"), ), ), Sequence("APPLY", "MASKING", "POLICY"), Sequence("APPLY", "ROW", "ACCESS", "POLICY"), Sequence("APPLY", "SESSION", "POLICY"), Sequence("APPLY", "TAG"), Sequence("ATTACH", "POLICY"), Sequence("EXECUTE", "TASK"), Sequence("IMPORT", "SHARE"), Sequence( "MANAGE", OneOf( "GRANTS", Sequence(OneOf("ACCOUNT", "ORGANIZATION", "USER"), "SUPPORT", "CASES"), ), ), Sequence("MONITOR", OneOf("EXECUTION", "USAGE")), Sequence("OVERRIDE", "SHARE", "RESTRICTIONS"), ) _schema_object_names = [ "TABLE", "VIEW", "STAGE", "FUNCTION", "PROCEDURE", "ROUTINE", "SEQUENCE", "STREAM", "TASK", "PIPE", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("MATERIALIZED", "VIEW"), Sequence("EXTERNAL", "TABLE"), Sequence(OneOf("TEMP", "TEMPORARY"), "TABLE"), Sequence("FILE", "FORMAT"), Sequence("SESSION", "POLICY"), Sequence("MASKING", "POLICY"), Sequence("ROW", "ACCESS", "POLICY"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( Sequence( "CREATE", OneOf( "SCHEMA", # Sequence("MASKING", "POLICY"), _schema_object_types, ), ), Sequence("IMPORTED", "PRIVILEGES"), "APPLY", "CONNECT", "CREATE", "DELETE", "EXECUTE", "INSERT", "MODIFY", "MONITOR", "OPERATE", "OWNERSHIP", "READ", "REFERENCE_USAGE", "REFERENCES", "SELECT", "TEMP", "TEMPORARY", "TRIGGER", "TRUNCATE", "UPDATE", "USAGE", "USE_ANY_ROLE", "WRITE", Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on. 
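# As a hedged illustration, the combined grammar below is intended to
# match statements of this shape (all object names are placeholders):
#
#     GRANT SELECT ON TABLE my_table TO ROLE my_role;
#     GRANT USAGE ON ALL SCHEMAS IN DATABASE my_db TO ROLE my_role;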
_objects = OneOf( "ACCOUNT", Sequence( OneOf( Sequence("RESOURCE", "MONITOR"), "WAREHOUSE", "DATABASE", "DOMAIN", "INTEGRATION", "SCHEMA", "ROLE", Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), Sequence("FUTURE", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence( "ALL", OneOf( _schema_object_types_plural, Sequence("MATERIALIZED", "VIEWS"), Sequence("EXTERNAL", "TABLES"), Sequence("FILE", "FORMATS"), ), "IN", OneOf("SCHEMA", "DATABASE"), ), Sequence( "FUTURE", OneOf( _schema_object_types_plural, Sequence("MATERIALIZED", "VIEWS"), Sequence("EXTERNAL", "TABLES"), Sequence("FILE", "FORMATS"), ), "IN", OneOf("DATABASE", "SCHEMA"), ), optional=True, ), Delimited( Ref("ObjectReferenceSegment"), Sequence( Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar", optional=True), ), terminators=["TO", "FROM"], ), ), ) match_grammar: Matchable = OneOf( # https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), # In the case where a role is granted non-explicitly, # e.g. GRANT ROLE_NAME TO OTHER_ROLE_NAME # See https://docs.snowflake.com/en/sql-reference/sql/grant-role.html Ref("ObjectReferenceSegment"), ), "TO", OneOf("USER", "ROLE", "SHARE", optional=True), Delimited( OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment"), "PUBLIC"), ), OneOf( Sequence("WITH", "GRANT", "OPTION"), Sequence("WITH", "ADMIN", "OPTION"), Sequence(OneOf("REVOKE", "COPY"), "CURRENT", "GRANTS"), optional=True, ), Sequence( "GRANTED", "BY", OneOf( "CURRENT_USER", "SESSION_USER", Ref("ObjectReferenceSegment"), ), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), "ON", _objects, ), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")), ), "FROM", OneOf("USER", "ROLE", "SHARE", optional=True), Delimited( Ref("ObjectReferenceSegment"), ), Ref("DropBehaviorGrammar", optional=True), ), ) class CreateCloneStatementSegment(BaseSegment): """A snowflake `CREATE ... CLONE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-clone.html """ type = "create_clone_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), OneOf( "DATABASE", "SCHEMA", "TABLE", "SEQUENCE", Sequence("FILE", "FORMAT"), "STAGE", "STREAM", "TASK", ), Sequence("IF", "NOT", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), "CLONE", Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), ) class CreateDatabaseFromShareStatementSegment(BaseSegment): """A snowflake `CREATE ... DATABASE FROM SHARE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-database.html """ type = "create_database_from_share_statement" match_grammar = Sequence( "CREATE", "DATABASE", Ref("ObjectReferenceSegment"), Sequence("FROM", "SHARE"), Ref("ObjectReferenceSegment"), ) class CreateProcedureStatementSegment(BaseSegment): """A snowflake `CREATE ... PROCEDURE` statement. 
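e.g. a minimal, hedged sketch (the procedure name and body are
placeholders, and the body quoting is simplified):

    CREATE OR REPLACE PROCEDURE my_proc()
    RETURNS VARCHAR
    LANGUAGE SQL
    AS $$ BEGIN RETURN 'done'; END $$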
https://docs.snowflake.com/en/sql-reference/sql/create-procedure.html """ type = "create_procedure_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Sequence("SECURE", optional=True), "PROCEDURE", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), Sequence("COPY", "GRANTS", optional=True), "RETURNS", OneOf( Ref("DatatypeSegment"), Sequence( "TABLE", Bracketed(Delimited(Ref("ColumnDefinitionSegment"), optional=True)), ), ), AnySetOf( Sequence("NOT", "NULL", optional=True), Sequence( "LANGUAGE", OneOf( "JAVA", "JAVASCRIPT", "PYTHON", "SCALA", "SQL", ), optional=True, ), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Sequence( "RUNTIME_VERSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "IMPORTS", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "PACKAGES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "HANDLER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "TARGET_PATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence("EXECUTE", "AS", OneOf("CALLER", "OWNER"), optional=True), optional=True, ), "AS", OneOf( # Either a foreign programming language UDF... Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Ref("DollarQuotedUDFBody"), # ...or a SQL UDF Ref("ScriptingBlockStatementSegment"), ), ) class AlterProcedureStatementSegment(BaseSegment): """A snowflake `ALTER ... PROCEDURE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-procedure.html """ type = "alter_procedure_statement" match_grammar = Sequence( "ALTER", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence("EXECUTE", "AS", OneOf("CALLER", "OWNER")), Sequence( "SET", OneOf(Ref("TagEqualsSegment"), Ref("CommentEqualsClauseSegment")) ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), "COMMENT" ), ), ), ) class ReturnStatementSegment(BaseSegment): """A snowflake `RETURN` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/return """ type = "return_statement" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment"), ) class ScriptingBlockStatementSegment(BaseSegment): """A snowflake `BEGIN ... END` statement for SQL scripting. https://docs.snowflake.com/en/sql-reference/snowflake-scripting/begin """ type = "scripting_block_statement" match_grammar = OneOf( Sequence( "BEGIN", Delimited( Ref("StatementSegment"), ), ), Sequence("END"), ) class ScriptingLetStatementSegment(BaseSegment): """A snowflake `LET` statement for SQL scripting. 
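e.g. an illustrative declaration followed by a later reassignment
(variable name and values are placeholders):

    LET counter NUMBER DEFAULT 0;
    counter := counter + 1;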
https://docs.snowflake.com/en/sql-reference/snowflake-scripting/let https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables """ type = "scripting_let_statement" match_grammar = OneOf( # Initial declaration and assignment Sequence( "LET", Ref("LocalVariableNameSegment"), OneOf( # Variable assignment OneOf( Sequence( Ref("DatatypeSegment"), OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), Sequence( OneOf("DEFAULT", Ref("WalrusOperatorSegment")), Ref("ExpressionSegment"), ), ), # Cursor assignment Sequence( "CURSOR", "FOR", OneOf(Ref("LocalVariableNameSegment"), Ref("SelectableGrammar")), ), # Resultset assignment Sequence( "RESULTSET", Ref("WalrusOperatorSegment"), Bracketed(Ref("SelectableGrammar")), ), ), ), # Subsequent assignment, see # https://docs.snowflake.com/en/developer-guide/snowflake-scripting/variables Sequence( Ref("LocalVariableNameSegment"), Ref("WalrusOperatorSegment"), OneOf( # Variable reassignment Ref("ExpressionSegment"), # Cursors cannot be reassigned # no code # Resultset reassignment Bracketed(Ref("SelectableGrammar")), ), ), ) class CreateFunctionStatementSegment(BaseSegment): """A snowflake `CREATE ... FUNCTION` statement for SQL and JavaScript functions. https://docs.snowflake.com/en/sql-reference/sql/create-function.html """ type = "create_function_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Sequence("SECURE", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), "RETURNS", OneOf( Ref("DatatypeSegment"), Sequence("TABLE", Bracketed(Delimited(Ref("ColumnDefinitionSegment")))), ), AnySetOf( Sequence("NOT", "NULL", optional=True), Sequence( "LANGUAGE", OneOf("JAVASCRIPT", "SQL", "PYTHON", "JAVA", "SCALA"), optional=True, ), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Sequence( "RUNTIME_VERSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Sequence( "IMPORTS", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "PACKAGES", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), optional=True, ), Sequence( "HANDLER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "TARGET_PATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), Sequence( "AS", OneOf( # Either a foreign programming language UDF... Ref("DoubleQuotedUDFBody"), Ref("SingleQuotedUDFBody"), Ref("DollarQuotedUDFBody"), # ...or a SQL UDF Ref("ScriptingBlockStatementSegment"), ), optional=True, ), ) class AlterFunctionStatementSegment(BaseSegment): """Snowflake `ALTER ... FUNCTION` and `ALTER ... EXTERNAL FUNCTION` statements. NOTE: `ALTER ... EXTERNAL FUNCTION` statements always use the `ALTER ... FUNCTION` syntax.
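e.g. illustrative statements (the function names and the bare-datatype
signature are placeholders):

    ALTER FUNCTION my_func(NUMBER) SET SECURE;
    ALTER FUNCTION my_func(NUMBER) RENAME TO my_better_func;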
https://docs.snowflake.com/en/sql-reference/sql/alter-function.html https://docs.snowflake.com/en/sql-reference/sql/alter-external-function.html """ type = "alter_function_statement" match_grammar = Sequence( "ALTER", "FUNCTION", Sequence("IF", "EXISTS", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), OneOf( Sequence("RENAME", "TO", Ref("FunctionNameSegment")), Sequence( "SET", OneOf( Ref("CommentEqualsClauseSegment"), Sequence( "API_INTEGRATION", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleQuotedIdentifierSegment"), Ref("EqualsSegment"), Ref("SingleQuotedIdentifierSegment"), ), ), ), ), Sequence( "CONTEXT_HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ContextHeadersGrammar"), ), ), ), Sequence( "MAX_BATCH_ROWS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), "SECURE", Sequence( OneOf("REQUEST_TRANSLATOR", "RESPONSE_TRANSLATOR"), Ref("EqualsSegment"), Ref("FunctionNameSegment"), ), ), ), Sequence( "UNSET", OneOf( "COMMENT", "HEADERS", "CONTEXT_HEADERS", "MAX_BATCH_ROWS", "COMPRESSION", "SECURE", "REQUEST_TRANSLATOR", "RESPONSE_TRANSLATOR", ), ), Sequence( "RENAME", "TO", Ref("SingleIdentifierGrammar"), ), ), ) class CreateExternalFunctionStatementSegment(BaseSegment): """A snowflake `CREATE ... EXTERNAL FUNCTION` statement for API integrations. https://docs.snowflake.com/en/sql-reference/sql/create-external-function.html """ type = "create_external_function_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Sequence("SECURE", optional=True), "EXTERNAL", "FUNCTION", Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), "RETURNS", Ref("DatatypeSegment"), Sequence(Ref.keyword("NOT", optional=True), "NULL", optional=True), OneOf( Sequence("CALLED", "ON", "NULL", "INPUT"), Sequence("RETURNS", "NULL", "ON", "NULL", "INPUT"), "STRICT", optional=True, ), OneOf("VOLATILE", "IMMUTABLE", optional=True), Ref("CommentEqualsClauseSegment", optional=True), "API_INTEGRATION", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), Sequence( "HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Sequence( Ref("SingleQuotedIdentifierSegment"), Ref("EqualsSegment"), Ref("SingleQuotedIdentifierSegment"), ), ), ), optional=True, ), Sequence( "CONTEXT_HEADERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ContextHeadersGrammar"), ), ), optional=True, ), Sequence( "MAX_BATCH_ROWS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), optional=True, ), Sequence( "REQUEST_TRANSLATOR", Ref("EqualsSegment"), Ref("FunctionNameSegment"), optional=True, ), Sequence( "RESPONSE_TRANSLATOR", Ref("EqualsSegment"), Ref("FunctionNameSegment"), optional=True, ), "AS", Ref("SingleQuotedIdentifierSegment"), ) class WarehouseObjectPropertiesSegment(BaseSegment): """A snowflake Warehouse Object Properties segment. https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html Note: comments are handled separately so not incorrectly marked as warehouse object. 
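e.g. properties as they might appear in a CREATE or ALTER WAREHOUSE
statement (the values here are illustrative only):

    AUTO_SUSPEND = 300 AUTO_RESUME = TRUE INITIALLY_SUSPENDED = TRUE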
""" type = "warehouse_object_properties" match_grammar = AnySetOf( Sequence( "WAREHOUSE_TYPE", Ref("EqualsSegment"), Ref("WarehouseType"), ), Sequence( "WAREHOUSE_SIZE", Ref("EqualsSegment"), Ref("WarehouseSize"), ), Sequence( "WAIT_FOR_COMPLETION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "MAX_CLUSTER_COUNT", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MIN_CLUSTER_COUNT", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "SCALING_POLICY", Ref("EqualsSegment"), Ref("ScalingPolicy"), ), Sequence( "AUTO_SUSPEND", Ref("EqualsSegment"), OneOf( Ref("NumericLiteralSegment"), "NULL", ), ), Sequence( "AUTO_RESUME", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "INITIALLY_SUSPENDED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "RESOURCE_MONITOR", Ref("EqualsSegment"), Ref("NakedIdentifierSegment"), ), ) class WarehouseObjectParamsSegment(BaseSegment): """A snowflake Warehouse Object Param segment. https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse.html """ type = "warehouse_object_properties" match_grammar = AnySetOf( Sequence( "MAX_CONCURRENCY_LEVEL", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "STATEMENT_QUEUED_TIMEOUT_IN_SECONDS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "STATEMENT_TIMEOUT_IN_SECONDS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), ) class ConstraintPropertiesSegment(BaseSegment): """CONSTRAINT clause for CREATE TABLE or ALTER TABLE command. https://docs.snowflake.com/en/sql-reference/constraints-properties.html """ type = "constraint_properties_segment" match_grammar = Sequence( Sequence("CONSTRAINT", Ref("QuotedLiteralSegment"), optional=True), OneOf( Sequence("UNIQUE", Bracketed(Ref("ColumnReferenceSegment"), optional=True)), Sequence( Ref("PrimaryKeyGrammar"), Bracketed(Ref("ColumnReferenceSegment"), optional=True), ), Sequence( Sequence( Ref("ForeignKeyGrammar"), Bracketed(Ref("ColumnReferenceSegment"), optional=True), optional=True, ), "REFERENCES", Ref("TableReferenceSegment"), Bracketed(Ref("ColumnReferenceSegment")), ), ), AnySetOf( OneOf(Sequence("NOT", optional=True), "ENFORCED"), OneOf(Sequence("NOT", optional=True), "DEFERRABLE"), OneOf("INITIALLY", OneOf("DEFERRED", "IMMEDIATE")), ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """A column option; each CREATE TABLE column can have 0 or more. https://docs.snowflake.com/en/sql-reference/sql/create-table.html """ match_grammar = AnySetOf( Sequence("COLLATE", Ref("CollationReferenceSegment")), Sequence( "DEFAULT", Ref("ExpressionSegment"), ), Sequence( OneOf("AUTOINCREMENT", "IDENTITY"), OneOf( Bracketed(Delimited(Ref("NumericLiteralSegment"))), Sequence( "START", Ref("NumericLiteralSegment"), "INCREMENT", Ref("NumericLiteralSegment"), ), optional=True, ), ), Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL Sequence( Sequence("WITH", optional=True), "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ), ), optional=True, ), ), Ref("TagBracketedEqualsSegment", optional=True), Ref("ConstraintPropertiesSegment"), Sequence("DEFAULT", Ref("QuotedLiteralSegment")), Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # DEFAULT "DEFAULT", OneOf( Ref("LiteralGrammar"), Ref("FunctionSegment"), # ?? 
Ref('IntervalExpressionSegment') ), ), Sequence( # REFERENCES reftable [ ( refcolumn) ] "REFERENCES", Ref("ColumnReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar", optional=True), ), ) class CopyOptionsSegment(BaseSegment): """A Snowflake CopyOptions statement. https://docs.snowflake.com/en/sql-reference/sql/create-table.html https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html """ type = "copy_options" match_grammar = OneOf( AnySetOf( Sequence("ON_ERROR", Ref("EqualsSegment"), Ref("CopyOptionOnErrorSegment")), Sequence("SIZE_LIMIT", Ref("EqualsSegment"), Ref("NumericLiteralSegment")), Sequence("PURGE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "RETURN_FAILED_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence( "MATCH_BY_COLUMN_NAME", Ref("EqualsSegment"), OneOf("CASE_SENSITIVE", "CASE_INSENSITIVE", "NONE"), ), Sequence( "ENFORCE_LENGTH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence( "TRUNCATECOLUMNS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence("FORCE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), ), AnySetOf( Sequence("OVERWRITE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence("SINGLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "MAX_FILE_SIZE", Ref("EqualsSegment"), Ref("NumericLiteralSegment") ), Sequence( "INCLUDE_QUERY_ID", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), Sequence( "DETAILED_OUTPUT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar") ), ), ) class CreateSchemaStatementSegment(ansi.CreateSchemaStatementSegment): """A `CREATE SCHEMA` statement. https://docs.snowflake.com/en/sql-reference/sql/create-schema.html """ type = "create_schema_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "SCHEMA", Ref("IfNotExistsGrammar", optional=True), Ref("SchemaReferenceSegment"), Sequence("WITH", "MANAGED", "ACCESS", optional=True), Ref("SchemaObjectParamsSegment", optional=True), Ref("TagBracketedEqualsSegment", optional=True), ) class AlterRoleStatementSegment(BaseSegment): """An `ALTER ROLE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-role.html """ type = "alter_role_statement" match_grammar = Sequence( "ALTER", "ROLE", Ref("IfExistsGrammar", optional=True), Ref("RoleReferenceSegment"), OneOf( Sequence( "SET", OneOf( Ref("RoleReferenceSegment"), Ref("TagEqualsSegment"), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), ), ), Sequence( "UNSET", OneOf( Ref("RoleReferenceSegment"), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), Sequence("COMMENT"), ), ), Sequence( "RENAME", "TO", OneOf( Ref("RoleReferenceSegment"), ), ), ), ) class CreateSequenceStatementSegment(BaseSegment): """A `CREATE SEQUENCE` statement. 
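e.g. an illustrative sketch (the sequence name is a placeholder):

    CREATE OR REPLACE SEQUENCE my_seq START WITH 1 INCREMENT BY 1;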
https://docs.snowflake.com/en/sql-reference/sql/alter-sequence """ type = "create_sequence_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "SEQUENCE", Ref("IfNotExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), Sequence("WITH", optional=True), Sequence( "START", Sequence("WITH", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), Sequence( "INCREMENT", Sequence("BY", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), OneOf("ORDER", "NOORDER", optional=True), Ref("CommentEqualsClauseSegment", optional=True), ) class AlterSequenceStatementSegment(BaseSegment): """An `ALTER SEQUENCE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-sequence """ type = "alter_sequence_statement" match_grammar = Sequence( "ALTER", "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("SequenceReferenceSegment"), Sequence( Sequence("SET", optional=True), AnySetOf( Sequence( "INCREMENT", Sequence("BY", optional=True), Ref("EqualsSegment", optional=True), Ref("IntegerSegment"), optional=True, ), OneOf( "ORDER", "NOORDER", ), Ref("CommentEqualsClauseSegment"), ), optional=True, ), Sequence("UNSET", "COMMENT", optional=True), Sequence("RENAME", "TO", Ref("SequenceReferenceSegment"), optional=True), ) class AlterSchemaStatementSegment(BaseSegment): """An `ALTER SCHEMA` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-schema.html """ type = "alter_schema_statement" match_grammar = Sequence( "ALTER", "SCHEMA", Sequence("IF", "EXISTS", optional=True), Ref("SchemaReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("SchemaReferenceSegment"), ), Sequence( "SWAP", "WITH", Ref("SchemaReferenceSegment"), ), Sequence( "SET", OneOf(Ref("SchemaObjectParamsSegment"), Ref("TagEqualsSegment")), ), Sequence( "UNSET", OneOf( Delimited( "DATA_RETENTION_TIME_IN_DAYS", "MAX_DATA_EXTENSION_TIME_IN_DAYS", "DEFAULT_DDL_COLLATION", "COMMENT", ), Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), ), Sequence(OneOf("ENABLE", "DISABLE"), Sequence("MANAGED", "ACCESS")), ), ) class SchemaObjectParamsSegment(BaseSegment): """A Snowflake Schema Object Param segment. https://docs.snowflake.com/en/sql-reference/sql/create-schema.html https://docs.snowflake.com/en/sql-reference/sql/alter-schema.html """ type = "schema_object_properties" match_grammar = AnySetOf( Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DEFAULT_DDL_COLLATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Ref("CommentEqualsClauseSegment"), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement. 
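As a hedged, minimal example covering a small subset of the options
(identifiers are placeholders):

    CREATE OR REPLACE TABLE my_table (id NUMBER COMMENT 'key')
    CLUSTER BY (id)
    COMMENT = 'illustrative table';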
A lot more options than ANSI https://docs.snowflake.com/en/sql-reference/sql/create-table.html """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Ref("TemporaryTransientGrammar", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: AnySetOf( Sequence( Bracketed( Delimited( Sequence( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("SingleIdentifierGrammar"), ), Ref("CommentClauseSegment", optional=True), ), ), ), optional=True, ), Sequence( "CLUSTER", "BY", OneOf( Ref("FunctionSegment"), Bracketed(Delimited(Ref("ExpressionSegment"))), ), optional=True, ), Sequence( "STAGE_FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True, ), Sequence( "STAGE_COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Sequence( "DATA_RETENTION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "MAX_DATA_EXTENSION_TIME_IN_DAYS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "CHANGE_TRACKING", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "DEFAULT_DDL_COLLATION", Ref("EqualsSegment"), Ref("QuotedLiteralGrammar"), optional=True, ), Sequence( "COPY", "GRANTS", optional=True, ), Sequence( Sequence("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("NakedIdentifierSegment"), "ON", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), optional=True, ), Ref("TagBracketedEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), OneOf( # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), # Create clone syntax Sequence("ClONE", Ref("TableReferenceSegment")), Sequence("USING", "TEMPLATE", Ref("SelectableGrammar")), optional=True, ), ), ) class CreateTaskSegment(BaseSegment): """A snowflake `CREATE TASK` statement. https://docs.snowflake.com/en/sql-reference/sql/create-task.html """ type = "create_task_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "TASK", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), Indent, AnyNumberOf( OneOf( Sequence( "WAREHOUSE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE", Ref("EqualsSegment"), Ref("WarehouseSize"), ), ), Sequence( "SCHEDULE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ALLOW_OVERLAPPING_EXECUTION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "USER_TASK_TIMEOUT_MS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), Sequence( "COPY", "GRANTS", ), Ref("CommentEqualsClauseSegment"), ), Sequence( "AFTER", Ref("ObjectReferenceSegment"), optional=True, ), Dedent, Sequence( "WHEN", Indent, Ref("TaskExpressionSegment"), Dedent, optional=True, ), Sequence( Ref.keyword("AS"), Indent, Ref("StatementSegment"), Dedent, ), ) class TaskExpressionSegment(BaseSegment): """Expressions for WHEN clause in TASK. e.g. 
"SYSTEM$STREAM_HAS_DATA('MYSTREAM')" """ type = "snowflake_task_expression_segment" match_grammar = Sequence( Delimited( OneOf( Ref("ExpressionSegment"), Sequence( Ref("SystemFunctionName"), Bracketed(Ref("QuotedLiteralSegment")), ), ), delimiter=OneOf(Ref("BooleanBinaryOperatorGrammar")), ) ) class CreateStatementSegment(BaseSegment): """A snowflake `CREATE` statement. https://docs.snowflake.com/en/sql-reference/sql/create.html """ type = "create_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), OneOf( Sequence("NETWORK", "POLICY"), Sequence("RESOURCE", "MONITOR"), "SHARE", "ROLE", "USER", "TAG", "WAREHOUSE", Sequence("NOTIFICATION", "INTEGRATION"), Sequence("SECURITY", "INTEGRATION"), Sequence("STORAGE", "INTEGRATION"), Sequence("MATERIALIZED", "VIEW"), Sequence("MASKING", "POLICY"), "PIPE", Sequence("EXTERNAL", "FUNCTION"), # Objects that also support clone "DATABASE", "SEQUENCE", ), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), # Next set are Notification Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-notification-integration.html AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "QUEUE"), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NOTIFICATION_PROVIDER", Ref("EqualsSegment"), OneOf( "AWS_SNS", "AZURE_EVENT_GRID", "GCP_PUBSUB", "AZURE_STORAGE_QUEUE", Ref("QuotedLiteralSegment"), ), ), # AWS specific params: Sequence( "AWS_SNS_TOPIC_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AWS_SNS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # Azure specific params: Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), OneOf( Sequence( "AZURE_STORAGE_QUEUE_PRIMARY_URI", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AZURE_EVENT_GRID_TOPIC_ENDPOINT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), # GCP specific params: OneOf( Sequence( "GCP_PUBSUB_SUBSCRIPTION_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "GCP_PUBSUB_TOPIC_NAME", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DIRECTION", Ref("EqualsSegment"), "OUTBOUND", optional=True, ), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # For tags Sequence( "ALLOWED_VALUES", Delimited( Ref("QuotedLiteralSegment"), ), ), # For network policy Sequence( "ALLOWED_IP_LIST", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), # For network policy Sequence( "BLOCKED_IP_LIST", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), ), # Next set are Storage Integration statements # https://docs.snowflake.com/en/sql-reference/sql/create-storage-integration.html AnySetOf( Sequence("TYPE", Ref("EqualsSegment"), "EXTERNAL_STAGE"), Sequence("ENABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "STORAGE_PROVIDER", Ref("EqualsSegment"), OneOf("S3", "AZURE", "GCS", Ref("QuotedLiteralSegment")), ), # Azure specific params: Sequence( "AZURE_TENANT_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), # AWS specific params: Sequence( "STORAGE_AWS_ROLE_ARN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "STORAGE_AWS_OBJECT_ACL", Ref("EqualsSegment"), StringParser("'bucket-owner-full-control'", LiteralSegment), ), Sequence( "STORAGE_ALLOWED_LOCATIONS", Ref("EqualsSegment"), OneOf( Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) 
) ), Bracketed( Ref("QuotedStarSegment"), ), ), ), Sequence( "STORAGE_BLOCKED_LOCATIONS", Ref("EqualsSegment"), Bracketed( Delimited( OneOf( Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) ) ), ), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), # Next set are Pipe statements # https://docs.snowflake.com/en/sql-reference/sql/create-pipe.html Sequence( Sequence( "AUTO_INGEST", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "ERROR_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "AWS_SNS_TOPIC", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), optional=True, ), optional=True, ), # Next are WAREHOUSE options # https://docs.snowflake.com/en/sql-reference/sql/create-warehouse.html Sequence( Sequence("WITH", optional=True), AnyNumberOf( Ref("WarehouseObjectPropertiesSegment"), Ref("CommentEqualsClauseSegment"), Ref("WarehouseObjectParamsSegment"), ), Ref("TagBracketedEqualsSegment", optional=True), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), Ref.keyword("AS", optional=True), OneOf( Ref("SelectStatementSegment"), Sequence( Bracketed(Ref("FunctionContentsGrammar"), optional=True), "RETURNS", Ref("DatatypeSegment"), Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), optional=True, ), Ref("CopyIntoTableStatementSegment"), optional=True, ), ) class CreateUserSegment(BaseSegment): """A snowflake `CREATE USER` statement. https://docs.snowflake.com/en/sql-reference/sql/create-user.html """ type = "create_user_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "USER", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), Indent, AnyNumberOf( Sequence( "PASSWORD", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "LOGIN_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DISPLAY_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "FIRST_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "MIDDLE_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "LAST_NAME", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "EMAIL", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "MUST_CHANGE_PASSWORD", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "DISABLED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "DAYS_TO_EXPIRY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "MINS_TO_UNLOCK", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DEFAULT_WAREHOUSE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_NAMESPACE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_ROLE", Ref("EqualsSegment"), OneOf( Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "DEFAULT_SECONDARY_ROLES", Ref("EqualsSegment"), Bracketed(Ref("QuotedLiteralSegment")), ), 
Sequence( "MINS_TO_BYPASS_MFA", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "RSA_PUBLIC_KEY", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "RSA_PUBLIC_KEY_2", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Ref("CommentEqualsClauseSegment"), ), Dedent, ) class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement, specifically for Snowflake's dialect. https://docs.snowflake.com/en/sql-reference/sql/create-view.html """ match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), AnySetOf( "SECURE", "RECURSIVE", ), Ref("TemporaryGrammar", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), AnySetOf( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CommentClauseSegment", optional=True), ), ), ), Sequence( Ref.keyword("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("NakedIdentifierSegment"), "ON", Bracketed( Delimited(Ref("ColumnReferenceSegment")), ), ), Ref("TagBracketedEqualsSegment"), Sequence("COPY", "GRANTS"), Ref("CommentEqualsClauseSegment"), # @TODO: Support column-level masking policy & tagging. ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), ) class AlterViewStatementSegment(BaseSegment): """An `ALTER VIEW` statement, specifically for Snowflake's dialect. https://docs.snowflake.com/en/sql-reference/sql/alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "UNSET", "COMMENT", ), Sequence( OneOf("SET", "UNSET"), "SECURE", ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Delimited( Sequence( "ADD", "ROW", "ACCESS", "POLICY", Ref("FunctionNameSegment"), "ON", Bracketed(Delimited(Ref("ColumnReferenceSegment"))), ), Sequence( "DROP", "ROW", "ACCESS", "POLICY", Ref("FunctionNameSegment"), ), ), Sequence( OneOf("ALTER", "MODIFY"), OneOf( Delimited( Sequence( Ref.keyword("COLUMN", optional=True), Ref("ColumnReferenceSegment"), OneOf( Sequence( "SET", "MASKING", "POLICY", Ref("FunctionNameSegment"), Sequence( "USING", Bracketed( Delimited(Ref("ColumnReferenceSegment")) ), optional=True, ), ), Sequence("UNSET", "MASKING", "POLICY"), Sequence("SET", Ref("TagEqualsSegment")), ), ), Sequence( "COLUMN", Ref("ColumnReferenceSegment"), "UNSET", "TAG", Delimited(Ref("TagReferenceSegment")), ), ), ), ), ), ) class AlterMaterializedViewStatementSegment(BaseSegment): """An `ALTER MATERIALIZED VIEW` statement, specifically for Snowflake's dialect. https://docs.snowflake.com/en/sql-reference/sql/alter-materialized-view.html """ type = "alter_materialized_view_statement" match_grammar = Sequence( "ALTER", "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("TableReferenceSegment")), Sequence("CLUSTER", "BY", Delimited(Ref("ExpressionSegment"))), Sequence("DROP", "CLUSTERING", "KEY"), Sequence("SUSPEND", "RECLUSTER"), Sequence("RESUME", "RECLUSTER"), "SUSPEND", "RESUME", Sequence( OneOf("SET", "UNSET"), OneOf( "SECURE", Ref("CommentEqualsClauseSegment"), Ref("TagEqualsSegment"), ), ), ), ) class CreateFileFormatSegment(BaseSegment): """A snowflake `CREATE FILE FORMAT` statement. 
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "create_file_format_segment" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("FILE", "FORMAT"), Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), # TYPE = is included in below parameter segments. # It is valid syntax to have TYPE = after other parameters. # Below parameters are either Delimited/AnyNumberOf. # Snowflake does allow mixed but this is not supported. # @TODO: Update below when an OptionallyDelimited Class is available. OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), Sequence( # Use a Sequence and include an optional CommaSegment here. # This allows a preceding comma when above parameters are delimited. Ref("CommaSegment", optional=True), Ref("CommentEqualsClauseSegment"), optional=True, ), ) class AlterFileFormatSegment(BaseSegment): """A snowflake `Alter FILE FORMAT` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-file-format.html """ type = "alter_file_format_segment" match_grammar = Sequence( "ALTER", Sequence("FILE", "FORMAT"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), ), ), Sequence( # Use a Sequence and include an optional CommaSegment here. # This allows a preceding comma when above parameters are delimited. Ref("CommaSegment", optional=True), Ref("CommentEqualsClauseSegment"), optional=True, ), ) class CsvFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for CSV. 
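e.g. parameters as they might appear in a CSV file format definition
(values are illustrative):

    TYPE = CSV FIELD_DELIMITER = '|' TRIM_SPACE = TRUE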
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "csv_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'CSV'", CodeSegment, type="file_type", ), StringParser( "CSV", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence("FILE_EXTENSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( "SKIP_HEADER", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( OneOf( "DATE_FORMAT", "TIME_FORMAT", "TIMESTAMP_FORMAT", ), Ref("EqualsSegment"), OneOf("AUTO", Ref("QuotedLiteralSegment")), ), Sequence("BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8")), Sequence( OneOf( "RECORD_DELIMITER", "FIELD_DELIMITER", "ESCAPE", "ESCAPE_UNENCLOSED_FIELD", "FIELD_OPTIONALLY_ENCLOSED_BY", ), Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"), optional=True)), ), Sequence( OneOf( "SKIP_BLANK_LINES", "ERROR_ON_COLUMN_COUNT_MISMATCH", "REPLACE_INVALID_CHARACTERS", "VALIDATE_UTF8", "EMPTY_FIELD_AS_NULL", "SKIP_BYTE_ORDER_MARK", "TRIM_SPACE", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "ENCODING", Ref("EqualsSegment"), OneOf( "UTF8", Ref("QuotedLiteralSegment"), ), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class JsonFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for JSON. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "json_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'JSON'", CodeSegment, type="file_type", ), StringParser( "JSON", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "DATE_FORMAT", "TIME_FORMAT", "TIMESTAMP_FORMAT", ), Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "AUTO"), ), Sequence("BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"), optional=True)), ), Sequence("FILE_EXTENSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( OneOf( "TRIM_SPACE", "ENABLE_OCTAL", "ALLOW_DUPLICATE", "STRIP_OUTER_ARRAY", "STRIP_NULL_VALUES", "REPLACE_INVALID_CHARACTERS", "IGNORE_UTF8_ERRORS", "SKIP_BYTE_ORDER_MARK", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class AvroFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for AVRO. 
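e.g. (illustrative values):

    TYPE = AVRO TRIM_SPACE = TRUE NULL_IF = ('NULL')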
https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "avro_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'AVRO'", CodeSegment, type="file_type", ), StringParser( "AVRO", CodeSegment, type="file_type", ), ), ), Sequence("COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType")), Sequence("TRIM_SPACE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class OrcFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for ORC. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "orc_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'ORC'", CodeSegment, type="file_type", ), StringParser( "ORC", CodeSegment, type="file_type", ), ), ), Sequence("TRIM_SPACE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar")), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class ParquetFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for PARQUET. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "parquet_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'PARQUET'", CodeSegment, type="file_type", ), StringParser( "PARQUET", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "SNAPPY_COMPRESSION", "BINARY_AS_TEXT", "TRIM_SPACE", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class XmlFileFormatTypeParameters(BaseSegment): """A Snowflake File Format Type Options segment for XML. https://docs.snowflake.com/en/sql-reference/sql/create-file-format.html """ type = "xml_file_format_type_parameters" _file_format_type_parameter = OneOf( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( StringParser( "'XML'", CodeSegment, type="file_type", ), StringParser( "XML", CodeSegment, type="file_type", ), ), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( OneOf( "IGNORE_UTF8_ERRORS", "PRESERVE_SPACE", "STRIP_OUTER_ELEMENT", "DISABLE_SNOWFLAKE_DATA", "DISABLE_AUTO_CONVERT", "SKIP_BYTE_ORDER_MARK", ), Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ) match_grammar = OneOf( Delimited(_file_format_type_parameter), AnyNumberOf(_file_format_type_parameter) ) class AlterPipeSegment(BaseSegment): """A snowflake `Alter PIPE` statement. 
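e.g. an illustrative sketch (the pipe name is a placeholder):

    ALTER PIPE my_pipe SET PIPE_EXECUTION_PAUSED = TRUE;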
https://docs.snowflake.com/en/sql-reference/sql/alter-pipe.html """ type = "alter_pipe_segment" match_grammar = Sequence( "ALTER", "PIPE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", AnyNumberOf( Sequence( "PIPE_EXECUTION_PAUSED", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Ref("CommentEqualsClauseSegment"), ), ), Sequence( "UNSET", OneOf("PIPE_EXECUTION_PAUSED", "COMMENT"), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "UNSET", Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), ), Sequence( "REFRESH", Sequence( "PREFIX", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "MODIFIED_AFTER", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), Ref("CommaSegment", optional=True), ) class FileFormatSegment(BaseSegment): """A Snowflake FILE_FORMAT Segment. https://docs.snowflake.com/en/sql-reference/sql/create-table.html https://docs.snowflake.com/en/sql-reference/sql/create-external-table.html https://docs.snowflake.com/en/sql-reference/sql/create-stage.html """ type = "file_format_segment" match_grammar = OneOf( OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), Bracketed( Sequence( OneOf( Sequence( "FORMAT_NAME", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ObjectReferenceSegment"), ), ), OneOf( Ref("CsvFileFormatTypeParameters"), Ref("JsonFileFormatTypeParameters"), Ref("AvroFileFormatTypeParameters"), Ref("OrcFileFormatTypeParameters"), Ref("ParquetFileFormatTypeParameters"), Ref("XmlFileFormatTypeParameters"), ), ), Ref("FormatTypeOptions", optional=True), ), ), ) class FormatTypeOptions(BaseSegment): """A Snowflake formatTypeOptions. https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#format-type-options https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html#format-type-options This part specifically works for the format: `FILE_FORMAT = (FORMAT_NAME = myformatname)` Another case: `FILE_FORMAT = (TYPE = mytype)` their fileFormatOptions are implemented in their specific `FormatTypeParameters` """ type = "format_type_options" match_grammar = OneOf( # COPY INTO , open for extension AnySetOf( Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType"), ), Sequence( "RECORD_DELIMITER", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "FIELD_DELIMITER", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "ESCAPE", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "ESCAPE_UNENCLOSED_FIELD", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "DATA_FORMAT", Ref("EqualsSegment"), OneOf("AUTO", Ref("QuotedLiteralSegment")), ), Sequence( "TIME_FORMAT", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "TIMESTAMP_FORMAT", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "BINARY_FORMAT", Ref("EqualsSegment"), OneOf("HEX", "BASE64", "UTF8") ), Sequence( "FIELD_OPTIONALITY_ENCLOSED_BY", Ref("EqualsSegment"), OneOf("NONE", Ref("QuotedLiteralSegment")), ), Sequence( "NULL_IF", Ref("EqualsSegment"), Bracketed(Delimited(Ref("QuotedLiteralSegment"))), ), Sequence( "EMPTY_FIELD_AS_NULL", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "SNAPPY_COMPRESSION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), # COPY INTO
, open for extension AnySetOf(), ) class CreateExternalTableSegment(BaseSegment): """A snowflake `CREATE EXTERNAL TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/create-external-table.html """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), "EXTERNAL", "TABLE", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("TableReferenceSegment"), # Columns: Bracketed( Delimited( Sequence( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), "AS", OptionallyBracketed( Sequence( Ref("ExpressionSegment"), Ref("TableConstraintSegment", optional=True), Sequence( Ref.keyword("NOT", optional=True), "NULL", optional=True ), ) ), ) ), optional=True, ), # The use of AnySetOf is not strictly correct here, because LOCATION and # FILE_FORMAT are required parameters. They can however be in arbitrary order # with the other parameters. AnySetOf( Sequence("INTEGRATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment")), Sequence( "PARTITION", "BY", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ), Sequence( Sequence("WITH", optional=True), "LOCATION", Ref("EqualsSegment"), Ref("StagePath"), ), Sequence( "REFRESH_ON_CREATE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "PATTERN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), Sequence( "AWS_SNS_TOPIC", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "COPY", "GRANTS", ), Sequence( Sequence("WITH", optional=True), "ROW", "ACCESS", "POLICY", Ref("NakedIdentifierSegment"), ), Ref("TagBracketedEqualsSegment"), Ref("CommentEqualsClauseSegment"), ), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause.""" match_grammar = OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("TableReferenceSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Ref("ValuesClauseSegment"), Sequence( Ref("StagePath"), Bracketed( Delimited( Sequence( "FILE_FORMAT", Ref("ParameterAssignerSegment"), Ref("FileFormatSegment"), ), Sequence( "PATTERN", Ref("ParameterAssignerSegment"), Ref("QuotedLiteralSegment"), ), ), optional=True, ), ), ) class PartitionBySegment(BaseSegment): """A `PARTITION BY` for `copy_into_location` functions.""" type = "partition_by_segment" match_grammar: Matchable = Sequence( "PARTITION", "BY", Indent, # Brackets are optional in a partition by statement OptionallyBracketed(Delimited(Ref("ExpressionSegment"))), Dedent, ) class CopyIntoLocationStatementSegment(BaseSegment): """A Snowflake `COPY INTO ` statement. 
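    A minimal illustrative example of a `COPY INTO <location>` statement that
    this segment should match (the stage and table names are placeholders):

    ```
    COPY INTO @my_stage/unload/
    FROM my_table
    FILE_FORMAT = (TYPE = 'CSV')
    HEADER = TRUE;
    ```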
# https://docs.snowflake.com/en/sql-reference/sql/copy-into-location.html """ type = "copy_into_location_statement" match_grammar = Sequence( "COPY", "INTO", Ref("StorageLocation"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), Sequence( "FROM", OneOf( Ref("TableReferenceSegment"), Bracketed(Ref("SelectStatementSegment")), ), optional=True, ), OneOf( Ref("S3ExternalStageParameters"), Ref("AzureBlobStorageExternalStageParameters"), optional=True, ), Ref("InternalStageParameters", optional=True), AnySetOf( Ref("PartitionBySegment"), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), Ref("CopyOptionsSegment"), Sequence( "VALIDATION_MODE", Ref("EqualsSegment"), Ref("ValidationModeOptionSegment"), ), Sequence( "HEADER", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), ) class CopyIntoTableStatementSegment(BaseSegment): """A Snowflake `COPY INTO
` statement. # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html """ type = "copy_into_table_statement" match_grammar = Sequence( "COPY", "INTO", Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnReferenceSegment")), optional=True), Sequence( "FROM", OneOf( Ref("StorageLocation"), Bracketed(Ref("SelectStatementSegment")), ), optional=True, ), OneOf( Ref("S3ExternalStageParameters"), Ref("AzureBlobStorageExternalStageParameters"), optional=True, ), Ref("InternalStageParameters", optional=True), AnySetOf( Sequence( "FILES", Ref("EqualsSegment"), Bracketed( Delimited( Ref("QuotedLiteralSegment"), ), ), ), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), ), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), ), Ref("CopyOptionsSegment"), ), Sequence( "VALIDATION_MODE", Ref("EqualsSegment"), Ref("ValidationModeOptionSegment"), optional=True, ), ) class StorageLocation(BaseSegment): """A Snowflake storage location. https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#syntax """ type = "storage_location" match_grammar = OneOf( Ref("StagePath"), Ref("S3Path"), Ref("GCSPath"), Ref("AzureBlobStoragePath"), ) class InternalStageParameters(BaseSegment): """Parameters for an internal stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "internal_stage_parameters" type = "stage_parameters" match_grammar = Sequence( Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( "TYPE", Ref("EqualsSegment"), Ref("SnowflakeEncryptionOption"), ), optional=True, ), ) class S3ExternalStageParameters(BaseSegment): """Parameters for an S3 external stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "s3_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( OneOf( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "CREDENTIALS", Ref("EqualsSegment"), Bracketed( OneOf( Sequence( "AWS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), "AWS_SECRET_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), Sequence( "AWS_TOKEN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence( "AWS_ROLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ), ), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( OneOf( Sequence( Sequence( "TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption"), optional=True, ), "MASTER_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence("TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption")), Sequence( "TYPE", Ref("EqualsSegment"), Ref("S3EncryptionOption"), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), Sequence("TYPE", Ref("EqualsSegment"), "NONE"), ) ), optional=True, ), ) class GCSExternalStageParameters(BaseSegment): """Parameters for a GCS external stage in Snowflake. 
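    These parameters typically appear inline within a `CREATE STAGE` or
    `ALTER STAGE` statement, e.g. (the integration and key names are
    placeholders, and the encryption type assumes the documented
    'GCS_SSE_KMS' option):

    ```
    STORAGE_INTEGRATION = my_gcs_integration
    ENCRYPTION = (TYPE = 'GCS_SSE_KMS' KMS_KEY_ID = 'my-key')
    ```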
https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "gcs_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( Sequence( Ref("GCSEncryptionOption"), Sequence( "KMS_KEY_ID", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "NONE", ), ) ), optional=True, ), ) class AzureBlobStorageExternalStageParameters(BaseSegment): """Parameters for an Azure Blob Storage external stage in Snowflake. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ name = "azure_blob_storage_external_stage_parameters" type = "stage_parameters" match_grammar = Sequence( OneOf( Sequence( "STORAGE_INTEGRATION", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "CREDENTIALS", Ref("EqualsSegment"), Bracketed( Sequence("AZURE_SAS_TOKEN"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), optional=True, ), Sequence( "ENCRYPTION", Ref("EqualsSegment"), Bracketed( Sequence( "TYPE", Ref("EqualsSegment"), OneOf( Sequence( Ref("AzureBlobStorageEncryptionOption"), Sequence( "MASTER_KEY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "NONE", ), ) ), optional=True, ), ) class CreateStageSegment(BaseSegment): """A Snowflake CREATE STAGE statement. https://docs.snowflake.com/en/sql-reference/sql/create-stage.html """ type = "create_stage_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref.keyword("TEMPORARY", optional=True), "STAGE", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), Indent, OneOf( # Internal stages Sequence( Ref("InternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ) ), optional=True, ), ), # External S3 stage Sequence( "URL", Ref("EqualsSegment"), Ref("S3Path"), Ref("S3ExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), optional=True, ), ), # External GCS stage Sequence( "URL", Ref("EqualsSegment"), Ref("GCSPath"), Ref("GCSExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), # External Azure Blob Storage stage Sequence( "URL", Ref("EqualsSegment"), Ref("AzureBlobStoragePath"), Ref("AzureBlobStorageExternalStageParameters", optional=True), Sequence( "DIRECTORY", Ref("EqualsSegment"), Bracketed( Sequence( "ENABLE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "AUTO_REFRESH", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "NOTIFICATION_INTEGRATION", Ref("EqualsSegment"), OneOf( Ref("NakedIdentifierSegment"), 
Ref("QuotedLiteralSegment"), ), optional=True, ), ), optional=True, ), ), optional=True, ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True ), Sequence( "COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Ref("TagBracketedEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), Dedent, ) class AlterStageSegment(BaseSegment): """A Snowflake ALTER STAGE statement. https://docs.snowflake.com/en/sql-reference/sql/alter-stage.html """ type = "alter_stage_statement" match_grammar = Sequence( "ALTER", "STAGE", Sequence("IF", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", Indent, OneOf( Sequence( OneOf( Ref("InternalStageParameters"), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("S3Path"), optional=True, ), Ref( "S3ExternalStageParameters", optional=True, ), ), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("GCSPath"), optional=True, ), Ref( "GCSExternalStageParameters", optional=True, ), ), Sequence( Sequence( "URL", Ref("EqualsSegment"), Ref("AzureBlobStoragePath"), optional=True, ), Ref( "AzureBlobStorageExternalStageParameters", optional=True, ), ), optional=True, ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("FileFormatSegment"), optional=True, ), Sequence( "COPY_OPTIONS", Ref("EqualsSegment"), Bracketed(Ref("CopyOptionsSegment")), optional=True, ), Ref("CommentEqualsClauseSegment", optional=True), ), Ref("TagEqualsSegment"), ), Dedent, ), Sequence( "REFRESH", Sequence( "SUBPATH", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), ), ) class CreateStreamStatementSegment(BaseSegment): """A Snowflake `CREATE STREAM` statement. https://docs.snowflake.com/en/sql-reference/sql/create-stream.html """ type = "create_stream_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), "STREAM", Ref("IfNotExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Sequence("COPY", "GRANTS", optional=True), "ON", OneOf( Sequence( OneOf("TABLE", "VIEW"), Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), Sequence( "APPEND_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "SHOW_INITIAL_ROWS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), ), Sequence( "EXTERNAL", "TABLE", Ref("ObjectReferenceSegment"), OneOf( Ref("FromAtExpressionSegment"), Ref("FromBeforeExpressionSegment"), optional=True, ), Sequence( "INSERT_ONLY", Ref("EqualsSegment"), Ref("TrueSegment"), optional=True, ), ), Sequence( "STAGE", Ref("ObjectReferenceSegment"), ), ), Ref("CommentEqualsClauseSegment", optional=True), ) class AlterStreamStatementSegment(BaseSegment): """A Snowflake `ALTER STREAM` statement. 
https://docs.snowflake.com/en/sql-reference/sql/alter-stream.html """ type = "alter_stream_statement" match_grammar = Sequence( "ALTER", "STREAM", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence( "SET", Sequence( "APPEND_ONLY", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), Sequence( "INSERT_ONLY", Ref("EqualsSegment"), Ref("TrueSegment"), optional=True, ), Ref("TagEqualsSegment", optional=True), Ref("CommentEqualsClauseSegment", optional=True), ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), "COMMENT", ), ), ), ) class ShowStatementSegment(BaseSegment): """A snowflake `SHOW` statement. https://docs.snowflake.com/en/sql-reference/sql/show.html """ _object_types_plural = OneOf( "PARAMETERS", Sequence("GLOBAL", "ACCOUNTS"), "REGIONS", Sequence("REPLICATION", "ACCOUNTS"), Sequence("REPLICATION", "DATABASES"), "PARAMETERS", "VARIABLES", "TRANSACTIONS", "LOCKS", "PARAMETERS", "FUNCTIONS", Sequence("NETWORK", "POLICIES"), "SHARES", "ROLES", "GRANTS", "USERS", "WAREHOUSES", "DATABASES", Sequence( OneOf("API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True), "INTEGRATIONS", ), "SCHEMAS", "OBJECTS", "TABLES", Sequence("EXTERNAL", "TABLES"), "VIEWS", Sequence("MATERIALIZED", "VIEWS"), Sequence("MASKING", "POLICIES"), "COLUMNS", Sequence("FILE", "FORMATS"), "SEQUENCES", "STAGES", "PIPES", "STREAMS", "TASKS", Sequence("USER", "FUNCTIONS"), Sequence("EXTERNAL", "FUNCTIONS"), "PROCEDURES", Sequence("FUTURE", "GRANTS"), ) _object_scope_types = OneOf( "ACCOUNT", "SESSION", Sequence( OneOf( "DATABASE", "SCHEMA", "SHARE", "ROLE", "TABLE", "TASK", "USER", "WAREHOUSE", "VIEW", ), Ref("ObjectReferenceSegment", optional=True), ), ) type = "show_statement" match_grammar = Sequence( "SHOW", OneOf("TERSE", optional=True), _object_types_plural, OneOf("HISTORY", optional=True), Sequence("LIKE", Ref("QuotedLiteralSegment"), optional=True), Sequence( OneOf("ON", "TO", "OF", "IN"), OneOf( Sequence(_object_scope_types), Ref("ObjectReferenceSegment"), ), optional=True, ), Sequence("STARTS", "WITH", Ref("QuotedLiteralSegment"), optional=True), Sequence("WITH", "PRIMARY", Ref("ObjectReferenceSegment"), optional=True), Sequence( Ref("LimitClauseSegment"), Sequence("FROM", Ref("QuotedLiteralSegment"), optional=True), optional=True, ), ) class AlterAccountStatementSegment(BaseSegment): """`ALTER ACCOUNT` statement. ALTER ACCOUNT SET { [ accountParams ] [ objectParams ] [ sessionParams ] } ALTER ACCOUNT UNSET [ , ... ] ALTER ACCOUNT SET RESOURCE_MONITOR = ALTER ACCOUNT SET { PASSWORD | SESSION } POLICY ALTER ACCOUNT UNSET { PASSWORD | SESSION } POLICY ALTER ACCOUNT SET TAG = '' [, = '' ...] ALTER ACCOUNT UNSET TAG [ , ... 
] https://docs.snowflake.com/en/sql-reference/sql/alter-account All the account parameters can be found here https://docs.snowflake.com/en/sql-reference/parameters """ type = "alter_account_statement" match_grammar = Sequence( "ALTER", "ACCOUNT", OneOf( Sequence( "SET", "RESOURCE_MONITOR", Ref("EqualsSegment"), Ref("NakedIdentifierSegment"), ), Sequence( "SET", OneOf("PASSWORD", "SESSION"), "POLICY", Ref("TableReferenceSegment"), ), Sequence( "SET", Ref("TagEqualsSegment"), ), Sequence( "SET", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), Ref("NakedIdentifierSegment"), ), ), ), ), Sequence( "UNSET", OneOf("PASSWORD", "SESSION"), "POLICY", ), Sequence( "UNSET", OneOf( Sequence("TAG", Delimited(Ref("TagReferenceSegment"))), Delimited(Ref("NakedIdentifierSegment")), ), ), ), ) class AlterUserStatementSegment(BaseSegment): """`ALTER USER` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-user.html All user parameters can be found here https://docs.snowflake.com/en/sql-reference/parameters.html """ type = "alter_user_statement" match_grammar = Sequence( "ALTER", "USER", Sequence("IF", "EXISTS", optional=True), Ref("RoleReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence("RESET", "PASSWORD"), Sequence("ABORT", "ALL", "QUERIES"), Sequence( "ADD", "DELEGATED", "AUTHORIZATION", "OF", "ROLE", Ref("ObjectReferenceSegment"), "TO", "SECURITY", "INTEGRATION", Ref("ObjectReferenceSegment"), ), Sequence( "REMOVE", "DELEGATED", OneOf( Sequence( "AUTHORIZATION", "OF", "ROLE", Ref("ObjectReferenceSegment") ), "AUTHORIZATIONS", ), "FROM", "SECURITY", "INTEGRATION", Ref("ObjectReferenceSegment"), ), # Snowflake supports the SET command with space delimited parameters, but # it also supports using commas which is better supported by `Delimited`, so # we will just use that. Sequence( "SET", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf(Ref("LiteralGrammar"), Ref("ObjectReferenceSegment")), ), ), ), Sequence("UNSET", Delimited(Ref("ParameterNameSegment"))), ), ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. Redefined because it's much simpler than postgres. https://docs.snowflake.com/en/sql-reference/sql/create-role.html """ match_grammar = Sequence( "CREATE", Sequence( "OR", "REPLACE", optional=True, ), "ROLE", Sequence( "IF", "NOT", "EXISTS", optional=True, ), Ref("RoleReferenceSegment"), Sequence( "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ) class ResourceMonitorOptionsSegment(BaseSegment): """A `RESOURCE MONITOR` options statement. 
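    Illustrative options as they might follow `WITH` in a
    `CREATE RESOURCE MONITOR` statement (all values are placeholders):

    ```
    CREDIT_QUOTA = 100
    FREQUENCY = MONTHLY
    START_TIMESTAMP = IMMEDIATELY
    TRIGGERS ON 90 PERCENT DO SUSPEND
    ```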
https://docs.snowflake.com/en/sql-reference/sql/create-resource-monitor https://docs.snowflake.com/en/sql-reference/sql/alter-resource-monitor """ type = "resource_monitor_options" match_grammar = AnySetOf( Sequence( "CREDIT_QUOTA", Ref("EqualsSegment"), Ref("IntegerSegment"), optional=True, ), Sequence( "FREQUENCY", Ref("EqualsSegment"), OneOf("MONTHLY", "DAILY", "WEEKLY", "YEARLY", "NEVER"), optional=True, ), Sequence( "START_TIMESTAMP", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "IMMEDIATELY"), optional=True, ), Sequence( "END_TIMESTAMP", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "NOTIFY_USERS", Ref("EqualsSegment"), Bracketed( Delimited( Ref("ObjectReferenceSegment"), ), ), optional=True, ), Sequence( "TRIGGERS", AnyNumberOf( Sequence( "ON", Ref("IntegerSegment"), "PERCENT", "DO", OneOf("SUSPEND", "SUSPEND_IMMEDIATE", "NOTIFY"), ), ), optional=True, ), ) class CreateResourceMonitorStatementSegment(BaseSegment): """A `CREATE RESOURCE MONITOR` statement. https://docs.snowflake.com/en/sql-reference/sql/create-resource-monitor """ type = "create_resource_monitor_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("RESOURCE", "MONITOR"), Ref("ObjectReferenceSegment"), "WITH", Ref("ResourceMonitorOptionsSegment"), ) class AlterResourceMonitorStatementSegment(BaseSegment): """An `ALTER RESOURCE MONITOR` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-resource-monitor """ type = "alter_resource_monitor_statement" match_grammar = Sequence( "ALTER", Sequence("RESOURCE", "MONITOR"), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), "SET", Ref("ResourceMonitorOptionsSegment"), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. EXPLAIN [ USING { TABULAR | JSON | TEXT } ] <statement> https://docs.snowflake.com/en/sql-reference/sql/explain.html """ match_grammar = Sequence( "EXPLAIN", Sequence( "USING", OneOf("TABULAR", "JSON", "TEXT"), optional=True, ), ansi.ExplainStatementSegment.explainable_stmt, ) class AlterSessionStatementSegment(BaseSegment): """Snowflake's ALTER SESSION statement. ``` ALTER SESSION SET <param_name> = <param_value>; ALTER SESSION UNSET <param_name> [ , <param_name> , ... ]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_statement" match_grammar = Sequence( "ALTER", "SESSION", OneOf( Ref("AlterSessionSetClauseSegment"), Ref("AlterSessionUnsetClauseSegment"), ), ) class AlterSessionSetClauseSegment(BaseSegment): """Snowflake's ALTER SESSION SET clause. ``` [ALTER SESSION] SET <param_name> = <param_value>; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_set_statement" match_grammar = Sequence( "SET", Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ) class AlterSessionUnsetClauseSegment(BaseSegment): """Snowflake's ALTER SESSION UNSET clause. ``` [ALTER SESSION] UNSET <param_name> [ , <param_name> , ... ]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-session.html """ type = "alter_session_unset_clause" match_grammar = Sequence( "UNSET", Delimited(Ref("ParameterNameSegment")), ) class AlterTaskStatementSegment(BaseSegment): """Snowflake's ALTER TASK statement.
``` ALTER TASK [IF EXISTS] <name> RESUME; ALTER TASK [IF EXISTS] <name> SUSPEND; ALTER TASK [IF EXISTS] <name> REMOVE AFTER <string>; ALTER TASK [IF EXISTS] <name> ADD AFTER <string>; ALTER TASK [IF EXISTS] <name> SET [WAREHOUSE = <string>] [SCHEDULE = <string>] [ALLOW_OVERLAPPING_EXECUTION = TRUE|FALSE]; ALTER TASK [IF EXISTS] <name> SET <parameter_name> = <parameter_value> [ , <parameter_name> = <parameter_value> , ...]; ALTER TASK [IF EXISTS] <name> UNSET <parameter_name> [ , <parameter_name> , ... ]; ALTER TASK [IF EXISTS] <name> MODIFY AS <sql>; ALTER TASK [IF EXISTS] <name> MODIFY WHEN <boolean_expression>; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_statement" match_grammar = Sequence( "ALTER", "TASK", Sequence("IF", "EXISTS", optional=True), Ref("ObjectReferenceSegment"), OneOf( "RESUME", "SUSPEND", Sequence("REMOVE", "AFTER", Ref("ObjectReferenceSegment")), Sequence("ADD", "AFTER", Ref("ObjectReferenceSegment")), Ref("AlterTaskSpecialSetClauseSegment"), Ref("AlterTaskSetClauseSegment"), Ref("AlterTaskUnsetClauseSegment"), Sequence( "MODIFY", "AS", ansi.ExplainStatementSegment.explainable_stmt, ), Sequence("MODIFY", "WHEN", Ref("BooleanLiteralGrammar")), ), ) class AlterTaskSpecialSetClauseSegment(BaseSegment): """Snowflake's ALTER TASK special SET clause. ``` [ALTER TASK <name>] SET [WAREHOUSE = <string>] [SCHEDULE = <string>] [ALLOW_OVERLAPPING_EXECUTION = TRUE|FALSE]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_special_set_clause" match_grammar = Sequence( "SET", AnySetOf( Sequence( "WAREHOUSE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), optional=True, ), Sequence( "SCHEDULE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), Sequence( "ALLOW_OVERLAPPING_EXECUTION", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), optional=True, ), min_times=1, ), ) class AlterTaskSetClauseSegment(BaseSegment): """Snowflake's ALTER TASK SET clause. ``` [ALTER TASK <name>] SET <parameter_name> = <parameter_value> [ , <parameter_name> = <parameter_value> , ...]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_set_clause" match_grammar = Sequence( "SET", Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), ) class AlterTaskUnsetClauseSegment(BaseSegment): """Snowflake's ALTER TASK UNSET clause. ``` [ALTER TASK <name>] UNSET <parameter_name> [ , <parameter_name> , ... ]; ``` https://docs.snowflake.com/en/sql-reference/sql/alter-task.html """ type = "alter_task_unset_clause" match_grammar = Sequence( "UNSET", Delimited(Ref("ParameterNameSegment")), ) class ExecuteImmediateClauseSegment(BaseSegment): """Snowflake's EXECUTE IMMEDIATE clause. ``` EXECUTE IMMEDIATE '<string_literal>' [ USING ( <variable> [ , <variable> ... ] ) ] EXECUTE IMMEDIATE <variable> [ USING ( <variable> [ , <variable> ... ] ) ] EXECUTE IMMEDIATE $<session_variable> [ USING ( <variable> [ , <variable> ... ] ) ] ``` https://docs.snowflake.com/en/sql-reference/sql/execute-immediate """ type = "execute_immediate_clause" match_grammar = Sequence( "EXECUTE", "IMMEDIATE", OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment"), Sequence( Ref("ColonSegment"), Ref("LocalVariableNameSegment"), ), ), Sequence( "USING", Bracketed(Delimited(Ref("LocalVariableNameSegment"))), optional=True, ), ) class ExecuteTaskClauseSegment(BaseSegment): """Snowflake's EXECUTE TASK clause.
``` EXECUTE TASK ``` https://docs.snowflake.com/en/sql-reference/sql/execute-task """ type = "execute_task_clause" match_grammar = Sequence( "EXECUTE", "TASK", Ref("ObjectReferenceSegment"), ) ############################ # MERGE ############################ class MergeUpdateClauseSegment(ansi.MergeUpdateClauseSegment): """`UPDATE` clause within the `MERGE` statement.""" match_grammar = Sequence( "UPDATE", Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class MergeDeleteClauseSegment(ansi.MergeDeleteClauseSegment): """`DELETE` clause within the `MERGE` statement.""" match_grammar = Sequence( "DELETE", Ref("WhereClauseSegment", optional=True), ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement.""" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, Ref("ValuesClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://docs.snowflake.com/en/sql-reference/sql/delete.html """ type = "delete_statement" match_grammar = Sequence( "DELETE", "FROM", Ref("TableReferenceSegment"), Ref("AliasExpressionSegment", optional=True), Sequence( "USING", Indent, Delimited( Sequence( Ref("TableExpressionSegment"), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, optional=True, ), Ref("WhereClauseSegment", optional=True), ) class DescribeStatementSegment(BaseSegment): """`DESCRIBE` statement grammar. https://docs.snowflake.com/en/sql-reference/sql/desc.html """ type = "describe_statement" match_grammar = Sequence( OneOf("DESCRIBE", "DESC"), OneOf( # https://docs.snowflake.com/en/sql-reference/sql/desc-result.html Sequence( "RESULT", OneOf( Ref("QuotedLiteralSegment"), Sequence("LAST_QUERY_ID", Bracketed()), ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-network-policy.html Sequence( "NETWORK", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-share.html Sequence( "SHARE", Ref("ObjectReferenceSegment"), Sequence( Ref("DotSegment"), Ref("ObjectReferenceSegment"), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-user.html Sequence( "USER", Ref("ObjectReferenceSegment"), ), Sequence( "WAREHOUSE", Ref("ObjectReferenceSegment"), ), Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-integration.html Sequence( OneOf("API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True), "INTEGRATION", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-session-policy.html Sequence( "SESSION", "POLICY", Ref("ObjectReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-table.html Sequence( "TABLE", Ref("TableReferenceSegment"), Sequence( "TYPE", Ref("EqualsSegment"), OneOf("COLUMNS", "STAGE"), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-external-table.html Sequence( "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), Sequence( "TYPE", Ref("EqualsSegment"), OneOf("COLUMNS", "STAGE"), optional=True, ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-view.html Sequence( "VIEW", Ref("TableReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-materialized-view.html Sequence( "MATERIALIZED", "VIEW", Ref("TableReferenceSegment"), ), # 
https://docs.snowflake.com/en/sql-reference/sql/desc-sequence.html Sequence( "SEQUENCE", Ref("SequenceReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-masking-policy.html Sequence( "MASKING", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-row-access-policy.html Sequence( "ROW", "ACCESS", "POLICY", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-file-format.html Sequence( "FILE", "FORMAT", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-stage.html Sequence( "STAGE", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-pipe.html Sequence( "PIPE", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-stream.html Sequence( "STREAM", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-task.html Sequence( "TASK", Ref("ObjectReferenceSegment"), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-function.html Sequence( "FUNCTION", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), ), # https://docs.snowflake.com/en/sql-reference/sql/desc-procedure.html Sequence( "PROCEDURE", Ref("FunctionNameSegment"), Bracketed( Delimited( Ref("DatatypeSegment"), optional=True, ), ), ), ), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """`BEGIN`, `START TRANSACTION`, `COMMIT`, AND `ROLLBACK` statement grammar. Overwrites ANSI to match correct Snowflake grammar. https://docs.snowflake.com/en/sql-reference/sql/begin.html https://docs.snowflake.com/en/sql-reference/sql/commit.html https://docs.snowflake.com/en/sql-reference/sql/rollback.html """ match_grammar = OneOf( Sequence( "BEGIN", OneOf("WORK", "TRANSACTION", optional=True), Sequence("NAME", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( "START", "TRANSACTION", Sequence("NAME", Ref("ObjectReferenceSegment"), optional=True), ), Sequence( "COMMIT", Sequence("WORK", optional=True), ), "ROLLBACK", ) class TruncateStatementSegment(ansi.TruncateStatementSegment): """`TRUNCATE TABLE` statement. https://docs.snowflake.com/en/sql-reference/sql/truncate-table.html """ match_grammar = Sequence( "TRUNCATE", Ref.keyword("TABLE", optional=True), Sequence("IF", "EXISTS", optional=True), Ref("TableReferenceSegment"), ) class UnsetStatementSegment(BaseSegment): """An `UNSET` statement. https://docs.snowflake.com/en/sql-reference/sql/unset.html """ type = "unset_statement" match_grammar = Sequence( "UNSET", OneOf( Ref("LocalVariableNameSegment"), Bracketed( Delimited( Ref("LocalVariableNameSegment"), ), ), ), ) class UndropStatementSegment(BaseSegment): """`UNDROP` statement. DATABASE: https://docs.snowflake.com/en/sql-reference/sql/undrop-database.html SCHEMA: https://docs.snowflake.com/en/sql-reference/sql/undrop-schema.html TABLE: https://docs.snowflake.com/en/sql-reference/sql/undrop-table.html """ type = "undrop_statement" match_grammar = Sequence( "UNDROP", OneOf( Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ), Sequence( "SCHEMA", Ref("SchemaReferenceSegment"), ), Sequence( "TABLE", Ref("TableReferenceSegment"), ), ), ) class CommentStatementSegment(BaseSegment): """`COMMENT` statement grammar. https://docs.snowflake.com/en/sql-reference/sql/comment.html N.B. this applies to all objects, so there may be some I've missed here so add any others to the OneOf grammar below. 
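    For example (illustrative; the object name and comment are placeholders):

    ```
    COMMENT IF EXISTS ON TABLE my_schema.my_table IS 'Main fact table';
    ```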
""" type = "comment_statement" match_grammar = Sequence( "COMMENT", Sequence( "IF", "EXISTS", optional=True, ), "ON", OneOf( "COLUMN", "TABLE", "VIEW", "SCHEMA", "DATABASE", "WAREHOUSE", "USER", "STAGE", "FUNCTION", "PROCEDURE", "SEQUENCE", "SHARE", "PIPE", "STREAM", "TASK", Sequence( "NETWORK", "POLICY", ), Sequence( OneOf( "API", "NOTIFICATION", "SECURITY", "STORAGE", ), "INTEGRATION", ), Sequence( "SESSION", "POLICY", ), Sequence( "EXTERNAL", "TABLE", ), Sequence( "MATERIALIZED", "VIEW", ), Sequence( "MASKING", "POLICY", ), Sequence( "ROW", "ACCESS", "POLICY", ), Sequence( "FILE", "FORMAT", ), ), Ref("ObjectReferenceSegment"), "IS", Ref("QuotedLiteralSegment"), ) class UseStatementSegment(ansi.UseStatementSegment): """A `USE` statement. https://docs.snowflake.com/en/sql-reference/sql/use.html """ match_grammar = Sequence( "USE", OneOf( Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("WAREHOUSE", Ref("ObjectReferenceSegment")), Sequence( Ref.keyword("DATABASE", optional=True), Ref("DatabaseReferenceSegment"), ), Sequence( Ref.keyword("SCHEMA", optional=True), Ref("SchemaReferenceSegment"), ), Sequence( "SECONDARY", "ROLES", OneOf( "ALL", "NONE", ), ), ), ) class CallStatementSegment(BaseSegment): """`CALL` statement. https://docs.snowflake.com/en/sql-reference/sql/call.html """ type = "call_statement" match_grammar = Sequence( "CALL", Sequence( Ref("FunctionNameSegment"), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), parse_mode=ParseMode.GREEDY, ), ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A `LIMIT` clause. https://docs.snowflake.com/en/sql-reference/constructs/limit.html """ match_grammar = OneOf( Sequence( "LIMIT", Indent, Ref("LimitLiteralGrammar"), Dedent, Sequence( "OFFSET", Indent, Ref("LimitLiteralGrammar"), Dedent, optional=True, ), ), Sequence( Sequence( "OFFSET", Indent, Ref("LimitLiteralGrammar"), OneOf( "ROW", "ROWS", optional=True, ), Dedent, optional=True, ), "FETCH", Indent, OneOf( "FIRST", "NEXT", optional=True, ), Ref("LimitLiteralGrammar"), OneOf( "ROW", "ROWS", optional=True, ), Ref.keyword("ONLY", optional=True), Dedent, ), ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement.""" match_grammar = ansi.SelectClauseSegment.match_grammar.copy( terminators=[Ref.keyword("FETCH"), Ref.keyword("OFFSET")], ) class OrderByClauseSegment(ansi.OrderByClauseSegment): """An `ORDER BY` clause. https://docs.snowflake.com/en/sql-reference/constructs/order-by.html """ match_grammar = Sequence( "ORDER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=["LIMIT", "FETCH", "OFFSET", Ref("FrameClauseUnitGrammar")], ), Dedent, ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. 
https://docs.snowflake.com/en/sql-reference/functions-analytic.html#window-frame-syntax-and-usage """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Ref("ReferencedVariableNameSegment"), "UNBOUNDED", ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class DropProcedureStatementSegment(BaseSegment): """A snowflake `DROP PROCEDURE ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-procedure.html """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", "PROCEDURE", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), ) class DropExternalTableStatementSegment(BaseSegment): """A snowflake `DROP EXTERNAL TABLE ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-external-table.html """ type = "drop_external_table_statement" match_grammar = Sequence( "DROP", "EXTERNAL", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement.""" type = "drop_function_statement" match_grammar = Sequence( "DROP", Ref.keyword("EXTERNAL", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), Ref("FunctionParameterListGrammar"), ) class DropMaterializedViewStatementSegment(BaseSegment): """A snowflake `DROP MATERIALIZED VIEW ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop-materialized-view.html """ type = "drop_materialized_view_statement" match_grammar = Sequence( "DROP", "MATERIALIZED", "VIEW", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class DropObjectStatementSegment(BaseSegment): """A snowflake `DROP ...` statement. https://docs.snowflake.com/en/sql-reference/sql/drop.html """ type = "drop_object_statement" match_grammar = Sequence( "DROP", OneOf( Sequence( OneOf( "CONNECTION", Sequence("FILE", "FORMAT"), Sequence( OneOf( "API", "NOTIFICATION", "SECURITY", "STORAGE", optional=True ), "INTEGRATION", ), "PIPE", Sequence("ROW", "ACCESS", "POLICY"), "STAGE", "STREAM", "TAG", "TASK", ), Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), ), Sequence( OneOf(Sequence("RESOURCE", "MONITOR"), "SHARE"), Ref("ObjectReferenceSegment"), ), Sequence( OneOf( Sequence("MANAGED", "ACCOUNT"), Sequence("MASKING", "POLICY"), ), Ref("SingleIdentifierGrammar"), ), Sequence( OneOf( Sequence("NETWORK", "POLICY"), ), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Sequence( OneOf("WAREHOUSE", Sequence("SESSION", "POLICY")), Ref("IfExistsGrammar", optional=True), Ref("SingleIdentifierGrammar"), ), Sequence( "SEQUENCE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ), ), ) class ListStatementSegment(BaseSegment): """A snowflake `LIST @ ...` statement. https://docs.snowflake.com/en/sql-reference/sql/list.html """ type = "list_statement" match_grammar = Sequence( OneOf("LIST", "LS"), Ref("StagePath"), Sequence( "PATTERN", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True ), ) class GetStatementSegment(BaseSegment): """A snowflake `GET @ ...` statement. 
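    An illustrative example (the stage and local directory are placeholders;
    the internal stage path comes first, followed by the local file URI,
    which is assumed to lex as an unquoted file path):

    ```
    GET @my_stage/results/ file:///tmp/data/ PARALLEL = 10;
    ```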
https://docs.snowflake.com/en/sql-reference/sql/get.html """ type = "get_statement" match_grammar = Sequence( "GET", Ref("StagePath"), OneOf( Ref("UnquotedFilePath"), Ref("QuotedLiteralSegment"), ), AnySetOf( Sequence( "PARALLEL", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf( Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment") ), ), ), ) class PutStatementSegment(BaseSegment): """A snowflake `PUT ...` statement. https://docs.snowflake.com/en/sql-reference/sql/put.html """ type = "put_statement" match_grammar = Sequence( "PUT", OneOf( Ref("UnquotedFilePath"), Ref("QuotedLiteralSegment"), ), Ref("StagePath"), AnySetOf( Sequence( "PARALLEL", Ref("EqualsSegment"), Ref("IntegerSegment"), ), Sequence( "AUTO_COMPRESS", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "SOURCE_COMPRESSION", Ref("EqualsSegment"), Ref("CompressionType") ), Sequence( "OVERWRITE", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), ), ) class RemoveStatementSegment(BaseSegment): """A snowflake `REMOVE @ ...` statement. https://docs.snowflake.com/en/sql-reference/sql/remove.html """ type = "remove_statement" match_grammar = Sequence( OneOf( "REMOVE", "RM", ), Ref("StagePath"), Sequence( "PATTERN", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), Ref("ReferencedVariableNameSegment")), optional=True, ), ) class SetOperatorSegment(ansi.SetOperatorSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), "MINUS", ) class ShorthandCastSegment(BaseSegment): """A casting operation using '::'.""" type = "cast_expression" match_grammar: Matchable = Sequence( OneOf( Ref("Expression_D_Grammar"), Ref("CaseExpressionSegment"), ), AnyNumberOf( Sequence( Ref("CastOperatorSegment"), Ref("DatatypeSegment"), OneOf( Ref("TimeZoneGrammar"), AnyNumberOf( Ref("ArrayAccessorSegment"), ), optional=True, ), ), min_times=1, ), ) class AlterDatabaseSegment(BaseSegment): """An `ALTER DATABASE` statement. https://docs.snowflake.com/en/sql-reference/sql/alter-database """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", "DATABASE", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence("SWAP", "WITH", Ref("ObjectReferenceSegment")), Sequence( "SET", OneOf( Ref("TagEqualsSegment"), Delimited( Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), OneOf( Ref("BooleanLiteralGrammar"), Ref("QuotedLiteralSegment"), Ref("NumericLiteralSegment"), ), ), ), ), ), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "UNSET", Delimited( AnySetOf( "DATA_RETENTION_TIME_IN_DAYS", "MAX_DATA_EXTENSION_TIME_IN_DAYS", "DEFAULT_DDL_COLLATION", "COMMENT", ), ), ), ), ) class AlterMaskingPolicySegment(BaseSegment): """An `ALTER MASKING POLICY` statement. 
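    An illustrative example (the policy name and comment are placeholders):

    ```
    ALTER MASKING POLICY IF EXISTS email_mask SET COMMENT = 'masks email addresses';
    ```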
https://docs.snowflake.com/en/sql-reference/sql/alter-masking-policy """ type = "alter_masking_policy" match_grammar = Sequence( "ALTER", "MASKING", "POLICY", Ref("IfExistsGrammar", optional=True), Ref("ObjectReferenceSegment"), OneOf( Sequence("RENAME", "TO", Ref("ObjectReferenceSegment")), Sequence( "SET", "BODY", Ref("FunctionAssignerSegment"), Ref("ExpressionSegment"), ), Sequence("SET", Ref("TagEqualsSegment")), Sequence("UNSET", "TAG", Delimited(Ref("TagReferenceSegment"))), Sequence( "SET", "COMMENT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment") ), Sequence("UNSET", "COMMENT"), ), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_snowflake_keywords.py000066400000000000000000000142301451700765000255300ustar00rootroot00000000000000"""A list of all Snowflake SQL key words. https://docs.snowflake.com/en/sql-reference/reserved-keywords.html """ snowflake_reserved_keywords = """ALL ALTER AND ANY AS BETWEEN BY CAST CHECK CONNECT CONNECTION CONSTRAINT CREATE CURRENT CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DELETE DISTINCT DO DROP ELSE EXISTS FOLLOWING FOR FROM FULL GRANT GROUP GSCLUSTER HAVING ILIKE IN INCREMENT INNER INSERT INSERT_ONLY INTERSECT INTO IS JOIN LATERAL LEFT LIKE LOCALTIME LOCALTIMESTAMP MATCH_RECOGNIZE MINUS NATURAL NOT NOTIFY NULL NULL_IF OF ON OR ORDER QUALIFY REGEXP REVOKE RIGHT RLIKE ROW ROWS SAMPLE SELECT SET SOME START STRICT TABLE TABLESAMPLE THEN TO TRIGGER TRY_CAST UNION UNIQUE UPDATE UNPIVOT USING VALUES WHEN WHENEVER WHERE WITH """ snowflake_unreserved_keywords = """ABORT ABORT_STATEMENT ACCESS ACCOUNT ACCOUNTS ADD ADMIN AFTER ALLOWED_IP_LIST ALLOWED_VALUES ALLOW_DUPLICATE ALLOW_OVERLAPPING_EXECUTION API API_INTEGRATION APPEND_ONLY APPLY ARRAY ASC AT ATTACH AUTHORIZATION AUTHORIZATIONS AUTO AUTOINCREMENT AUTO_COMPRESS AUTO_INCREMENT AUTO_INGEST AUTO_REFRESH AUTO_RESUME AUTO_SUSPEND AVRO AWS_KEY_ID AWS_ROLE AWS_SECRET_KEY AWS_SNS AWS_SNS_ROLE_ARN AWS_SNS_TOPIC AWS_SNS_TOPIC_ARN AWS_TOKEN AZURE AZURE_EVENT_GRID AZURE_EVENT_GRID_TOPIC_ENDPOINT AZURE_SAS_TOKEN AZURE_STORAGE_QUEUE AZURE_STORAGE_QUEUE_PRIMARY_URI AZURE_TENANT_ID BASE64 BEFORE BEGIN BERNOULLI BINARY BINARY_AS_TEXT BINARY_FORMAT BINDING BLOCK BLOCKED_IP_LIST BODY BROTLI BZ2 CACHE CALL CALLED CALLER CASCADE CASE CASES CASE_INSENSITIVE CASE_SENSITIVE CHAIN CHANGES CHANGE_TRACKING CHARACTER CLONE CLUSTER CLUSTERING COLLATE COLUMN COLUMNS COMMENT COMMIT COMPRESSION CONCURRENTLY CONNECT_BY_ROOT CONTEXT_HEADERS CONTINUE COPY COPY_OPTIONS CREDENTIALS CREDIT_QUOTA CROSS CSV CUBE CURRENT_ACCOUNT CURRENT_CLIENT CURRENT_DATABASE CURRENT_IP_ADDRESS CURRENT_REGION CURRENT_ROLE CURRENT_SCHEMA CURRENT_SCHEMAS CURRENT_SESSION CURRENT_STATEMENT CURRENT_TRANSACTION CURRENT_VERSION CURRENT_WAREHOUSE CURSOR CYCLE DATA DATABASE DATABASES DATA_FORMAT DATA_RETENTION_TIME_IN_DAYS DATE DATE_FORMAT DAILY DAYS_TO_EXPIRY DEFAULT DEFAULT_DDL_COLLATION DEFAULT_NAMESPACE DEFAULT_ROLE DEFAULT_SECONDARY_ROLES DEFAULT_WAREHOUSE DEFERRABLE DEFERRED DEFINE DEFLATE DELEGATED DESC DESCRIBE DETAILED_OUTPUT DIRECTION DIRECTORY DISABLE DISABLED DISABLE_AUTO_CONVERT DISABLE_SNOWFLAKE_DATA DISPLAY_NAME DOMAIN DOUBLE ECONOMY EMAIL EMPTY EMPTY_FIELD_AS_NULL ENABLE ENABLED ENABLE_OCTAL ENCODING ENCRYPTION END END_TIMESTAMP ENFORCED ENFORCE_LENGTH ENUM ERROR_INTEGRATION ERROR_ON_COLUMN_COUNT_MISMATCH ESCAPE ESCAPE_UNENCLOSED_FIELD EXCEPT EXCHANGE EXCLUDE EXECUTE EXECUTION EXPLAIN EXTENSION EXTERNAL EXTERNAL_STAGE FETCH FIELD_DELIMITER FIELD_OPTIONALLY_ENCLOSED_BY FILE FILES FILE_EXTENSION FILE_FORMAT FILTER FINAL FIRST FIRST_NAME FOR 
FORCE FOREIGN FORMAT FORMATS FORMAT_NAME FREQUENCY FUNCTION FUNCTIONS FUTURE GCP_PUBSUB GCP_PUBSUB_SUBSCRIPTION_NAME GCP_PUBSUB_TOPIC_NAME GCS GET GLOBAL GRANTED GRANTS GROUPING GZIP HANDLER HEADER HEADERS HEX HISTORY IDENTIFIER IDENTITY IF IGNORE IGNORE_UTF8_ERRORS IMMEDIATE IMMEDIATELY IMMUTABLE IMPORT IMPORTS IMPORTED INCLUDE_QUERY_ID INDEX INFORMATION INITIALLY INITIALLY_SUSPENDED INPUT INTEGRATION INTEGRATIONS INTERVAL ISSUE JAVA JAVASCRIPT JSON KEY KMS_KEY_ID LANGUAGE LARGE LAST LAST_NAME LAST_QUERY_ID LAST_TRANSACTION LET LIMIT LIST LISTING LOCAL LOCATION LOCKS LOGIN_NAME LS LZO M MANAGE MANAGED MASKING MASTER_KEY MATCH MATCHED MATCHES MATCH_BY_COLUMN_NAME MATERIALIZED MAXVALUE MAX_BATCH_ROWS MAX_CLUSTER_COUNT MAX_CONCURRENCY_LEVEL MAX_DATA_EXTENSION_TIME_IN_DAYS MAX_FILE_SIZE MAX_SIZE MEASURES MERGE MIDDLE_NAME MINS_TO_BYPASS_MFA MINS_TO_UNLOCK MINVALUE MIN_CLUSTER_COUNT ML MODEL MODIFIED_AFTER MODIFY MONITOR MONTHLY MUST_CHANGE_PASSWORD NAME NAN NETWORK NEXT NEXTVAL NEVER NO NOCACHE NOCYCLE NONE NOORDER NOTIFICATION NOTIFICATION_INTEGRATION NOTIFICATION_PROVIDER NOTIFY_USERS NULLS NULL_IF OBJECT OBJECTS OFFSET OMIT ONE ONLY ON_ERROR OPERATE OPTIMIZATION OPTION OPTIONS ORC ORGANIZATION OUTBOUND OUTER OVER OVERLAPS OVERRIDE OVERWRITE OWNER OWNERSHIP PACKAGES PARALLEL PARAMETERS PARQUET PARTITION PASSWORD PAST PATTERN FIELD_OPTIONALITY_ENCLOSED_BY PER PERCENT PERMUTE PIPE PIPES PIPE_EXECUTION_PAUSED PIVOT POLICIES POLICY PRECEDING PRECISION PREFIX PRESERVE_SPACE PRIMARY PRIOR PRIVILEGES PROCEDURE PROCEDURES PUBLIC PURGE PUT PYTHON QUERIES QUEUE RANGE RAW_DEFLATE READ RECLUSTER RECORD_DELIMITER RECURSIVE REFERENCES REFERENCE_USAGE REFRESH REFRESH_ON_CREATE REGIONS REMOVE RENAME REPEATABLE REPLACE REPLACE_INVALID_CHARACTERS REPLICATION REQUEST_TRANSLATOR RESET RESOURCE RESOURCE_MONITOR RESPECT RESPONSE_TRANSLATOR RESTRICT RESTRICTIONS RESULT RESULTSET RESUME RETURN RETURNS RETURN_ALL_ERRORS RETURN_ERRORS RETURN_FAILED_ONLY RM ROLE ROLES ROLLBACK ROLLUP ROUTINE ROUTINES ROW RSA_PUBLIC_KEY RSA_PUBLIC_KEY_2 RUNNING RUNTIME_VERSION S3 SCALA SCALING_POLICY SCHEDULE SCHEMA SCHEMAS SEARCH SECONDARY SECURE SECURITY SEED SEPARATOR SEQUENCE SEQUENCES SERVER SESSION SESSION_USER SETS SHARE SHARES SHARE_RESTRICTIONS SHOW SHOW_INITIAL_ROWS SINGLE SIZE_LIMIT SKIP SKIP_BLANK_LINES SKIP_BYTE_ORDER_MARK SKIP_FILE SKIP_HEADER SNAPPY SNAPPY_COMPRESSION SNOWFLAKE_FULL SNOWFLAKE_SSE SOURCE_COMPRESSION SQL STAGE STAGES STAGE_COPY_OPTIONS STAGE_FILE_FORMAT STANDARD STARTS START_TIMESTAMP STATEMENT STATEMENT_QUEUED_TIMEOUT_IN_SECONDS STATEMENT_TIMEOUT_IN_SECONDS STORAGE STORAGE_ALLOWED_LOCATIONS STORAGE_AWS_OBJECT_ACL STORAGE_AWS_ROLE_ARN STORAGE_BLOCKED_LOCATIONS STORAGE_INTEGRATION STORAGE_PROVIDER STREAM STREAMS STRIP_NULL_VALUES STRIP_OUTER_ARRAY STRIP_OUTER_ELEMENT SUBPATH SUPPORT SUSPEND SUSPEND_IMMEDIATE SUSPENDED SWAP SYSDATE SYSTEM TABLES TABLESPACE TABULAR TAG TARGET_PATH TASK TASKS TEMP TEMPLATE TEMPORARY TERSE TEXT TIME TIMESTAMP TIMESTAMP_FORMAT TIME_FORMAT TOP TRANSACTION TRANSACTIONS TRANSIENT TRIM_SPACE TRIGGERS TRUNCATE TRUNCATECOLUMNS TYPE UNBOUNDED UNDROP UNMATCHED UNSET UNSIGNED URL US USAGE USE USER USERS USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE USER_TASK_TIMEOUT_MS USE_ANY_ROLE UTF8 VALIDATE_UTF8 VALIDATION_MODE VALUE VARIABLES VARIANT VARYING VERSION VIEW VIEWS VOLATILE WAIT_FOR_COMPLETION WAREHOUSE WAREHOUSES WAREHOUSE_SIZE WAREHOUSE_TYPE WEEKLY WINDOW WITH WITHIN WITHOUT WORK WOY WRAPPER WRITE XML YEARLY ZONE ZSTD """ 
sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_soql.py000066400000000000000000000063371451700765000225770ustar00rootroot00000000000000"""The SOQL dialect. https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( BaseSegment, CodeSegment, LiteralSegment, OneOf, Ref, RegexLexer, Sequence, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") soql_dialect = ansi_dialect.copy_as("soql") soql_dialect.insert_lexer_matchers( [ # Date and datetime literals as per: # https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_dateformats.htm RegexLexer( "datetime_literal", r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(Z|(\+|\-)[0-9]{2}:[0-9]{2})", # noqa E501 CodeSegment, ), RegexLexer( "date_literal", r"[0-9]{4}-[0-9]{2}-[0-9]{2}", CodeSegment, ), ], before="numeric_literal", ) date_literals = { "YESTERDAY", "TODAY", "TOMORROW", "LAST_WEEK", "THIS_WEEK", "NEXT_WEEK", "LAST_MONTH", "THIS_MONTH", "NEXT_MONTH", "LAST_90_DAYS", "NEXT_90_DAYS", "THIS_QUARTER", "LAST_QUARTER", "NEXT_QUARTER", "THIS_YEAR", "LAST_YEAR", "NEXT_YEAR", "THIS_FISCAL_QUARTER", "LAST_FISCAL_QUARTER", "NEXT_FISCAL_QUARTER", "THIS_FISCAL_YEAR", "LAST_FISCAL_YEAR", "NEXT_FISCAL_YEAR", } date_n_literals = { "LAST_N_DAYS", "NEXT_N_DAYS", "LAST_N_WEEKS", "NEXT_N_WEEKS", "LAST_N_MONTHS", "NEXT_N_MONTHS", "LAST_N_QUARTERS", "NEXT_N_QUARTERS", "LAST_N_YEARS", "NEXT_N_YEARS", "LAST_N_FISCAL_QUARTERS", "NEXT_N_FISCAL_QUARTERS", "LAST_N_FISCAL_YEARS", "NEXT_N_FISCAL_YEARS", } soql_dialect.sets("reserved_keywords").update(date_literals | date_n_literals) soql_dialect.sets("bare_functions").update(date_literals) class DateLiteralNSegment(BaseSegment): """A Date literal keyword that takes the :n integer suffix. https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_dateformats.htm """ type = "date_n_literal" match_grammar = Sequence( OneOf(*date_n_literals), Ref("ColonSegment"), Ref("NumericLiteralSegment"), allow_gaps=False, ) soql_dialect.replace( Expression_C_Grammar=ansi_dialect.get_grammar("Expression_C_Grammar").copy( insert=[ Ref("DateLiteralNSegment"), ] ), DateTimeLiteralGrammar=OneOf( TypedParser("date_literal", LiteralSegment, type="date_literal"), TypedParser("datetime_literal", LiteralSegment, type="datetime_literal"), Sequence( OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal" ), ), ), ) class StatementSegment(ansi.StatementSegment): """SOQL seems to only support SELECT statements. https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql.htm """ match_grammar = Ref("SelectableGrammar") sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_sparksql.py000066400000000000000000003061141451700765000234550ustar00rootroot00000000000000"""The ANSI Compliant SparkSQL dialect. Inherits from ANSI. Spark SQL ANSI Mode is more restrictive regarding keywords than the Default Mode, and still shares some syntax with hive. 
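For example, ANSI mode reserves keywords such as SELECT, which then cannot be
used as bare identifiers, although they remain valid when quoted with
backticks.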
Based on: https://spark.apache.org/docs/latest/sql-ref.html https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, Anything, BaseSegment, Bracketed, BracketedSegment, CodeSegment, CommentSegment, ComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, Indent, KeywordSegment, LiteralSegment, Matchable, MultiStringParser, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, Sequence, StringLexer, StringParser, SymbolSegment, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects import dialect_hive as hive from sqlfluff.dialects.dialect_sparksql_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") hive_dialect = load_raw_dialect("hive") sparksql_dialect = ansi_dialect.copy_as("sparksql") sparksql_dialect.patch_lexer_matchers( [ # Spark SQL, only -- is used for single-line comment RegexLexer( "inline_comment", r"(--)[^\n]*", CommentSegment, segment_kwargs={"trim_start": "--"}, ), # == and <=> are valid equal operations # <=> is a non-null equals in Spark SQL # https://spark.apache.org/docs/latest/api/sql/index.html#_10 RegexLexer("equals", r"==|<=>|=", CodeSegment), # identifiers are delimited with ` # within a delimited identifier, ` is used to escape special characters, # including ` # Ex: select `delimited `` with escaped` from `just delimited` # https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier RegexLexer( "back_quote", r"`([^`]|``)*`", CodeSegment, ), # Numeric literal matches integers, decimals, and exponential formats. # https://spark.apache.org/docs/latest/sql-ref-literals.html#numeric-literal # Pattern breakdown: # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # 3 distinct groups here: # 1. Obvious fractional types # (can optionally be exponential). # 2. Integer followed by exponential. # These must be fractional types. # 3. Integer only. # These can either be integral or # fractional types. # # (?> 1. # \d+\.\d+ e.g. 123.456 # |\d+\. e.g. 123. # |\.\d+ e.g. .123 # ) # ([eE][+-]?\d+)? Optional exponential. # ([dDfF]|BD|bd)? Fractional data types. # |\d+[eE][+-]?\d+([dDfF]|BD|bd)? 2. Integer + exponential with # fractional data types. # |\d+([dDfFlLsSyY]|BD|bd)? 3. Integer only with integral or # fractional data types. # ) # ( # (?<=\.) If matched character ends with . # (e.g. 123.) then don't worry about # word boundary check. # |(?=\b) Check that we are at word boundary to # avoid matching valid naked identifiers # (e.g. 123column). # ) RegexLexer( "numeric_literal", ( r"(?>(?>\d+\.\d+|\d+\.|\.\d+)([eE][+-]?\d+)?([dDfF]|BD|bd)?" r"|\d+[eE][+-]?\d+([dDfF]|BD|bd)?" 
r"|\d+([dDfFlLsSyY]|BD|bd)?)" r"((?<=\.)|(?=\b))" ), CodeSegment, ), ] ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "bytes_single_quote", r"X'([^'\\]|\\.)*'", CodeSegment, ), RegexLexer( "bytes_double_quote", r'X"([^"\\]|\\.)*"', CodeSegment, ), ], before="single_quote", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "at_sign_literal", r"@\w*", CodeSegment, ), ], before="word", ) sparksql_dialect.insert_lexer_matchers( [ RegexLexer( "file_literal", ( r"[a-zA-Z0-9]*:?([a-zA-Z0-9\-_\.]*(\/|\\)){2,}" r"((([a-zA-Z0-9\-_\.]*(:|\?|=|&)[a-zA-Z0-9\-_\.]*)+)" r"|([a-zA-Z0-9\-_\.]*\.[a-z]+))" ), CodeSegment, ), ], before="newline", ) # Set the bare functions sparksql_dialect.sets("bare_functions").clear() sparksql_dialect.sets("bare_functions").update( [ "CURRENT_DATE", "CURRENT_TIMESTAMP", "CURRENT_USER", ] ) # Set the datetime units sparksql_dialect.sets("datetime_units").clear() sparksql_dialect.sets("datetime_units").update( [ "YEAR", "YEARS", "YYYY", "YY", "QUARTER", "QUARTERS", "MONTH", "MONTHS", "MON", "MM", "WEEK", "WEEKS", "DAY", "DAYS", "DD", "HOUR", "HOURS", "MINUTE", "MINUTES", "SECOND", "SECONDS", "MILLISECOND", "MILLISECONDS", "MICROSECOND", "MICROSECONDS", ] ) # Set Keywords sparksql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) sparksql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) # Set Angle Bracket Pairs sparksql_dialect.bracket_sets("angle_bracket_pairs").update( [ ("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False), ] ) # Real Segments sparksql_dialect.replace( ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("EqualsSegment_a"), Ref("EqualsSegment_b"), Ref("GreaterThanSegment"), Ref("LessThanSegment"), Ref("GreaterThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment"), Ref("NotEqualToSegment"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), Sequence("CLUSTER", "BY"), Sequence("DISTRIBUTE", "BY"), Sequence("SORT", "BY"), "HAVING", "QUALIFY", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "KEYS", ), TemporaryGrammar=Sequence( Sequence("GLOBAL", optional=True), OneOf("TEMP", "TEMPORARY"), ), QuotedLiteralSegment=OneOf( TypedParser("single_quote", LiteralSegment, type="quoted_literal"), TypedParser("double_quote", LiteralSegment, type="quoted_literal"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar").copy( insert=[ Ref("BytesQuotedLiteralSegment"), ] ), NaturalJoinKeywordsGrammar=Sequence( "NATURAL", Ref("JoinTypeKeywords", optional=True), ), LikeGrammar=OneOf( # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-like.html # ilike: https://github.com/apache/spark/pull/33966/files Sequence( OneOf("LIKE", "ILIKE"), OneOf( "ALL", "ANY", # `SOME` is equivalent to `ANY` "SOME", optional=True, ), ), "RLIKE", "REGEXP", ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("SingleQuotedIdentifierSegment"), Ref("BackQuotedIdentifierSegment"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence( OneOf( "CLUSTER", "DISTRIBUTE", "GROUP", "ORDER", "SORT", ), "BY", ), Sequence("ORDER", "BY"), Sequence("DISTRIBUTE", "BY"), "HAVING", "QUALIFY", "WINDOW", "OVERLAPS", "APPLY", ), GroupByClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "DISTRIBUTE", "CLUSTER", "SORT", ), "BY", ), "LIMIT", "HAVING", "WINDOW", ), 
HavingClauseTerminatorGrammar=OneOf( Sequence( OneOf( "ORDER", "CLUSTER", "DISTRIBUTE", "SORT", ), "BY", ), "LIMIT", "QUALIFY", "WINDOW", ), ArithmeticBinaryOperatorGrammar=OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), Ref("BitwiseLShiftSegment"), Ref("BitwiseRShiftSegment"), Ref("DivBinaryOperatorSegment"), ), BinaryOperatorGrammar=OneOf( Ref("ArithmeticBinaryOperatorGrammar"), Ref("StringBinaryOperatorGrammar"), Ref("BooleanBinaryOperatorGrammar"), Ref("ComparisonOperatorGrammar"), # Add arrow operators for lambdas (e.g. aggregate) Ref("RightArrowOperator"), ), AccessorGrammar=AnyNumberOf( Ref("ArrayAccessorSegment"), # Add in semi structured expressions Ref("SemiStructuredAccessorSegment"), ), ObjectReferenceTerminatorGrammar=OneOf( "ON", "AS", "USING", Ref("CommaSegment"), Ref("CastOperatorSegment"), Ref("StartSquareBracketSegment"), Ref("StartBracketSegment"), Ref("BinaryOperatorGrammar"), Ref("DelimiterGrammar"), Ref("JoinLikeClauseGrammar"), BracketedSegment, ), FunctionContentsExpressionGrammar=OneOf( Ref("ExpressionSegment"), Ref("StarSegment"), ), ) sparksql_dialect.add( FileLiteralSegment=TypedParser("file_literal", LiteralSegment, type="file_literal"), BackQuotedIdentifierSegment=TypedParser( "back_quote", IdentifierSegment, type="quoted_identifier", trim_chars=("`",), ), NakedSemiStructuredElementSegment=RegexParser( r"[A-Z0-9_]*", CodeSegment, type="semi_structured_element", ), QuotedSemiStructuredElementSegment=TypedParser( "single_quote", CodeSegment, type="semi_structured_element", ), RightArrowOperator=StringParser("->", SymbolSegment, type="binary_operator"), BinaryfileKeywordSegment=StringParser( "BINARYFILE", KeywordSegment, type="file_format", ), JsonfileKeywordSegment=StringParser( "JSONFILE", KeywordSegment, type="file_format", ), RcfileKeywordSegment=StringParser("RCFILE", KeywordSegment, type="file_format"), SequencefileKeywordSegment=StringParser( "SEQUENCEFILE", KeywordSegment, type="file_format" ), TextfileKeywordSegment=StringParser("TEXTFILE", KeywordSegment, type="file_format"), StartAngleBracketSegment=StringParser( "<", SymbolSegment, type="start_angle_bracket" ), EndAngleBracketSegment=StringParser(">", SymbolSegment, type="end_angle_bracket"), EqualsSegment_a=StringParser("==", ComparisonOperatorSegment), EqualsSegment_b=StringParser("<=>", ComparisonOperatorSegment), FileKeywordSegment=MultiStringParser( ["FILE", "FILES"], KeywordSegment, type="file_keyword" ), JarKeywordSegment=MultiStringParser( ["JAR", "JARS"], KeywordSegment, type="file_keyword" ), NoscanKeywordSegment=StringParser("NOSCAN", KeywordSegment, type="keyword"), WhlKeywordSegment=StringParser("WHL", KeywordSegment, type="file_keyword"), # Add relevant Hive Grammar CommentGrammar=hive_dialect.get_grammar("CommentGrammar"), LocationGrammar=hive_dialect.get_grammar("LocationGrammar"), SerdePropertiesGrammar=hive_dialect.get_grammar("SerdePropertiesGrammar"), StoredAsGrammar=hive_dialect.get_grammar("StoredAsGrammar"), StoredByGrammar=hive_dialect.get_grammar("StoredByGrammar"), StorageFormatGrammar=hive_dialect.get_grammar("StorageFormatGrammar"), TerminatedByGrammar=hive_dialect.get_grammar("TerminatedByGrammar"), # Add Spark Grammar PropertyGrammar=Sequence( Ref("PropertyNameSegment"), Ref("EqualsSegment", optional=True), OneOf( Ref("LiteralGrammar"), Ref("SingleIdentifierGrammar"), ), ), 
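# Illustrative example (assumed) of what PropertyGrammar matches: the
# individual pairs in OPTIONS (header 'true', delimiter ',') or in
# TBLPROPERTIES ('created.by.user' = 'jane'). The equals sign between the
# name and the value may be omitted, which is why EqualsSegment is
# optional above.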
PropertyNameListGrammar=Delimited(Ref("PropertyNameSegment")), BracketedPropertyNameListGrammar=Bracketed(Ref("PropertyNameListGrammar")), PropertyListGrammar=Delimited(Ref("PropertyGrammar")), BracketedPropertyListGrammar=Bracketed(Ref("PropertyListGrammar")), OptionsGrammar=Sequence("OPTIONS", Ref("BracketedPropertyListGrammar")), BucketSpecGrammar=Sequence( Ref("ClusteredBySpecGrammar"), Ref("SortedBySpecGrammar", optional=True), "INTO", Ref("NumericLiteralSegment"), "BUCKETS", ), ClusteredBySpecGrammar=Sequence( "CLUSTERED", "BY", Ref("BracketedColumnReferenceListGrammar"), ), DatabasePropertiesGrammar=Sequence( "DBPROPERTIES", Ref("BracketedPropertyListGrammar") ), DataSourcesV2FileTypeGrammar=OneOf( # https://github.com/apache/spark/tree/master/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2 # noqa: E501 # Separated here because these allow for additional # commands such as Select From File # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # Spark Core Data Sources # https://spark.apache.org/docs/latest/sql-data-sources.html "AVRO", "CSV", "JSON", "PARQUET", "ORC", # Separated here because these allow for additional commands # Similar to DataSourcesV2 "DELTA", # https://github.com/delta-io/delta "CSV", "ICEBERG", "TEXT", "BINARYFILE", ), FileFormatGrammar=OneOf( Ref("DataSourcesV2FileTypeGrammar"), "SEQUENCEFILE", "TEXTFILE", "RCFILE", "JSONFILE", Sequence( "INPUTFORMAT", Ref("QuotedLiteralSegment"), "OUTPUTFORMAT", Ref("QuotedLiteralSegment"), ), ), TimestampAsOfGrammar=Sequence( "TIMESTAMP", "AS", "OF", OneOf( Ref("QuotedLiteralSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), ), ), VersionAsOfGrammar=Sequence( "VERSION", "AS", "OF", Ref("NumericLiteralSegment"), ), # Adding Hint related segments so they are not treated as generic comments # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html StartHintSegment=StringParser("/*+", SymbolSegment, type="start_hint"), EndHintSegment=StringParser("*/", SymbolSegment, type="end_hint"), PartitionSpecGrammar=Sequence( OneOf( "PARTITION", Sequence("PARTITIONED", "BY"), ), Bracketed( Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), ), ), PartitionFieldGrammar=Sequence( "PARTITION", "FIELD", Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), Sequence( Ref.keyword("WITH", optional=True), Delimited( OneOf( Ref("ColumnDefinitionSegment"), Sequence( Ref("ColumnReferenceSegment"), Ref("EqualsSegment", optional=True), Ref("LiteralGrammar", optional=True), Ref("CommentGrammar", optional=True), ), Ref("IcebergTransformationSegment", optional=True), ), ), optional=True, ), Sequence("AS", Ref("NakedIdentifierSegment"), optional=True), ), # NB: Redefined from `NakedIdentifierSegment` which uses an anti-template to # not match keywords; however, SparkSQL allows keywords to be used in table # and runtime properties. 
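# e.g. (illustrative): SET spark.sql.sources.default=parquet; here a path
# component such as "default" could collide with a keyword, so this parser
# deliberately carries no anti-template.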
PropertiesNakedIdentifierSegment=RegexParser( r"[A-Z0-9]*[A-Z][A-Z0-9]*", IdentifierSegment, type="properties_naked_identifier", ), ResourceFileGrammar=OneOf( Ref("JarKeywordSegment"), Ref("WhlKeywordSegment"), Ref("FileKeywordSegment"), ), ResourceLocationGrammar=Sequence( "USING", Ref("ResourceFileGrammar"), Ref("QuotedLiteralSegment"), ), SortedBySpecGrammar=Sequence( "SORTED", "BY", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ) ) ), optional=True, ), UnsetTablePropertiesGrammar=Sequence( "UNSET", "TBLPROPERTIES", Ref("IfExistsGrammar", optional=True), Ref("BracketedPropertyNameListGrammar"), ), TablePropertiesGrammar=Sequence( "TBLPROPERTIES", Ref("BracketedPropertyListGrammar") ), BytesQuotedLiteralSegment=OneOf( TypedParser( "bytes_single_quote", LiteralSegment, type="bytes_quoted_literal", ), TypedParser( "bytes_double_quote", LiteralSegment, type="bytes_quoted_literal", ), ), JoinTypeKeywords=OneOf( "CROSS", "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), Sequence( Ref.keyword("LEFT", optional=True), "SEMI", ), Sequence( Ref.keyword("LEFT", optional=True), "ANTI", ), ), AtSignLiteralSegment=TypedParser( "at_sign_literal", LiteralSegment, type="at_sign_literal", trim_chars=("@",), ), # This is the same as QuotedLiteralSegment but # is given a different `name` to stop LT01 flagging # TODO: Work out how the LT01 change influence this. SignedQuotedLiteralSegment=OneOf( TypedParser( "single_quote", LiteralSegment, type="signed_quoted_literal", ), TypedParser( "double_quote", LiteralSegment, type="signed_quoted_literal", ), ), # Delta Live Tables CREATE TABLE and VIEW statements OrRefreshGrammar=Sequence("OR", "REFRESH"), # Databricks widget WidgetNameIdentifierSegment=RegexParser( r"[A-Z][A-Z0-9_]*", CodeSegment, type="widget_name_identifier", ), WidgetDefaultGrammar=Sequence( "DEFAULT", Ref("QuotedLiteralSegment"), ), TableDefinitionSegment=Sequence( OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("EXTERNAL", optional=True), Ref.keyword("STREAMING", optional=True), Ref.keyword("LIVE", optional=True), "TABLE", Ref("IfNotExistsGrammar", optional=True), OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( # Columns and comment syntax: Bracketed( Delimited( Sequence( OneOf( Ref("ColumnDefinitionSegment"), Ref("GeneratedColumnDefinitionSegment"), ), Ref("CommentGrammar", optional=True), ), ), ), # Like Syntax Sequence( "LIKE", OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ), optional=True, ), Ref("UsingClauseSegment", optional=True), AnySetOf( Ref("RowFormatClauseSegment"), Ref("StoredAsGrammar"), Ref("CommentGrammar"), Ref("OptionsGrammar"), Ref("PartitionSpecGrammar"), Ref("BucketSpecGrammar"), optional=True, ), Indent, AnyNumberOf( Ref("LocationGrammar", optional=True), Ref("CommentGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), ), Dedent, # Create AS syntax: Sequence( Ref.keyword("AS", optional=True), OptionallyBracketed(Ref("SelectableGrammar")), optional=True, ), ), ) # Adding Hint related grammar before comment `block_comment` and # `single_quote` so they are applied before comment lexer so # hints are treated as such instead of comments when parsing. 
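# e.g. (illustrative): in SELECT /*+ BROADCAST(t1) */ a FROM t1, the
# leading "/*+" must be lexed as a hint start rather than as the opening
# of a block comment.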
# https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html sparksql_dialect.insert_lexer_matchers( [ StringLexer("start_hint", "/*+", CodeSegment), ], before="block_comment", ) sparksql_dialect.insert_lexer_matchers( [ StringLexer("end_hint", "*/", CodeSegment), ], before="single_quote", ) sparksql_dialect.insert_lexer_matchers( # Lambda expressions: # https://github.com/apache/spark/blob/b4c019627b676edf850c00bb070377896b66fad2/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseLexer.g4#L396 # https://github.com/apache/spark/blob/b4c019627b676edf850c00bb070377896b66fad2/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBaseParser.g4#L837-L838 [ StringLexer("right_arrow", "->", CodeSegment), ], before="like_operator", ) class SQLConfPropertiesSegment(BaseSegment): """A SQL Config Option.""" type = "sql_conf_option" match_grammar = Sequence( StringParser("-", SymbolSegment, type="dash"), StringParser("v", SymbolSegment, type="sql_conf_option"), allow_gaps=False, ) class DivBinaryOperatorSegment(BaseSegment): """DIV type binary_operator.""" type = "binary_operator" match_grammar = Ref.keyword("DIV") class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", Indent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) # Hive Segments class RowFormatClauseSegment(hive.RowFormatClauseSegment): """`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement.""" pass class SkewedByClauseSegment(hive.SkewedByClauseSegment): """`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement.""" pass # Primitive Data Types class PrimitiveTypeSegment(BaseSegment): """Spark SQL Primitive data types. https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "primitive_type" match_grammar = OneOf( "BOOLEAN", # TODO : not currently supported; add segment - see NumericLiteralSegment # "BYTE", "TINYINT", # TODO : not currently supported; add segment - see NumericLiteralSegment # "SHORT", "LONG", "SMALLINT", "INT", "INTEGER", "BIGINT", "FLOAT", "REAL", "DOUBLE", "DATE", "TIMESTAMP", "STRING", Sequence( OneOf("CHAR", "CHARACTER", "VARCHAR", "DECIMAL", "DEC", "NUMERIC"), Ref("BracketedArguments", optional=True), ), "BINARY", "INTERVAL", ) class ArrayTypeSegment(hive.ArrayTypeSegment): """ARRAY type as per hive.""" pass class StructTypeSegment(hive.StructTypeSegment): """STRUCT type as per hive.""" pass class StructTypeSchemaSegment(hive.StructTypeSchemaSegment): """STRUCT type schema as per hive.""" pass class SemiStructuredAccessorSegment(BaseSegment): """A semi-structured data accessor segment. https://docs.databricks.com/en/sql/language-manual/functions/colonsign.html """ type = "semi_structured_expression" match_grammar = Sequence( Ref("ColonSegment"), OneOf( Ref("NakedSemiStructuredElementSegment"), Bracketed(Ref("QuotedSemiStructuredElementSegment"), bracket_type="square"), ), Ref("ArrayAccessorSegment", optional=True), AnyNumberOf( Sequence( OneOf( # Can be delimited by dots or colons Ref("DotSegment"), Ref("ColonSegment"), ), OneOf( Ref("NakedSemiStructuredElementSegment"), Bracketed( Ref("QuotedSemiStructuredElementSegment"), bracket_type="square" ), ), allow_gaps=True, ), Ref("ArrayAccessorSegment", optional=True), allow_gaps=True, ), allow_gaps=True, ) class DatatypeSegment(BaseSegment): """Spark SQL Data types. 
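Covers the primitive types plus the nested ARRAY, MAP and STRUCT types,
e.g. (illustrative): MAP<STRING, ARRAY<INT>>.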
https://spark.apache.org/docs/latest/sql-ref-datatypes.html """ type = "data_type" match_grammar = OneOf( Ref("PrimitiveTypeSegment"), Ref("ArrayTypeSegment"), Sequence( "MAP", Bracketed( Sequence( Ref("DatatypeSegment"), Ref("CommaSegment"), Ref("DatatypeSegment"), ), bracket_pairs_set="angle_bracket_pairs", bracket_type="angle", ), ), Ref("StructTypeSegment"), ) # Data Definition Statements # http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html class AlterDatabaseStatementSegment(BaseSegment): """An `ALTER DATABASE/SCHEMA` statement. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html """ type = "alter_database_statement" match_grammar = Sequence( "ALTER", OneOf("DATABASE", "SCHEMA"), Ref("DatabaseReferenceSegment"), "SET", Ref("DatabasePropertiesGrammar"), ) class AlterTableStatementSegment(ansi.AlterTableStatementSegment): """A `ALTER TABLE` statement to change the table schema or properties. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html https://docs.delta.io/latest/delta-constraints.html#constraints """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Indent, OneOf( # ALTER TABLE - RENAME TO `table_identifier` Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), # ALTER TABLE - RENAME `partition_spec` Sequence( Ref("PartitionSpecGrammar"), "RENAME", "TO", Ref("PartitionSpecGrammar"), ), # ALTER TABLE - RENAME TO 'column_identifier' Sequence( "RENAME", "COLUMN", Ref("ColumnReferenceSegment"), "TO", Ref("ColumnReferenceSegment"), ), # ALTER TABLE - ADD COLUMNS Sequence( "ADD", OneOf("COLUMNS", "COLUMN"), Indent, OptionallyBracketed( Delimited( Sequence( Ref("ColumnFieldDefinitionSegment"), OneOf( "FIRST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), ), optional=True, ), ), ), ), Dedent, ), # ALTER TABLE - ALTER OR CHANGE COLUMN Sequence( OneOf("ALTER", "CHANGE"), Ref.keyword("COLUMN", optional=True), Indent, AnyNumberOf( Ref( "ColumnReferenceSegment", exclude=OneOf( "COMMENT", "TYPE", Ref("DatatypeSegment"), "FIRST", "AFTER", "SET", "DROP", ), ), max_times=2, ), Ref.keyword("TYPE", optional=True), Ref("DatatypeSegment", optional=True), Ref("CommentGrammar", optional=True), OneOf( "FIRST", Sequence( "AFTER", Ref("ColumnReferenceSegment"), ), optional=True, ), Sequence(OneOf("SET", "DROP"), "NOT", "NULL", optional=True), Dedent, ), # ALTER TABLE - REPLACE COLUMNS Sequence( "REPLACE", "COLUMNS", Bracketed( Delimited( Sequence( Ref("ColumnDefinitionSegment"), Ref("CommentGrammar", optional=True), ), ), ), ), # ALTER TABLE - DROP COLUMN # https://docs.delta.io/2.0.0/delta-batch.html#drop-columns Sequence( "DROP", OneOf( Sequence( "COLUMN", Ref("ColumnReferenceSegment"), ), Sequence( "COLUMNS", Bracketed( Delimited(AnyNumberOf(Ref("ColumnReferenceSegment"))), ), ), ), ), # ALTER TABLE - ADD PARTITION Sequence( "ADD", Ref("IfNotExistsGrammar", optional=True), AnyNumberOf( Ref("PartitionSpecGrammar"), Ref("PartitionFieldGrammar"), min_times=1, ), ), # ALTER TABLE - DROP PARTITION Sequence( "DROP", Ref("IfExistsGrammar", optional=True), OneOf( Ref("PartitionSpecGrammar"), Ref("PartitionFieldGrammar"), ), Sequence("PURGE", optional=True), ), Sequence( "Replace", Ref("PartitionFieldGrammar"), ), # ALTER TABLE - REPAIR PARTITION Sequence("RECOVER", "PARTITIONS"), # ALTER TABLE - SET PROPERTIES Sequence("SET", Ref("TablePropertiesGrammar")), # ALTER TABLE - UNSET PROPERTIES Ref("UnsetTablePropertiesGrammar"), # ALTER TABLE - SET SERDE Sequence( Ref("PartitionSpecGrammar", 
optional=True), "SET", OneOf( Sequence( "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar"), ), Sequence( "SERDE", Ref("QuotedLiteralSegment"), Ref("SerdePropertiesGrammar", optional=True), ), ), ), # ALTER TABLE - SET FILE FORMAT Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", "FILEFORMAT", Ref("DataSourceFormatSegment"), ), # ALTER TABLE - CHANGE FILE LOCATION Sequence( Ref("PartitionSpecGrammar", optional=True), "SET", Ref("LocationGrammar"), ), # ALTER TABLE - ADD/DROP CONSTRAINTS (DELTA) Sequence( Indent, OneOf("ADD", "DROP"), "CONSTRAINT", Ref( "ColumnReferenceSegment", exclude=Ref.keyword("CHECK"), ), Ref.keyword("CHECK", optional=True), Bracketed(Ref("ExpressionSegment"), optional=True), Dedent, ), # ALTER TABLE - ICEBERG WRITE ORDER / DISTRIBUTION # https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table--write-ordered-by Sequence( "WRITE", AnyNumberOf( Sequence("DISTRIBUTED", "BY", "PARTITION", optional=True), Sequence( Ref.keyword("LOCALLY", optional=True), "ORDERED", "BY", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), # NB: This isn't really ANSI, and isn't supported # in Mysql,but is supported in enough other dialects # for it to make sense here for now. Sequence( "NULLS", OneOf("FIRST", "LAST"), optional=True ), ), optional=True, ), Dedent, optional=True, ), min_times=1, max_times_per_element=1, ), ), # ALTER TABLE - ICEBERG SET IDENTIFIER FIELDS Sequence( "SET", "IDENTIFIER", "FIELDS", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), ), ), Dedent, ), # ALTER TABLE - ICEBERG DROP IDENTIFIER FIELDS Sequence( "DROP", "IDENTIFIER", "FIELDS", Indent, Delimited( Sequence( Ref("ColumnReferenceSegment"), ), ), Dedent, ), ), Dedent, ) class ColumnFieldDefinitionSegment(ansi.ColumnDefinitionSegment): """A column field definition, e.g. for CREATE TABLE or ALTER TABLE. This supports the iceberg syntax and allows for iceberg syntax such as ADD COLUMN a.b. """ match_grammar: Matchable = Sequence( Ref("ColumnReferenceSegment"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class AlterViewStatementSegment(BaseSegment): """A `ALTER VIEW` statement to change the view schema or properties. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html """ type = "alter_view_statement" match_grammar = Sequence( "ALTER", "VIEW", Ref("TableReferenceSegment"), OneOf( Sequence( "RENAME", "TO", Ref("TableReferenceSegment"), ), Sequence("SET", Ref("TablePropertiesGrammar")), Ref("UnsetTablePropertiesGrammar"), Sequence( "AS", OptionallyBracketed(Ref("SelectStatementSegment")), ), ), ) class CreateDatabaseStatementSegment(ansi.CreateDatabaseStatementSegment): """A `CREATE DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html """ match_grammar = Sequence( "CREATE", OneOf("DATABASE", "SCHEMA"), Ref("IfNotExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("CommentGrammar", optional=True), Ref("LocationGrammar", optional=True), Sequence( "WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True ), ) class CreateFunctionStatementSegment(ansi.CreateFunctionStatementSegment): """A `CREATE FUNCTION` statement. 
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html """ match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfNotExistsGrammar", optional=True), Ref("FunctionNameIdentifierSegment"), "AS", Ref("QuotedLiteralSegment"), Ref("ResourceLocationGrammar", optional=True), ) class CreateTableStatementSegment(ansi.CreateTableStatementSegment): """A `CREATE TABLE` statement using a Data Source or Like. http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html https://docs.delta.io/latest/delta-batch.html#create-a-table """ match_grammar = Sequence("CREATE", Ref("TableDefinitionSegment")) class CreateHiveFormatTableStatementSegment(hive.CreateTableStatementSegment): """A `CREATE TABLE` statement using Hive format. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-hiveformat.html """ pass class CreateViewStatementSegment(ansi.CreateViewStatementSegment): """A `CREATE VIEW` statement. https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax """ match_grammar = Sequence( "CREATE", OneOf(Ref("OrReplaceGrammar"), Ref("OrRefreshGrammar"), optional=True), Ref("TemporaryGrammar", optional=True), Ref.keyword("STREAMING", optional=True), Ref.keyword("LIVE", optional=True), "VIEW", Ref("IfNotExistsGrammar", optional=True), Ref("TableReferenceSegment"), # Columns and comment syntax: Sequence( Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("CommentGrammar", optional=True), ), ), ), optional=True, ), Sequence("USING", Ref("DataSourceFormatSegment"), optional=True), Ref("OptionsGrammar", optional=True), Ref("CommentGrammar", optional=True), Ref("TablePropertiesGrammar", optional=True), Sequence("AS", OptionallyBracketed(Ref("SelectableGrammar")), optional=True), Ref("WithNoSchemaBindingClauseSegment", optional=True), ) class CreateWidgetStatementSegment(BaseSegment): """A `CREATE WIDGET` STATEMENT. https://docs.databricks.com/notebooks/widgets.html#databricks-widget-api """ type = "create_widget_statement" match_grammar = Sequence( "CREATE", "WIDGET", OneOf( Sequence( "DROPDOWN", Ref("WidgetNameIdentifierSegment"), Ref("WidgetDefaultGrammar"), Sequence("CHOICES", Ref("SelectStatementSegment")), ), Sequence( "TEXT", Ref("WidgetNameIdentifierSegment"), Ref("WidgetDefaultGrammar") ), ), ) class ReplaceTableStatementSegment(BaseSegment): """A `REPLACE TABLE` statement using the iceberg table format. https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select """ type = "replace_table_statement" match_grammar = Sequence("REPLACE", Ref("TableDefinitionSegment")) class RemoveWidgetStatementSegment(BaseSegment): """A `REMOVE WIDGET` STATEMENT. https://docs.databricks.com/notebooks/widgets.html#databricks-widget-api """ type = "remove_widget_statement" match_grammar = Sequence( "REMOVE", "WIDGET", Ref("WidgetNameIdentifierSegment"), ) class DropDatabaseStatementSegment(ansi.DropDatabaseStatementSegment): """A `DROP DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-database.html """ type = "drop_database_statement" match_grammar: Matchable = Sequence( "DROP", OneOf("DATABASE", "SCHEMA"), Ref("IfExistsGrammar", optional=True), Ref("DatabaseReferenceSegment"), Ref("DropBehaviorGrammar", optional=True), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` STATEMENT. 
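e.g. (illustrative): DROP TEMPORARY FUNCTION IF EXISTS simple_udf;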
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html """ type = "drop_function_statement" match_grammar = Sequence( "DROP", Ref("TemporaryGrammar", optional=True), "FUNCTION", Ref("IfExistsGrammar", optional=True), Ref("FunctionNameSegment"), ) class MsckRepairTableStatementSegment(hive.MsckRepairTableStatementSegment): """A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format. This class inherits from Hive since Spark leverages Hive format for this command and is dependent on the Hive metastore. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html """ pass class TruncateStatementSegment(ansi.TruncateStatementSegment): """A `TRUNCATE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-truncate-table.html """ match_grammar = Sequence( "TRUNCATE", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) class UseDatabaseStatementSegment(BaseSegment): """A `USE DATABASE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-usedb.html """ type = "use_database_statement" match_grammar = Sequence( "USE", Ref("DatabaseReferenceSegment"), ) # Data Manipulation Statements class InsertStatementSegment(BaseSegment): """A `INSERT [TABLE]` statement to insert or overwrite new rows into a table. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-into.html https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-table.html """ type = "insert_statement" match_grammar = Sequence( "INSERT", OneOf("INTO", "OVERWRITE"), Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), ), ) class InsertOverwriteDirectorySegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory.html """ type = "insert_overwrite_directory_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment", optional=True), "USING", Ref("DataSourceFormatSegment"), Ref("OptionsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class InsertOverwriteDirectoryHiveFmtSegment(BaseSegment): """An `INSERT OVERWRITE [LOCAL] DIRECTORY` statement in Hive format. https://spark.apache.org/docs/latest/sql-ref-syntax-dml-insert-overwrite-directory-hive.html """ type = "insert_overwrite_directory_hive_fmt_statement" match_grammar = Sequence( "INSERT", "OVERWRITE", Ref.keyword("LOCAL", optional=True), "DIRECTORY", Ref("QuotedLiteralSegment"), Ref("RowFormatClauseSegment", optional=True), Ref("StoredAsGrammar", optional=True), OneOf( AnyNumberOf( Ref("ValuesClauseSegment"), min_times=1, ), Ref("SelectableGrammar"), ), ) class LoadDataSegment(BaseSegment): """A `LOAD DATA` statement. 
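e.g. (illustrative): LOAD DATA LOCAL INPATH '/tmp/students.csv'
OVERWRITE INTO TABLE students PARTITION (c1 = 1);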
https://spark.apache.org/docs/latest/sql-ref-syntax-dml-load.html """ type = "load_data_statement" match_grammar = Sequence( "LOAD", "DATA", Ref.keyword("LOCAL", optional=True), "INPATH", Ref("QuotedLiteralSegment"), Ref.keyword("OVERWRITE", optional=True), "INTO", "TABLE", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ) # Data Retrieval Statements class ClusterByClauseSegment(BaseSegment): """A `CLUSTER BY` clause from a `SELECT` statement. Equivalent to `DISTRIBUTE BY` and `SORT BY` in tandem. This clause is mutually exclusive with SORT BY, ORDER BY and DISTRIBUTE BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-clusterby.html """ type = "cluster_by_clause" match_grammar = Sequence( "CLUSTER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `CLUSTER BY 1` Ref("NumericLiteralSegment"), # Can cluster by an expression Ref("ExpressionSegment"), ), ), terminators=[ "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class DistributeByClauseSegment(BaseSegment): """A `DISTRIBUTE BY` clause from a `SELECT` statement. This clause is mutually exclusive with ORDER BY and CLUSTER BY, though it may be combined with SORT BY. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-distribute-by.html """ type = "distribute_by_clause" match_grammar = Sequence( "DISTRIBUTE", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `DISTRIBUTE BY 1` Ref("NumericLiteralSegment"), # Can distribute by an expression Ref("ExpressionSegment"), ), ), terminators=[ "SORT", "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class HintFunctionSegment(BaseSegment): """A Function within a SparkSQL Hint. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "hint_function" match_grammar = Sequence( Ref("FunctionNameSegment"), Bracketed( Delimited( AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("NumericLiteralSegment"), Ref("TableReferenceSegment"), Ref("ColumnReferenceSegment"), min_times=1, ), ), # May be a Bare Function unique to Hints, e.g. REBALANCE optional=True, ), ) class SelectHintSegment(BaseSegment): """Spark Select Hints. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ type = "select_hint" match_grammar = Sequence( Sequence( Ref("StartHintSegment"), Delimited( AnyNumberOf( Ref("HintFunctionSegment"), # At least one function should be supplied min_times=1, ), terminators=[Ref("EndHintSegment")], ), Ref("EndHintSegment"), ), ) class LimitClauseSegment(ansi.LimitClauseSegment): """A `LIMIT` clause like in `SELECT`. Enhanced from ANSI dialect. :: Spark does not allow an explicit or implicit `OFFSET` (implicit being e.g. `LIMIT 1000, 20`) :: Spark allows an `ALL` quantifier or a function expression as an input to `LIMIT` https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-limit.html """ match_grammar = Sequence( "LIMIT", Indent, OneOf( Ref("NumericLiteralSegment"), "ALL", Ref("FunctionSegment"), ), Dedent, ) class SetOperatorSegment(ansi.SetOperatorSegment): """A set operator such as Union, Minus, Except or Intersect. Enhanced from ANSI dialect. :: Spark allows the `ALL` keyword to follow Except and Minus. :: Distinct allows the `DISTINCT` and `ALL` keywords.
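e.g. (illustrative): SELECT c FROM a MINUS ALL SELECT c FROM b;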
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-setops.html """ match_grammar = OneOf( Sequence( OneOf("EXCEPT", "MINUS"), Ref.keyword("ALL", optional=True), ), Sequence( OneOf("UNION", "INTERSECT"), OneOf("DISTINCT", "ALL", optional=True), ), ) class SelectClauseModifierSegment(ansi.SelectClauseModifierSegment): """Things that come after SELECT but before the columns. Enhance `SelectClauseModifierSegment` from ANSI to allow SparkSQL hints. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-hints.html """ match_grammar = Sequence( # TODO New Rule warning of Join Hints priority if multiple specified # When different join strategy hints are specified on # both sides of a join, Spark prioritizes the BROADCAST # hint over the MERGE hint over the SHUFFLE_HASH hint # over the SHUFFLE_REPLICATE_NL hint. # # Spark will issue a warning for the following example: # # SELECT # /*+ BROADCAST(t1), MERGE(t1, t2) */ # t1.a, # t1.b, # t2.c # FROM t1 INNER JOIN t2 ON t1.key = t2.key; # # Hints should be listed in order of priority in Select Ref("SelectHintSegment", optional=True), OneOf("DISTINCT", "ALL", optional=True), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """Enhance unordered `SELECT` statement for valid SparkSQL clauses. This is designed for use in the context of set operations; for other use cases, use the main SelectStatementSegment. """ match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], # Removing non-valid clauses that exist in ANSI dialect remove=[Ref("OverlapsClauseSegment", optional=True)], ) class SelectStatementSegment(ansi.SelectStatementSegment): """Enhance `SELECT` statement for valid SparkSQL clauses.""" match_grammar = ansi.SelectStatementSegment.match_grammar.copy( # TODO New Rule: Warn of mutual exclusion of following clauses # DISTRIBUTE, SORT, CLUSTER and ORDER BY if multiple specified insert=[ Ref("ClusterByClauseSegment", optional=True), Ref("DistributeByClauseSegment", optional=True), Ref("SortByClauseSegment", optional=True), ], before=Ref("LimitClauseSegment", optional=True), ).copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) class GroupByClauseSegment(ansi.GroupByClauseSegment): """Enhance `GROUP BY` clause like in `SELECT` for `CUBE` and `ROLLUP`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ match_grammar = Sequence( "GROUP", "BY", Indent, OneOf( Delimited( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("CubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment"), Ref("ExpressionSegment"), ), Sequence( Delimited( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), OneOf( Ref("WithCubeRollupClauseSegment"), Ref("GroupingSetsClauseSegment") ), ), ), Dedent, ) class WithCubeRollupClauseSegment(BaseSegment): """A `[WITH CUBE | WITH ROLLUP]` clause after the `GROUP BY` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-groupby.html """ type = "with_cube_rollup_clause" match_grammar = Sequence( "WITH", OneOf("CUBE", "ROLLUP"), ) class SortByClauseSegment(BaseSegment): """A `SORT BY` clause like in `SELECT`. This clause is mutually exclusive with ORDER BY and CLUSTER BY, though it may be combined with DISTRIBUTE BY.
https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sortby.html """ type = "sort_by_clause" match_grammar = Sequence( "SORT", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `SORT BY 1` Ref("NumericLiteralSegment"), # Can sort by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), # NB: This isn't really ANSI, and isn't supported in MySQL, # but is supported in enough other dialects for it to make # sense here for now. Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True), ), terminators=[ "LIMIT", "HAVING", "QUALIFY", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "SEPARATOR", ], ), Dedent, ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """A `TABLESAMPLE` clause following a table identifier. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-sampling.html """ match_grammar = Sequence( "TABLESAMPLE", OneOf( Bracketed( Ref("NumericLiteralSegment"), OneOf( "PERCENT", "ROWS", ), ), Bracketed( "BUCKET", Ref("NumericLiteralSegment"), "OUT", "OF", Ref("NumericLiteralSegment"), ), ), ) class LateralViewClauseSegment(BaseSegment): """A `LATERAL VIEW` clause like in a `FROM` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-lateral-view.html """ type = "lateral_view_clause" match_grammar = Sequence( Indent, "LATERAL", "VIEW", Ref.keyword("OUTER", optional=True), Ref("FunctionSegment"), OneOf( Sequence( Ref("SingleIdentifierGrammar"), Sequence( Ref.keyword("AS", optional=True), Delimited(Ref("SingleIdentifierGrammar")), optional=True, ), ), Sequence( Ref.keyword("AS", optional=True), Delimited(Ref("SingleIdentifierGrammar")), ), ), Dedent, ) class PivotClauseSegment(BaseSegment): """A `PIVOT` clause as used in a `FROM` clause. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-pivot.html """ type = "pivot_clause" match_grammar = Sequence( Indent, "PIVOT", Bracketed( Indent, Delimited( Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ), "FOR", OptionallyBracketed( OneOf( Ref("SingleIdentifierGrammar"), Delimited( Ref("SingleIdentifierGrammar"), ), ), ), "IN", Bracketed( Delimited( Sequence( OneOf( Bracketed( Delimited( Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), Delimited( Ref("ExpressionSegment"), ), ), Ref("AliasExpressionSegment", optional=True), ), ), ), Dedent, ), Dedent, ) class TransformClauseSegment(BaseSegment): """A `TRANSFORM` clause as used in `SELECT`. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-transform.html """ type = "transform_clause" match_grammar = Sequence( "TRANSFORM", Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), parse_mode=ParseMode.GREEDY, ), Indent, Ref("RowFormatClauseSegment", optional=True), "USING", Ref("QuotedLiteralSegment"), Sequence( "AS", Bracketed( Delimited( AnyNumberOf( Ref("SingleIdentifierGrammar"), Ref("DatatypeSegment"), ), ), ), optional=True, ), Ref("RowFormatClauseSegment", optional=True), ) class ExplainStatementSegment(ansi.ExplainStatementSegment): """An `Explain` statement. Enhanced from ANSI dialect to allow for additional parameters.
EXPLAIN [ EXTENDED | CODEGEN | COST | FORMATTED ] explainable_stmt https://spark.apache.org/docs/latest/sql-ref-syntax-qry-explain.html """ explainable_stmt = Ref("StatementSegment") match_grammar = Sequence( "EXPLAIN", OneOf( "EXTENDED", "CODEGEN", "COST", "FORMATTED", optional=True, ), explainable_stmt, ) # Auxiliary Statements class AddFileSegment(BaseSegment): """A `ADD {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-file.html """ type = "add_file_statement" match_grammar = Sequence( "ADD", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class AddJarSegment(BaseSegment): """A `ADD {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html """ type = "add_jar_statement" match_grammar = Sequence( "ADD", Ref("JarKeywordSegment"), AnyNumberOf( Ref("QuotedLiteralSegment"), Ref("FileLiteralSegment"), ), ) class AnalyzeTableSegment(BaseSegment): """An `ANALYZE {TABLE | TABLES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-analyze-table.html """ type = "analyze_table_statement" match_grammar = Sequence( "ANALYZE", OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), Ref( "PartitionSpecGrammar", optional=True, ), "COMPUTE", "STATISTICS", OneOf( "NOSCAN", Sequence( "FOR", "COLUMNS", OptionallyBracketed( Delimited( Ref( "ColumnReferenceSegment", ), ), ), ), optional=True, ), ), Sequence( "TABLES", Sequence( OneOf( "FROM", "IN", ), Ref( "DatabaseReferenceSegment", ), optional=True, ), "COMPUTE", "STATISTICS", Ref.keyword( "NOSCAN", optional=True, ), ), ), ) class CacheTableSegment(BaseSegment): """A `CACHE TABLE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-cache-table.html """ type = "cache_table" match_grammar = Sequence( "CACHE", Ref.keyword("LAZY", optional=True), "TABLE", Ref("TableReferenceSegment"), Ref("OptionsGrammar", optional=True), Sequence( Ref.keyword("AS", optional=True), Ref("SelectableGrammar"), optional=True ), ) class ClearCacheSegment(BaseSegment): """A `CLEAR CACHE` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-clear-cache.html """ type = "clear_cache" match_grammar = Sequence( "CLEAR", "CACHE", ) class DescribeStatementSegment(BaseSegment): """A `DESCRIBE` statement. This class provides coverage for databases, tables, functions, and queries. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one describe vs another, but they could be broken out to one class per describe statement type. 
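e.g. (illustrative): DESCRIBE TABLE EXTENDED customer PARTITION (state = 'AR');
or DESCRIBE QUERY SELECT age, sum(age) FROM person GROUP BY age;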
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-database.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-function.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-query.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-describe-table.html """ type = "describe_statement" match_grammar = Sequence( OneOf("DESCRIBE", "DESC"), OneOf( Sequence( OneOf("DATABASE", "SCHEMA"), Ref.keyword("EXTENDED", optional=True), Ref("DatabaseReferenceSegment"), ), Sequence( "FUNCTION", Ref.keyword("EXTENDED", optional=True), Ref("FunctionNameSegment"), ), Sequence( Ref.keyword("TABLE", optional=True), Ref.keyword("EXTENDED", optional=True), Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), # can be fully qualified column after table is listed # [database.][table.][column] Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar"), allow_gaps=False, ), max_times=2, allow_gaps=False, ), optional=True, allow_gaps=False, ), ), Sequence( Ref.keyword("QUERY", optional=True), OneOf( Sequence( "TABLE", Ref("TableReferenceSegment"), ), Sequence( "FROM", Ref("TableReferenceSegment"), "SELECT", Delimited( Ref("ColumnReferenceSegment"), ), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ), Ref("StatementSegment"), ), ), exclude=OneOf( Ref.keyword("HISTORY"), Ref.keyword("DETAIL"), ), ), ) class ListFileSegment(BaseSegment): """A `LIST {FILE | FILES}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-list-file.html """ type = "list_file_statement" match_grammar = Sequence( "LIST", Ref("FileKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class ListJarSegment(BaseSegment): """A `LIST {JAR | JARS}` statement. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-list-jar.html """ type = "list_jar_statement" match_grammar = Sequence( "LIST", Ref("JarKeywordSegment"), AnyNumberOf(Ref("QuotedLiteralSegment")), ) class RefreshStatementSegment(BaseSegment): """A `REFRESH` statement for a given data source path. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one refresh vs another, but they could be broken out to one class per refresh statement type. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-refresh-function.html """ type = "refresh_statement" match_grammar = Sequence( "REFRESH", OneOf( Ref("QuotedLiteralSegment"), Sequence( Ref.keyword("TABLE", optional=True), Ref("TableReferenceSegment"), ), Sequence( "FUNCTION", Ref("FunctionNameSegment"), ), ), ) class ResetStatementSegment(BaseSegment): """A `RESET` statement used to reset runtime configurations. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-reset.html """ type = "reset_statement" match_grammar = Sequence( "RESET", Delimited( Ref("SingleIdentifierGrammar"), delimiter=Ref("DotSegment"), optional=True, ), ) class SetStatementSegment(BaseSegment): """A `SET` statement used to set runtime properties.
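e.g. (illustrative): SET spark.sql.variable.substitute=false; a bare SET
lists all properties, and SET -v (parsed by SQLConfPropertiesSegment
above) also prints their descriptions.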
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-conf-mgmt-set.html """ type = "set_statement" match_grammar = Sequence( "SET", Ref("SQLConfPropertiesSegment", optional=True), OneOf( Ref("PropertyListGrammar"), Ref("PropertyNameSegment"), optional=True, ), ) class ShowStatement(BaseSegment): """Common class for `SHOW` statements. NB: These are similar enough that it makes sense to include them in a common class, especially since there wouldn't be any specific rules that would apply to one show vs another, but they could be broken out to one class per show statement type. https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-columns.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-create-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-databases.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-functions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-partitions.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-table.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tables.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-tblproperties.html https://spark.apache.org/docs/latest/sql-ref-syntax-aux-show-views.html """ type = "show_statement" match_grammar = Sequence( "SHOW", OneOf( # SHOW CREATE TABLE Sequence( "CREATE", "TABLE", Ref("TableExpressionSegment"), Sequence( "AS", "SERDE", optional=True, ), ), # SHOW COLUMNS Sequence( "COLUMNS", "IN", Ref("TableExpressionSegment"), Sequence( "IN", Ref("DatabaseReferenceSegment"), optional=True, ), ), # SHOW { DATABASES | SCHEMAS } Sequence( OneOf("DATABASES", "SCHEMAS"), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), # SHOW FUNCTIONS Sequence( OneOf("USER", "SYSTEM", "ALL", optional=True), "FUNCTIONS", OneOf( # qualified function from a database Sequence( Ref("DatabaseReferenceSegment"), Ref("DotSegment"), Ref("FunctionNameSegment"), allow_gaps=False, optional=True, ), # non-qualified function Ref("FunctionNameSegment", optional=True), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ), # SHOW PARTITIONS Sequence( "PARTITIONS", Ref("TableReferenceSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TABLE Sequence( "TABLE", "EXTENDED", Sequence( OneOf("IN", "FROM"), Ref("DatabaseReferenceSegment"), optional=True, ), "LIKE", Ref("QuotedLiteralSegment"), Ref("PartitionSpecGrammar", optional=True), ), # SHOW TABLES Sequence( "TABLES", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), # SHOW TBLPROPERTIES Sequence( "TBLPROPERTIES", Ref("TableReferenceSegment"), Ref("BracketedPropertyNameListGrammar", optional=True), ), # SHOW VIEWS Sequence( "VIEWS", Sequence( OneOf("FROM", "IN"), Ref("DatabaseReferenceSegment"), optional=True, ), Sequence( "LIKE", Ref("QuotedLiteralSegment"), optional=True, ), ), ), ) class UncacheTableSegment(BaseSegment): """AN `UNCACHE TABLE` statement. 
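e.g. (illustrative): UNCACHE TABLE IF EXISTS t1;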
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-cache-uncache-table.html """ type = "uncache_table" match_grammar = Sequence( "UNCACHE", "TABLE", Ref("IfExistsGrammar", optional=True), Ref("TableReferenceSegment"), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( # Segments defined in Spark3 dialect insert=[ # Data Definition Statements Ref("AlterDatabaseStatementSegment"), Ref("AlterTableStatementSegment"), Ref("AlterViewStatementSegment"), Ref("CreateHiveFormatTableStatementSegment"), Ref("MsckRepairTableStatementSegment"), Ref("UseDatabaseStatementSegment"), # Auxiliary Statements Ref("AddFileSegment"), Ref("AddJarSegment"), Ref("AnalyzeTableSegment"), Ref("CacheTableSegment"), Ref("ClearCacheSegment"), Ref("ListFileSegment"), Ref("ListJarSegment"), Ref("RefreshStatementSegment"), Ref("ResetStatementSegment"), Ref("SetStatementSegment"), Ref("ShowStatement"), Ref("UncacheTableSegment"), # Data Manipulation Statements Ref("InsertOverwriteDirectorySegment"), Ref("InsertOverwriteDirectoryHiveFmtSegment"), Ref("LoadDataSegment"), # Data Retrieval Statements Ref("ClusterByClauseSegment"), Ref("DistributeByClauseSegment"), # Delta Lake Ref("VacuumStatementSegment"), Ref("DescribeHistoryStatementSegment"), Ref("DescribeDetailStatementSegment"), Ref("GenerateManifestFileStatementSegment"), Ref("ConvertToDeltaStatementSegment"), Ref("RestoreTableStatementSegment"), # Databricks - Delta Live Tables Ref("ConstraintStatementSegment"), Ref("ApplyChangesIntoStatementSegment"), # Databricks - widgets Ref("CreateWidgetStatementSegment"), Ref("RemoveWidgetStatementSegment"), Ref("ReplaceTableStatementSegment"), ], remove=[ Ref("TransactionStatementSegment"), Ref("CreateSchemaStatementSegment"), Ref("SetSchemaStatementSegment"), Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), ], ) class JoinClauseSegment(ansi.JoinClauseSegment): """Any number of join clauses, including the `JOIN` keyword. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-join.html """ match_grammar = OneOf( # NB These qualifiers are optional # TODO: Allow nested joins like: # ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON # tab1.col1 = tab2.col1 Sequence( Ref("JoinTypeKeywords", optional=True), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, Conditional(Indent, indented_using_on=True), # NB: this is optional OneOf( # ON clause Ref("JoinOnConditionSegment"), # USING clause Sequence( "USING", Conditional(Indent, indented_using_on=False), Bracketed( # NB: We don't use BracketedColumnReferenceListGrammar # here because we're just using SingleIdentifierGrammar, # rather than ObjectReferenceSegment or # ColumnReferenceSegment. This is a) so that we don't # lint it as a reference and b) because the column will # probably be returned anyway during parsing. Delimited(Ref("SingleIdentifierGrammar")), parse_mode=ParseMode.GREEDY, ), Conditional(Dedent, indented_using_on=False), ), # Unqualified joins *are* allowed. They just might not # be a good idea. optional=True, ), Conditional(Dedent, indented_using_on=True), ), # Note NATURAL joins do not support Join conditions Sequence( Ref("NaturalJoinKeywordsGrammar"), Ref("JoinKeywordsGrammar"), Indent, Ref("FromExpressionElementSegment"), Dedent, ), ) class AliasExpressionSegment(ansi.AliasExpressionSegment): """A reference to an object with an `AS` clause. 
The optional AS keyword allows both implicit and explicit aliasing. Note also that it's possible to specify just column aliases without aliasing the table as well: .. code-block:: sql SELECT * FROM VALUES (1,2) as t (a, b); SELECT * FROM VALUES (1,2) as (a, b); SELECT * FROM VALUES (1,2) as t; Note that in Spark SQL, identifiers are quoted using backticks (`my_table`) rather than double quotes ("my_table"). Quoted identifiers are allowed in aliases, but unlike ANSI which allows single quoted identifiers ('my_table') in aliases, this is not allowed in Spark and so the definition of this segment must depart from ANSI. """ match_grammar = Sequence( Ref.keyword("AS", optional=True), OneOf( # maybe table alias and column aliases Sequence( Ref("SingleIdentifierGrammar", optional=True), Bracketed(Ref("SingleIdentifierListSegment")), ), # just a table alias Ref("SingleIdentifierGrammar"), exclude=OneOf( "LATERAL", Ref("JoinTypeKeywords"), "WINDOW", "PIVOT", "KEYS", "FROM", ), ), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause, as typically used with `INSERT` or `SELECT`. The Spark SQL reference does not mention `VALUES` clauses except in the context of `INSERT` statements. However, they appear to behave much the same as in `postgres `. In short, they can appear anywhere a `SELECT` can, and also as bare `VALUES` statements. Here are some examples: .. code-block:: sql VALUES 1,2 LIMIT 1; SELECT * FROM VALUES (1,2) as t (a,b); SELECT * FROM (VALUES (1,2) as t (a,b)); WITH a AS (VALUES 1,2) SELECT * FROM a; """ match_grammar = Sequence( "VALUES", Delimited( OneOf( Bracketed( Delimited( # NULL keyword used in # INSERT INTO statement. "NULL", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), "NULL", Ref("ExpressionSegment"), exclude=OneOf("VALUES"), ), ), # LIMIT/ORDER are unreserved in sparksql. Ref( "AliasExpressionSegment", exclude=OneOf("LIMIT", "ORDER"), optional=True, ), Ref("OrderByClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), ) class TableExpressionSegment(ansi.TableExpressionSegment): """The main table expression e.g. within a FROM clause. Enhance to allow for additional clauses allowed in Spark and Delta Lake. """ match_grammar = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Sequence( OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( Ref("AtSignLiteralSegment"), Sequence( Indent, OneOf( Ref("TimestampAsOfGrammar"), Ref("VersionAsOfGrammar"), ), Dedent, ), optional=True, ), ), # Nested Selects Bracketed(Ref("SelectableGrammar")), ) class FileReferenceSegment(BaseSegment): """A reference to a file for direct query. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html """ type = "file_reference" match_grammar = Sequence( Ref("DataSourcesV2FileTypeGrammar"), Ref("DotSegment"), # NB: Using `QuotedLiteralSegment` here causes `FileReferenceSegment` # to match as a `TableReferenceSegment` Ref("BackQuotedIdentifierSegment"), ) class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """A table expression. 
Enhanced from ANSI to allow for `LATERAL VIEW` clause """ match_grammar = Sequence( Ref("PreTableFunctionKeywordsGrammar", optional=True), OptionallyBracketed(Ref("TableExpressionSegment")), Ref( "AliasExpressionSegment", exclude=OneOf( Ref("FromClauseTerminatorGrammar"), Ref("SamplingExpressionSegment"), ), optional=True, ), Ref("SamplingExpressionSegment", optional=True), # NB: `LateralViewClauseSegment`, `NamedWindowSegment`, # and `PivotClauseSegment should come after Alias/Sampling # expressions so those are matched before AnyNumberOf(Ref("LateralViewClauseSegment")), Ref("NamedWindowSegment", optional=True), Ref("PivotClauseSegment", optional=True), Ref("PostTableExpressionGrammar", optional=True), ) class PropertyNameSegment(BaseSegment): """A segment for a property name to set and retrieve table and runtime properties. https://spark.apache.org/docs/latest/configuration.html#application-properties """ type = "property_name_identifier" match_grammar = Sequence( OneOf( Delimited( Ref("PropertiesNakedIdentifierSegment"), delimiter=Ref("DotSegment"), allow_gaps=False, ), Ref("SingleIdentifierGrammar"), ), ) class GeneratedColumnDefinitionSegment(BaseSegment): """A generated column definition, e.g. for CREATE TABLE or ALTER TABLE. https://docs.delta.io/latest/delta-batch.html#use-generated-columns """ type = "generated_column_definition" match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) Sequence( "GENERATED", "ALWAYS", "AS", Bracketed( OneOf( Ref("FunctionSegment"), Ref("BareFunctionSegment"), ), ), ), AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), ), ) class MergeUpdateClauseSegment(ansi.MergeUpdateClauseSegment): """`UPDATE` clause within the `MERGE` statement.""" type = "merge_update_clause" match_grammar: Matchable = Sequence( "UPDATE", OneOf( Sequence("SET", Ref("WildcardIdentifierSegment")), Sequence( Indent, Ref("SetClauseListSegment"), Dedent, ), ), ) class MergeInsertClauseSegment(ansi.MergeInsertClauseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar: Matchable = Sequence( "INSERT", OneOf( Ref("WildcardIdentifierSegment"), Sequence( Indent, Ref("BracketedColumnReferenceListGrammar"), Dedent, Ref("ValuesClauseSegment"), ), ), ) class UpdateStatementSegment(ansi.UpdateStatementSegment): """An `Update` statement. Enhancing from ANSI dialect to be SparkSQL & Delta Lake specific. https://docs.delta.io/latest/delta-update.html#update-a-table """ match_grammar: Matchable = Sequence( "UPDATE", OneOf( Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), # SET is not a reserved word in all dialects (e.g. RedShift) # So specifically exclude as an allowed implicit alias to avoid parsing errors Ref( "AliasExpressionSegment", exclude=Ref.keyword("SET"), optional=True, ), Ref("SetClauseListSegment"), Ref("WhereClauseSegment"), ) class IntervalLiteralSegment(BaseSegment): """An interval literal segment. https://spark.apache.org/docs/latest/sql-ref-literals.html#interval-literal """ type = "interval_literal" match_grammar: Matchable = Sequence( Ref("SignedSegmentGrammar", optional=True), OneOf( Ref("NumericLiteralSegment"), Ref("SignedQuotedLiteralSegment"), ), Ref("DatetimeUnitSegment"), Ref.keyword("TO", optional=True), Ref("DatetimeUnitSegment", optional=True), ) class IntervalExpressionSegment(ansi.IntervalExpressionSegment): """An interval expression segment. 
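e.g. (illustrative): INTERVAL 3 DAYS, INTERVAL '3' DAY, or the multi-unit
form INTERVAL 2 HOURS 30 MINUTES, each built from the interval literals
defined above.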
Redefining from ANSI dialect to allow for additional syntax. https://spark.apache.org/docs/latest/sql-ref-literals.html#interval-literal """ match_grammar: Matchable = Sequence( "INTERVAL", OneOf( AnyNumberOf( Ref("IntervalLiteralSegment"), ), Ref("QuotedLiteralSegment"), ), ) class VacuumStatementSegment(BaseSegment): """A `VACUUM` statement segment. https://docs.delta.io/latest/delta-utility.html#remove-files-no-longer-referenced-by-a-delta-table """ type = "vacuum_statement" match_grammar: Matchable = Sequence( "VACUUM", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), OneOf( Sequence( "RETAIN", Ref("NumericLiteralSegment"), Ref("DatetimeUnitSegment"), ), Sequence( "DRY", "RUN", ), optional=True, ), ) class DescribeHistoryStatementSegment(BaseSegment): """A `DESCRIBE HISTORY` statement segment. https://docs.delta.io/latest/delta-utility.html#retrieve-delta-table-history """ type = "describe_history_statement" match_grammar: Matchable = Sequence( "DESCRIBE", "HISTORY", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), Ref("LimitClauseSegment", optional=True), ) class DescribeDetailStatementSegment(BaseSegment): """A `DESCRIBE DETAIL` statement segment. https://docs.delta.io/latest/delta-utility.html#retrieve-delta-table-details """ type = "describe_detail_statement" match_grammar: Matchable = Sequence( "DESCRIBE", "DETAIL", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ) class GenerateManifestFileStatementSegment(BaseSegment): """A statement to `GENERATE` manifest files for a Delta Table. https://docs.delta.io/latest/delta-utility.html#generate-a-manifest-file """ type = "generate_manifest_file_statement" match_grammar: Matchable = Sequence( "GENERATE", StringParser( "symlink_format_manifest", CodeSegment, type="symlink_format_manifest", ), "FOR", "TABLE", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), ) class ConvertToDeltaStatementSegment(BaseSegment): """A statement to convert other file formats to Delta. https://docs.delta.io/latest/delta-utility.html#convert-a-parquet-table-to-a-delta-table https://docs.databricks.com/delta/delta-utility.html#convert-an-iceberg-table-to-a-delta-table """ type = "convert_to_delta_statement" match_grammar: Matchable = Sequence( "CONVERT", "TO", "DELTA", Ref("FileReferenceSegment"), Sequence("NO", "STATISTICS", optional=True), Ref("PartitionSpecGrammar", optional=True), ) class RestoreTableStatementSegment(BaseSegment): """A statement to `RESTORE` a Delta Table to a previous version. https://docs.delta.io/latest/delta-utility.html#restore-a-delta-table-to-an-earlier-state """ type = "restore_table_statement" match_grammar: Matchable = Sequence( "RESTORE", "TABLE", OneOf( Ref("QuotedLiteralSegment"), Ref("FileReferenceSegment"), Ref("TableReferenceSegment"), ), "TO", OneOf( Ref("TimestampAsOfGrammar"), Ref("VersionAsOfGrammar"), ), ) class ConstraintStatementSegment(BaseSegment): """A `CONSTRAINT` statement to define data quality expectations on data contents.
https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-expectations.html#manage-data-quality-with-delta-live-tables """ type = "constraint_statement" match_grammar: Matchable = Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), "EXPECT", Bracketed(Ref("ExpressionSegment")), Sequence("ON", "VIOLATION", optional=True), OneOf( Sequence("FAIL", "UPDATE"), Sequence("DROP", "ROW"), optional=True, ), ) class ApplyChangesIntoStatementSegment(BaseSegment): """A statement to ingest CDC data into a target table. https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-cdc.html#sql """ type = "apply_changes_into_statement" match_grammar = Sequence( Sequence( "APPLY", "CHANGES", "INTO", ), Indent, Ref("TableExpressionSegment"), Dedent, Ref("FromClauseSegment"), Sequence( "KEYS", Indent, Ref("BracketedColumnReferenceListGrammar"), Dedent, ), Sequence("IGNORE", "NULL", "UPDATES", optional=True), Ref("WhereClauseSegment", optional=True), AnyNumberOf( Sequence( "APPLY", "AS", OneOf("DELETE", "TRUNCATE"), "WHEN", Ref("ColumnReferenceSegment"), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), # NB: Setting max_times to allow for one instance # of DELETE and TRUNCATE at most max_times=2, ), Sequence( "SEQUENCE", "BY", Ref("ColumnReferenceSegment"), ), Sequence( "COLUMNS", OneOf( Delimited( Ref("ColumnReferenceSegment"), ), Sequence( Ref("StarSegment"), "EXCEPT", Ref("BracketedColumnReferenceListGrammar"), ), ), ), Sequence( "STORED", "AS", "SCD", "TYPE", Ref("NumericLiteralSegment"), optional=True, ), ) class WildcardExpressionSegment(ansi.WildcardExpressionSegment): """An extension of the star expression for Databricks.""" match_grammar = ansi.WildcardExpressionSegment.match_grammar.copy( insert=[ # Optional EXCEPT clause # https://docs.databricks.com/release-notes/runtime/9.0.html#exclude-columns-in-select--public-preview Ref("ExceptClauseSegment", optional=True), ] ) class ExceptClauseSegment(BaseSegment): """SELECT * EXCEPT clause.""" type = "select_except_clause" match_grammar = Sequence( "EXCEPT", Bracketed(Delimited(Ref("SingleIdentifierGrammar"))), ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement. It's very similar to `SelectClauseSegment` from `dialect_ansi` except it does not set `SetOperatorSegment` as a possible terminator - this is to avoid issues with wrongly recognized `EXCEPT`. """ type = "select_clause" match_grammar = Sequence( "SELECT", OneOf( Ref("TransformClauseSegment"), Sequence( Ref( "SelectClauseModifierSegment", optional=True, ), Indent, Delimited( Ref("SelectClauseElementSegment"), allow_trailing=True, ), ), ), Dedent, terminators=[ "FROM", "WHERE", "UNION", Sequence("ORDER", "BY"), "LIMIT", "OVERLAPS", ], parse_mode=ParseMode.GREEDY_ONCE_STARTED, ) class UsingClauseSegment(BaseSegment): """`USING` clause segment.""" type = "using_clause" match_grammar = Sequence("USING", Ref("DataSourceFormatSegment")) class DataSourceFormatSegment(BaseSegment): """Data source format segment.""" type = "data_source_format" match_grammar = OneOf( Ref("FileFormatGrammar"), # NB: JDBC is part of DataSourceV2 but not included # there since there are no significant syntax changes "JDBC", Ref( "ObjectReferenceSegment" ), # This allows for formats such as org.apache.spark.sql.jdbc ) class IcebergTransformationSegment(BaseSegment): """A transformation expression used in PARTITIONED BY. This segment is used to create hidden partitions in the Iceberg table format. 
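e.g. the bucket(16, id) and days(ts) transforms in PARTITIONED BY (bucket(16, id), days(ts)) (illustrative example).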
https://iceberg.apache.org/docs/latest/spark-ddl/#partitioned-by """ type = "iceberg_transformation" match_grammar = OneOf( Sequence( OneOf( "YEARS", "MONTHS", "DAYS", "DATE", "HOURS", "DATE_HOUR", ), Bracketed(Ref("ColumnReferenceSegment")), ), Sequence( OneOf("BUCKET", "TRUNCATE"), Bracketed( Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("ColumnReferenceSegment"), ) ), ), ) class FrameClauseSegment(ansi.FrameClauseSegment): """A frame clause for window functions. This overrides the ansi dialect frame clause segment as the sparksql frame clause allows for a more expressive frame syntax. https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-window.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), "UNBOUNDED", Ref("IntervalExpressionSegment"), ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_sparksql_keywords.py000066400000000000000000000112621451700765000254010ustar00rootroot00000000000000"""A List of Spark SQL keywords. https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html#sql-keywords """ RESERVED_KEYWORDS = [ "ALL", "AND", "ANY", "AS", "AUTHORIZATION", "BOTH", "CASE", "CAST", "CHECK", "COLLATE", "COLUMN", "CONSTRAINT", "CREATE", "CROSS", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "ELSE", "END", "ESCAPE", "EXCEPT", "FALSE", "FETCH", "FILTER", "FOR", "FOREIGN", "FROM", "FULL", "GRANT", "GROUP", "HAVING", "IN", "INNER", "INTERSECT", "INTO", "IS", "JOIN", "LEADING", "LEFT", "NATURAL", "NOT", "NULL", "ON", "ONLY", "OR", "ORDER", "OUTER", "OVERLAPS", "PRIMARY", "REFERENCES", "RIGHT", "SELECT", "SESSION_USER", "SOME", "TABLE", "THEN", "TO", "TRAILING", "UNION", "UNIQUE", "UNKNOWN", "USER", "USING", "WHEN", "WHERE", "WITH", ] UNRESERVED_KEYWORDS = [ "ADD", "AFTER", "ALTER", "ANALYZE", "ANTI", "ARCHIVE", "ARRAY", "ASC", "AT", "BERNOULLI", "BETWEEN", "BUCKET", "BUCKETS", "BY", "CACHE", "CASCADE", "CHANGE", "CLEAR", "CLUSTER", "CLUSTERED", "CODEGEN", "COLLECTION", "COLUMNS", "COMMENT", "COMMIT", "COMPACT", "COMPACTIONS", "COMPUTE", "CONCATENATE", "COST", "CUBE", "CURRENT", "DATA", "DATE", "DATE_HOUR", "DATABASE", "DATABASES", "DAY", "DAYS", "DBPROPERTIES", "DEFINED", "DELETE", "DELIMITED", "DESC", "DESCRIBE", "DFS", "DIRECTORIES", "DIRECTORY", "DISTINCT", "DISTRIBUTE", "DISTRIBUTED", "DIV", "DROP", "ESCAPED", "EXCHANGE", "EXISTS", "EXPLAIN", "EXPORT", "EXTENDED", "EXTERNAL", "EXTRACT", "FIELD", "FIELDS", "FILEFORMAT", "FIRST", "FOLLOWING", "FORMAT", "FORMATTED", "FUNCTION", "FUNCTIONS", "GLOBAL", "GROUPING", "HOUR", "HOURS", "IDENTIFIER", "IF", "IGNORE", "ILIKE", "IMPORT", "INDEX", "INDEXES", "INPATH", "INPUTFORMAT", "INSERT", "INTERVAL", "ITEMS", "KEYS", "LAST", "LAZY", "LIKE", "LIMIT", "LINES", "LIST", "LOAD", "LOCAL", "LOCALLY", "LOCATION", "LOCK", "LOCKS", "LOGICAL", "MACRO", "MAP", "MATCHED", "MERGE", "MINUTE", "MONTH", "MONTHS", "MSCK", "NAMESPACE", "NAMESPACES", "NO", "NULLS", "OF", "OPTION", "OPTIONS", "ORDERED", "OUT", "OUTPUTFORMAT", "OVER", "OVERLAY", "OVERWRITE", "PARTITION", "PARTITIONED", "PARTITIONS", "PERCENTLIT", "PIVOT", "PLACING", "POSITION", "PRECEDING", "PRINCIPALS", "PROPERTIES", "PURGE", "QUALIFY", "QUERY", "RANGE", "RECORDREADER", "RECORDWRITER", "RECOVER", "REDUCE", "REFRESH", "RENAME", "REPAIR", "REPEATABLE", "REPLACE", "RESET", "RESPECT", 
"RESTRICT", "REVOKE", "RLIKE", "ROLE", "ROLES", "ROLLBACK", "ROLLUP", "ROW", "ROWS", "SCHEMA", "SECOND", "SEMI", "SEPARATED", "SERDE", "SERDEPROPERTIES", "SET", "SETMINUS", "SETS", "SHOW", "SKEWED", "SORT", "SORTED", "START", "STATISTICS", "STORED", "STRATIFY", "STRUCT", "SUBSTR", "SUBSTRING", "SYNC", "SYSTEM", "TABLES", "TABLESAMPLE", "TBLPROPERTIES", "TEMP", "TEMPORARY", "TERMINATED", "TIME", "TOUCH", "TRANSACTION", "TRANSACTIONS", "TRANSFORM", "TRIM", "TRUE", "TRUNCATE", "TRY_CAST", "TYPE", "UNARCHIVE", "UNBOUNDED", "UNCACHE", "UNLOCK", "UNSET", "UPDATE", "USE", "VALUES", "VIEW", "VIEWS", "WRITE", "WINDOW", "YEAR", "YEARS", "ZONE", # Spark Core Data Sources # https://spark.apache.org/docs/latest/sql-data-sources.html "AVRO", "CSV", "JSON", "PARQUET", "ORC", "JDBC", # Community Contributed Data Sources "DELTA", # https://github.com/delta-io/delta "XML", # https://github.com/databricks/spark-xml "ICEBERG", # Delta Lake "DETAIL", "DRY", "GENERATE", "HISTORY", "RETAIN", "RUN", # Databricks - Delta Live Tables "CHANGES", "DELETES", "EXPECT", "FAIL", "LIVE", "SCD", "STREAMING", "UPDATES", "VIOLATION", # Databricks widget "WIDGET", "DROPDOWN", "TEXT", "CHOICES", "REMOVE", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_sqlite.py000066400000000000000000000376361451700765000231300ustar00rootroot00000000000000"""The sqlite dialect. https://www.sqlite.org/ """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, Delimited, LiteralSegment, Matchable, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, Sequence, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_sqlite_keywords import ( RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") sqlite_dialect = ansi_dialect.copy_as("sqlite") sqlite_dialect.sets("reserved_keywords").clear() sqlite_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) sqlite_dialect.sets("unreserved_keywords").clear() sqlite_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) sqlite_dialect.replace( BooleanBinaryOperatorGrammar=OneOf( Ref("AndOperatorGrammar"), Ref("OrOperatorGrammar"), "REGEXP" ), PrimaryKeyGrammar=Sequence( "PRIMARY", "KEY", Sequence("AUTOINCREMENT", optional=True) ), TemporaryTransientGrammar=Ref("TemporaryGrammar"), DateTimeLiteralGrammar=Sequence( OneOf("DATE", "DATETIME"), TypedParser("single_quote", LiteralSegment, type="date_constructor_literal"), ), BaseExpressionElementGrammar=OneOf( Ref("LiteralGrammar"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), Sequence( Ref("DatatypeSegment"), Ref("LiteralGrammar"), ), ), AutoIncrementGrammar=Nothing(), CommentClauseSegment=Nothing(), IntervalExpressionSegment=Nothing(), TimeZoneGrammar=Nothing(), FetchClauseSegment=Nothing(), TrimParametersGrammar=Nothing(), LikeGrammar=Sequence("LIKE"), OverlapsClauseSegment=Nothing(), MLTableExpressionSegment=Nothing(), MergeIntoLiteralGrammar=Nothing(), SamplingExpressionSegment=Nothing(), OrderByClauseTerminators=OneOf( "LIMIT", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "WINDOW", ), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), ), 
GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "WINDOW", ), PostFunctionGrammar=Ref("FilterClauseGrammar"), IgnoreRespectNullsGrammar=Nothing(), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), ), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "OrderByClauseSegment" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Ref("IndexColumnDefinitionSegment"), ), # NOTE: This block was copy/pasted from dialect_ansi.py with these changes made: # - "PRIOR" keyword removed from Expression_A_Unary_Operator_Grammar Expression_A_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), ), IsClauseGrammar=OneOf( "NULL", Ref("BooleanLiteralGrammar"), ), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Minus, Except or Intersect.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. Supports timestamp with(out) time zone. Doesn't currently support intervals. """ type = "data_type" match_grammar: Matchable = OneOf( Sequence( "DOUBLE", "PRECISION", ), Sequence("UNSIGNED", "BIG", "INT"), Sequence( OneOf( Sequence( OneOf("VARYING", "NATIVE"), OneOf("CHARACTER"), ), Ref("DatatypeIdentifierSegment"), ), Ref("BracketedArguments", optional=True), ), ) class TableEndClauseSegment(BaseSegment): """Support Table Options at end of tables. https://www.sqlite.org/syntax/table-options.html """ type = "table_end_clause_segment" match_grammar: Matchable = Delimited(Sequence("WITHOUT", "ROWID"), "STRICT") class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause like in `INSERT`.""" type = "values_clause" match_grammar: Matchable = Sequence( "VALUES", Delimited( Sequence( Bracketed( Delimited( "DEFAULT", Ref("ExpressionSegment"), ), parse_mode=ParseMode.GREEDY, ), ), ), ) class IndexColumnDefinitionSegment(BaseSegment): """A column definition for CREATE INDEX. Overridden from ANSI to allow expressions https://www.sqlite.org/expridx.html. 
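e.g. the "lower(email) DESC" element in CREATE INDEX idx ON users (lower(email) DESC) (illustrative example).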
""" type = "index_column_definition" match_grammar: Matchable = Sequence( OneOf( Ref("SingleIdentifierGrammar"), # Column name Ref("ExpressionSegment"), # Expression for simple functions ), OneOf("ASC", "DESC", optional=True), ) class InsertStatementSegment(BaseSegment): """An`INSERT` statement. https://www.sqlite.org/lang_insert.html """ type = "insert_statement" match_grammar = Sequence( OneOf( Sequence( "INSERT", Sequence( "OR", OneOf( "ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK", ), optional=True, ), ), # REPLACE is just an alias for INSERT OR REPLACE "REPLACE", ), "INTO", Ref("TableReferenceSegment"), Ref("BracketedColumnReferenceListGrammar", optional=True), OneOf( Ref("ValuesClauseSegment"), OptionallyBracketed(Ref("SelectableGrammar")), Ref("DefaultValuesGrammar"), ), ) class ColumnConstraintSegment(ansi.ColumnConstraintSegment): """Overriding ColumnConstraintSegment to allow for additional segment parsing.""" match_grammar = ansi.ColumnConstraintSegment.match_grammar.copy( insert=[ OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ], ) class TableConstraintSegment(ansi.TableConstraintSegment): """Overriding TableConstraintSegment to allow for additional segment parsing.""" match_grammar: Matchable = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( # CHECK ( ) Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))), Sequence( # UNIQUE ( column_name [, ... ] ) "UNIQUE", Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters Ref("PrimaryKeyGrammar"), # Columns making up PRIMARY KEY constraint Ref("BracketedColumnReferenceListGrammar"), # Later add support for index_parameters? ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), Ref( "ReferenceDefinitionGrammar" ), # REFERENCES reftable [ ( refcolumn) ] ), ), OneOf("DEFERRABLE", Sequence("NOT", "DEFERRABLE"), optional=True), OneOf( Sequence("INITIALLY", "DEFERRED"), Sequence("INITIALLY", "IMMEDIATE"), optional=True, ), ) class TransactionStatementSegment(ansi.TransactionStatementSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement. As per https://www.sqlite.org/lang_transaction.html """ type = "transaction_statement" match_grammar: Matchable = Sequence( OneOf("BEGIN", "COMMIT", "ROLLBACK", "END"), OneOf("TRANSACTION", optional=True), Sequence("TO", "SAVEPOINT", Ref("ObjectReferenceSegment"), optional=True), ) class PragmaReferenceSegment(ansi.ObjectReferenceSegment): """A Pragma object.""" type = "pragma_reference" class PragmaStatementSegment(BaseSegment): """A Pragma Statement. 
As per https://www.sqlite.org/pragma.html """ type = "pragma_statement" _pragma_value = OneOf( Ref("LiteralGrammar"), Ref("BooleanLiteralGrammar"), "YES", "NO", "ON", "OFF", "NONE", "FULL", "INCREMENTAL", "DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "NORMAL", "EXCLUSIVE", "FAST", "EXTRA", "DEFAULT", "FILE", "PASSIVE", "RESTART", "RESET", ) match_grammar = Sequence( "PRAGMA", Ref("PragmaReferenceSegment"), Bracketed(_pragma_value, optional=True), Sequence( Ref("EqualsSegment"), OptionallyBracketed(_pragma_value), optional=True ), ) class CreateTriggerStatementSegment(ansi.CreateTriggerStatementSegment): """Create Trigger Statement. https://www.sqlite.org/lang_createtrigger.html """ type = "create_trigger" match_grammar: Matchable = Sequence( "CREATE", Ref("TemporaryGrammar", optional=True), "TRIGGER", Ref("IfNotExistsGrammar", optional=True), Ref("TriggerReferenceSegment"), OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF"), optional=True), OneOf( "DELETE", "INSERT", Sequence( "UPDATE", Sequence( "OF", Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), ), ), "ON", Ref("TableReferenceSegment"), Sequence("FOR", "EACH", "ROW", optional=True), Sequence("WHEN", Bracketed(Ref("ExpressionSegment")), optional=True), "BEGIN", Delimited( Ref("UpdateStatementSegment"), Ref("InsertStatementSegment"), Ref("DeleteStatementSegment"), Ref("SelectableGrammar"), delimiter=AnyNumberOf(Ref("DelimiterGrammar"), min_times=1), allow_gaps=True, allow_trailing=True, ), "END", ) class UnorderedSelectStatementSegment(BaseSegment): """A `SELECT` statement without any ORDER clauses or later. Replaces (without overriding) ANSI to remove Eager Matcher """ type = "select_statement" match_grammar = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("OverlapsClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement. Replaces (without overriding) ANSI to remove Eager Matcher """ type = "select_statement" # Remove the Limit and Window statements from ANSI match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("FetchClauseSegment", optional=True), Ref("LimitClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ] ) class CreateIndexStatementSegment(ansi.CreateIndexStatementSegment): """A `CREATE INDEX` statement. 
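e.g. CREATE UNIQUE INDEX IF NOT EXISTS idx_email ON users (lower(email) DESC) WHERE email IS NOT NULL (illustrative example).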
As per https://www.sqlite.org/lang_createindex.html """ type = "create_index_statement" match_grammar: Matchable = Sequence( "CREATE", Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), ) ), Ref("WhereClauseSegment", optional=True), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = OneOf( Ref("AlterTableStatementSegment"), Ref("CreateIndexStatementSegment"), Ref("CreateTableStatementSegment"), Ref("CreateTriggerStatementSegment"), Ref("CreateViewStatementSegment"), Ref("DeleteStatementSegment"), Ref("DropIndexStatementSegment"), Ref("DropTableStatementSegment"), Ref("DropTriggerStatementSegment"), Ref("DropViewStatementSegment"), Ref("ExplainStatementSegment"), Ref("InsertStatementSegment"), Ref("PragmaStatementSegment"), Ref("SelectableGrammar"), Ref("TransactionStatementSegment"), Ref("UpdateStatementSegment"), Bracketed(Ref("StatementSegment")), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_sqlite_keywords.py000066400000000000000000000055141451700765000250450ustar00rootroot00000000000000"""A List of SQLite keywords. https://www.sqlite.org/lang_keywords.html Augmented with data types, and a couple of omitted keywords. """ RESERVED_KEYWORDS = [ "ABORT", "ACTION", "ADD", "AFTER", "ALL", "ALTER", "ALWAYS", "ANALYZE", "AND", "AS", "ASC", "ATTACH", "AUTOINCREMENT", "BEFORE", "BEGIN", "BETWEEN", "BY", "CASCADE", "CASE", "CAST", "CHECK", "COLLATE", "COLUMN", "COMMIT", "CONFLICT", "CONSTRAINT", "CREATE", "CROSS", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "DATABASE", "DEFAULT", "DEFERRABLE", "DEFERRED", "DELETE", "DESC", "DETACH", "DISTINCT", "DO", "DROP", "EACH", "ELSE", "END", "ESCAPE", "EXCEPT", "EXCLUDE", "EXCLUSIVE", "EXISTS", "EXPLAIN", "FAIL", "FILTER", "FIRST", "FOLLOWING", "FOR", "FOREIGN", "FROM", "FULL", "GENERATED", "GLOB", "GROUP", "GROUPS", "HAVING", "IF", "IGNORE", "IMMEDIATE", "IN", "INDEX", "INDEXED", "INITIALLY", "INNER", "INSERT", "INSTEAD", "INTERSECT", "INTO", "IS", "ISNULL", "JOIN", "KEY", "LAST", "LEFT", "LIKE", "LIMIT", "MATCH", "MATERIALIZED", "NATURAL", "NO", "NOT", "NOTHING", "NOTNULL", "NULL", "NULLS", "OF", "OFFSET", "ON", "OR", "ORDER", "OTHERS", "OUTER", "OVER", "PARTITION", "PLAN", "PRAGMA", "PRECEDING", "PRIMARY", "QUERY", "RAISE", "RANGE", "RECURSIVE", "REFERENCES", "REGEXP", "REINDEX", "RELEASE", "RENAME", "REPLACE", "RESTRICT", "RETURNING", "RIGHT", "ROLLBACK", "ROW", "ROWS", "SAVEPOINT", "SELECT", "SET", "TABLE", "TEMP", "TEMPORARY", "THEN", "TIES", "TO", "TRANSACTION", "TRIGGER", "UNBOUNDED", "UNION", "UNIQUE", "UPDATE", "USING", "VACUUM", "VALUES", "VIEW", "VIRTUAL", "WHEN", "WHERE", "WINDOW", "WITH", "WITHOUT", ] UNRESERVED_KEYWORDS = [ "INT", "INTEGER", "TINYINT", "SMALLINT", "MEDIUMINT", "BIGINT", "UNSIGNED", "INT2", "INT8", "CHARACTER", "VARCHAR", "VARYING", "NCHAR", "NATIVE", "NVARCHAR", "TEXT", "CLOB", "BLOB", "REAL", "BIG", "DOUBLE", "PRECISION", "FLOAT", "NUMERIC", "DECIMAL", "BOOLEAN", "DATE", "DATETIME", "ROWID", "YES", "OFF", "NONE", "INCREMENTAL", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "NORMAL", "FAST", "EXTRA", "FILE", "PASSIVE", "RESTART", "RESET", "STRICT", "BINARY", "NOCASE", "RTRIM", ] sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_teradata.py000066400000000000000000000663751451700765000234160ustar00rootroot00000000000000"""The Teradata dialect. 
This inherits from the ansi dialect, with changes as specified by Teradata Database SQL Data Definition Language Syntax and Examples Release Number 15.10 Release Date December 2015 """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, CodeSegment, ComparisonOperatorSegment, CompositeComparisonOperatorSegment, Dedent, Delimited, Indent, Matchable, OneOf, OptionallyBracketed, Ref, RegexLexer, Sequence, StringParser, ) from sqlfluff.dialects import dialect_ansi as ansi ansi_dialect = load_raw_dialect("ansi") teradata_dialect = ansi_dialect.copy_as("teradata") teradata_dialect.patch_lexer_matchers( [ # so it also matches 1. RegexLexer( "numeric_literal", r"([0-9]+(\.[0-9]*)?)", CodeSegment, ), ] ) # Remove unused keywords from the dialect. teradata_dialect.sets("unreserved_keywords").difference_update( [ # 'auto_increment', # The following are moved to being reserved keywords "UNION", "TIMESTAMP", ] ) teradata_dialect.sets("unreserved_keywords").update( [ "AUTOINCREMENT", "ACTIVITYCOUNT", "CASESPECIFIC", "CS", "DAYS", "DEL", "DUAL", "ERRORCODE", "EXPORT", "FALLBACK", "FORMAT", "HASH", "IMPORT", "JOURNAL", "LABEL", "LOGON", "LOGOFF", "MACRO", "MAXINTERVALS", "MAXVALUELENGTH", "MEETS", "MERGEBLOCKRATIO", "NONE", "PERCENT", "PROFILE", "PROTECTION", "QUERY_BAND", "QUIT", "RUN", "SAMPLE", "SEL", "SS", "STAT", "STATS", "STATISTICS", "SUMMARY", "THRESHOLD", "UC", "UPPERCASE", ] ) teradata_dialect.sets("reserved_keywords").update(["UNION", "TIMESTAMP"]) teradata_dialect.sets("bare_functions").update(["DATE"]) teradata_dialect.replace( # ANSI standard comparison operators plus Teradata extensions ComparisonOperatorGrammar=OneOf( Ref("EqualsSegment"), Ref("EqualsSegment_a"), Ref("GreaterThanSegment"), Ref("GreaterThanSegment_a"), Ref("LessThanSegment"), Ref("LessThanSegment_a"), Ref("GreaterThanOrEqualToSegment"), Ref("GreaterThanOrEqualToSegment_a"), Ref("LessThanOrEqualToSegment"), Ref("LessThanOrEqualToSegment_a"), Ref("NotEqualToSegment"), Ref("NotEqualToSegment_a"), Ref("NotEqualToSegment_b"), Ref("NotEqualToSegment_c"), Ref("LikeOperatorSegment"), Sequence("IS", "DISTINCT", "FROM"), Sequence("IS", "NOT", "DISTINCT", "FROM"), ) ) teradata_dialect.add( # Add Teradata comparison operator extensions EqualsSegment_a=StringParser("EQ", ComparisonOperatorSegment), GreaterThanSegment_a=StringParser("GT", ComparisonOperatorSegment), LessThanSegment_a=StringParser("LT", ComparisonOperatorSegment), GreaterThanOrEqualToSegment_a=StringParser("GE", ComparisonOperatorSegment), LessThanOrEqualToSegment_a=StringParser("LE", ComparisonOperatorSegment), NotEqualToSegment_a=StringParser("NE", ComparisonOperatorSegment), NotEqualToSegment_b=StringParser("NOT=", ComparisonOperatorSegment), NotEqualToSegment_c=StringParser("^=", ComparisonOperatorSegment), ) # BTEQ statement class BteqKeyWordSegment(BaseSegment): """Bteq Keywords. Often a string with a dot, sometimes followed by a Literal LOGON - Used to log into Teradata system. ACTIVITYCOUNT - Returns the number of rows affected by the previous query. ERRORCODE - Returns the status code of the previous query. DATABASE - Sets the default database. LABEL - Assigns a label to a set of SQL commands. RUN FILE - Executes the query contained in a file. GOTO - Transfers control to a label. LOGOFF - Logs off from database and terminates all sessions. IMPORT - Specifies the input file path. EXPORT - Specifies the output file path and initiates the export. 
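e.g. .LOGON tdpid/user,password or .IF ERRORCODE <> 0 THEN .QUIT 1 (illustrative BTEQ fragments, not taken from the Teradata docs).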
""" type = "bteq_key_word_segment" match_grammar = Sequence( Ref("DotSegment", optional=True), OneOf( "IF", "THEN", "LOGON", "ACTIVITYCOUNT", "ERRORCODE", "DATABASE", "LABEL", "GOTO", "LOGOFF", "IMPORT", "EXPORT", "RUN", "QUIT", "ACTIVITYCOUNT", ), Ref("LiteralGrammar", optional=True), ) class BteqStatementSegment(BaseSegment): """Bteq statements start with a dot, followed by a Keyword. Non exhaustive and maybe catching too many statements? # BTEQ commands .if errorcode > 0 then .quit 2 .IF ACTIVITYCOUNT = 0 THEN .QUIT """ type = "bteq_statement" match_grammar = Sequence( Ref("DotSegment"), Ref("BteqKeyWordSegment"), AnyNumberOf( Ref("BteqKeyWordSegment"), # if ... then: the ... Sequence( Ref("ComparisonOperatorGrammar"), Ref("LiteralGrammar"), optional=True ), optional=True, ), ) class TdCollectStatUsingOptionClauseSegment(BaseSegment): """'using_option' for COLLECT STAT clause.""" type = "collect_stat_using_option_clause" match_grammar = Sequence( OneOf( Sequence("SAMPLE", Ref("NumericLiteralSegment"), "PERCENT"), Sequence("SYSTEM", "THRESHOLD", OneOf("PERCENT", "DAYS", optional=True)), Sequence("SYSTEM", "SAMPLE"), Sequence( "THRESHOLD", Ref("NumericLiteralSegment"), OneOf("PERCENT", "DAYS"), ), Sequence("NO", "THRESHOLD", OneOf("PERCENT", "DAYS", optional=True)), Sequence("NO", "SAMPLE"), Sequence("MAXINTERVALS", Ref("NumericLiteralSegment")), Sequence("SYSTEM", "MAXINTERVALS"), Sequence("MAXVALUELENGTH", Ref("NumericLiteralSegment")), Sequence("SYSTEM", "MAXVALUELENGTH"), "SAMPLE", ), Sequence("FOR", "CURRENT", optional=True), ) class TdOrderByStatClauseSegment(BaseSegment): """An `ORDER BY (VALUES|HASH) (column_name)` clause in COLLECT STATS.""" type = "stat_orderby_clause" match_grammar = Sequence( "ORDER", "BY", OneOf("VALUES", "HASH"), Bracketed(Ref("ColumnReferenceSegment")) ) # Collect Statistics statement class TdCollectStatisticsStatementSegment(BaseSegment): """A `COLLECT STATISTICS (Optimizer Form)` statement. # TODO: add expression COLLECT [SUMMARY] (STATISTICS|STAT) [[COLUMN| [UNIQUE] INDEX] (expression (, expression ...)] ON TABLENAME """ type = "collect_statistics_statement" match_grammar = Sequence( "COLLECT", Ref.keyword("SUMMARY", optional=True), OneOf("STAT", "STATS", "STATISTICS"), Sequence( "USING", Delimited( Ref("TdCollectStatUsingOptionClauseSegment"), delimiter="AND", ), optional=True, ), Delimited( OneOf( # UNIQUE INDEX index_name ALL (column_name, ...) ORDER BY VALUES|HASH # (column_name) Sequence( Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IndexReferenceSegment", optional=True), Ref.keyword("ALL", optional=True), Bracketed(Delimited(Ref("ColumnReferenceSegment"))), Ref("TdOrderByStatClauseSegment", optional=True), ), # UNIQUE INDEX index_name Sequence( Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("IndexReferenceSegment"), ), # COLUMN ... Sequence( "COLUMN", OptionallyBracketed( Delimited( OneOf( Ref("ColumnReferenceSegment"), Ref.keyword("PARTITION"), # TODO: expression ), ), ), Sequence( Ref.keyword("AS", optional=True), Ref("ObjectReferenceSegment"), # statistics_name optional=True, ), ), ), optional=True, ), "ON", Ref.keyword("TEMPORARY", optional=True), Ref("TableReferenceSegment"), ) class TdCommentStatementSegment(BaseSegment): """A `COMMENT` statement. 
COMMENT [ON] (object_kind_1|object_kind_2) name [[AS|IS] comment] object_kind_1: (COLUMN|FUNCTION|GLOP SET|MACRO|MAP|METHOD|PROCEDURE|PROFILE|ROLE| TRIGGER|TYPE|VIEW) object_kind_2: (DATABASE|FILE|TABLE|USER) """ type = "comment_clause" is_ddl = True is_dml = False is_dql = False is_dcl = False match_grammar = Sequence( "COMMENT", OneOf("ON", optional=True), OneOf( Sequence("COLUMN", Ref("ColumnReferenceSegment")), Sequence("FUNCTION", Ref("ObjectReferenceSegment")), Sequence("MACRO", Ref("ObjectReferenceSegment")), Sequence("MAP", Ref("ObjectReferenceSegment")), Sequence("METHOD", Ref("ObjectReferenceSegment")), Sequence("PROCEDURE", Ref("ObjectReferenceSegment")), Sequence("PROFILE", Ref("ObjectReferenceSegment")), Sequence("ROLE", Ref("ObjectReferenceSegment")), Sequence("TRIGGER", Ref("ObjectReferenceSegment")), Sequence("TYPE", Ref("ObjectReferenceSegment")), Sequence("VIEW", Ref("TableReferenceSegment")), Sequence("DATABASE", Ref("DatabaseReferenceSegment")), Sequence("FILE", Ref("ObjectReferenceSegment")), Sequence("TABLE", Ref("TableReferenceSegment")), Sequence("USER", Ref("ObjectReferenceSegment")), ), Sequence( OneOf("AS", "IS", optional=True), Ref("QuotedLiteralSegment"), optional=True, ), ) # Rename table statement class TdRenameStatementSegment(BaseSegment): """A `RENAME TABLE` statement. https://docs.teradata.com/reader/eWpPpcMoLGQcZEoyt5AjEg/Kl~F4lxPauOELYJVuFLjag RENAME TABLE OLD_TABLENAME (TO|AS) NEW_TABLENAME """ type = "rename_table_statement" match_grammar = Sequence( "RENAME", "TABLE", Ref("TableReferenceSegment"), OneOf( "TO", "AS", ), Ref("TableReferenceSegment"), ) # Adding Teradata specific DATE FORMAT 'YYYYMM' class DatatypeSegment(ansi.DatatypeSegment): """A data type segment. DATE FORMAT 'YYYY-MM-DD' """ match_grammar = Sequence( Ref("DatatypeIdentifierSegment"), Ref("BracketedArguments", optional=True), Bracketed( OneOf( Delimited(Ref("ExpressionSegment")), # The brackets might be empty for some cases... optional=True, ), # There may be no brackets for some data types optional=True, ), Sequence( # FORMAT 'YYYY-MM-DD', "FORMAT", Ref("QuotedLiteralSegment"), optional=True ), ) class TeradataCastSegment(BaseSegment): """A casting operation using Teradata conversion syntax. https://docs.teradata.com/reader/kmuOwjp1zEYg98JsB8fu_A/ypGGhd87xi3E2E7SlNS1Xg # Teradata Conversion Syntax in Explicit Data Type Conversions expression ([data_attribute,] data_type [, data_attribute]) with data_type := a data type declaration such as INTEGER or DATE data_attribute := a data attribute such as FORMAT, NAMED or TITLE e.g. '9999-12-31' (DATE), '9999-12-31' (DATE FORMAT 'YYYY-MM-DD') '100000' (SMALLINT) DATE FORMAT 'E4,BM4BDD,BY4' DATE '2007-01-01' """ type = "cast_expression" match_grammar = Bracketed(Ref("DatatypeSegment")) class ExpressionSegment(BaseSegment): """A expression, either arithmetic or boolean. We extend the expression segment in teradata to enable casting. """ type = "expression" match_grammar = Sequence( Ref("Expression_A_Grammar"), Ref("TeradataCastSegment", optional=True), ) # Adding Teradata specific column definitions class ColumnDefinitionSegment(BaseSegment): """A column definition, e.g. 
for CREATE TABLE or ALTER TABLE.""" type = "column_definition" match_grammar = Sequence( Ref("ColumnReferenceSegment"), # Column name Ref("DatatypeSegment"), # Column type Bracketed(Anything(), optional=True), # For types like VARCHAR(100) AnyNumberOf( Ref("ColumnConstraintSegment", optional=True), # Adding Teradata specific column definitions Ref("TdColumnConstraintSegment", optional=True), ), ) class TdColumnConstraintSegment(BaseSegment): """Teradata specific column attributes. e.g. CHARACTER SET LATIN | [NOT] (CASESPECIFIC|CS) | (UPPERCASE|UC) """ type = "td_column_attribute_constraint" match_grammar = Sequence( OneOf( Sequence( # CHARACTER SET LATIN "CHARACTER", "SET", Ref("SingleIdentifierGrammar") ), Sequence( # [NOT] CASESPECIFIC Ref.keyword("NOT", optional=True), OneOf("CASESPECIFIC", "CS"), ), OneOf("UPPERCASE", "UC"), Sequence( # COMPRESS [(1.,3.) | 3. | NULL], "COMPRESS", OneOf( Bracketed(Delimited(Ref("LiteralGrammar"))), Ref("LiteralGrammar"), "NULL", optional=True, ), ), ), ) # Create Teradata Create Table Statement class TdCreateTableOptions(BaseSegment): """CreateTableOptions. , NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL, CHECKSUM = DEFAULT , DEFAULT MERGEBLOCKRATIO """ type = "create_table_options_statement" match_grammar = Sequence( Ref("CommaSegment"), Delimited( OneOf( # [ NO ] FALLBACK [ PROTECTION ] Sequence( Ref.keyword("NO", optional=True), "FALLBACK", Ref.keyword("PROTECTION", optional=True), ), # [NO | DUAL | LOCAL |NOT LOCAL] [AFTER | BEFORE] JOURNAL Sequence( OneOf( "NO", "DUAL", "LOCAL", Sequence("NOT", "LOCAL"), optional=True ), OneOf("BEFORE", "AFTER", optional=True), "JOURNAL", ), # CHECKSUM = (ON|OFF|DEFAULT) Sequence( "CHECKSUM", Ref("EqualsSegment"), OneOf( "ON", "OFF", "DEFAULT", ), ), # (NO|Default) MergeBlockRatio Sequence( OneOf( "DEFAULT", "NO", ), "MERGEBLOCKRATIO", ), # MergeBlockRatio = integer [PERCENT] Sequence( "MERGEBLOCKRATIO", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("PERCENT", optional=True), ), ), ), ) class TdTablePartitioningLevel(BaseSegment): """Partitioning Level. https://docs.teradata.com/reader/eWpPpcMoLGQcZEoyt5AjEg/e0GX8Iw16u1SCwYvc5qXzg partition_expression or COLUMN [[NO] AUTO COMPRESS] [[ALL BUT] column_partition] [ADD constant] column_partition := ([COLUMN|ROW] column_name (, column_name2, ...) NO AUTOCOMPRESS partition_expression := CASE_N, RANGE_N, EXTRACT, expression and in case of multi-level in parenthesis """ type = "td_partitioning_level" match_grammar = OneOf( Sequence( Ref("FunctionNameSegment"), Bracketed(Anything(optional=True)), ), Bracketed( Delimited( Sequence( Ref("FunctionNameSegment"), Bracketed(Anything(optional=True)), ), ), ), ) class TdTableConstraints(BaseSegment): """Teradata specific table attributes. e.g. UNIQUE PRIMARY INDEX Column_name | ( Column_name, ... ) NO PRIMARY INDEX ... """ type = "td_table_constraint" match_grammar = AnyNumberOf( # PRIMARY Index OneOf( Sequence( # UNIQUE PRIMARY INDEX Column_name | ( Column_name, ... ) Ref.keyword("UNIQUE", optional=True), "PRIMARY", "INDEX", Ref("ObjectReferenceSegment", optional=True), # primary index name OneOf( Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ) ), Ref("SingleIdentifierGrammar"), ), ), Sequence("NO", "PRIMARY", "INDEX"), # NO PRIMARY INDEX ), # PARTITION BY ... 
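# e.g. (illustrative) PARTITION BY RANGE_N(fec_anio_mes BETWEEN 202001 AND 202212 EACH 1) # - the Bracketed(Anything()) in TdTablePartitioningLevel accepts arbitrary # partition expression contents, so RANGE_N/CASE_N bodies are not parsed in detail.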
Sequence( # INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA ); "PARTITION", "BY", Ref("TdTablePartitioningLevel"), ), # Index Sequence( # INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA ); Ref.keyword("UNIQUE", optional=True), "INDEX", Ref("ObjectReferenceSegment"), # Index name Ref.keyword("ALL", optional=True), Bracketed( # Columns making up constraint Delimited(Ref("ColumnReferenceSegment")), ), ), # WITH DATA Sequence("WITH", Sequence("NO", optional=True), "DATA"), # AND STATISITCS Sequence( "AND", Sequence("NO", optional=True), OneOf("STAT", "STATS", "STATISTICS"), optional=True, ), # ON COMMIT PRESERVE ROWS Sequence("ON", "COMMIT", OneOf("PRESERVE", "DELETE"), "ROWS"), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE [MULTISET| SET] TABLE` statement.""" type = "create_table_statement" match_grammar = Sequence( "CREATE", Sequence("OR", "REPLACE", optional=True), # Adding Teradata specific [MULTISET| SET] OneOf("SET", "MULTISET", optional=True), OneOf(Sequence("GLOBAL", "TEMPORARY"), "VOLATILE", optional=True), "TABLE", Sequence("IF", "NOT", "EXISTS", optional=True), Ref("TableReferenceSegment"), # , NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL Ref("TdCreateTableOptions", optional=True), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("ColumnDefinitionSegment"), Ref("TableConstraintSegment"), ), ) ), Ref("CommentClauseSegment", optional=True), ), # Create AS syntax: Sequence("AS", Ref("SelectableGrammar")), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), # PRIMARY INDEX( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) OneOf(Ref("TdTableConstraints"), optional=True), ) # Update class UpdateStatementSegment(BaseSegment): """A `Update from` statement. The UPDATE statement FROM clause is a Teradata extension to the ANSI SQL:2011 standard. UPDATE (
<table name> | FROM Statement) SET <set clause list> [ WHERE <search condition> ] """ type = "update_statement" match_grammar = Sequence( "UPDATE", OneOf( Ref("TableReferenceSegment"), Ref("FromUpdateClauseSegment"), Sequence( Ref("TableReferenceSegment"), Ref("FromUpdateClauseSegment"), ), ), Ref("SetClauseListSegment"), Ref("WhereClauseSegment", optional=True), ) class FromUpdateClauseSegment(BaseSegment): """A `FROM` clause like in `SELECT` but terminated by SET.""" type = "from_in_update_clause" match_grammar = Sequence( "FROM", Delimited( # Optional old school delimited joins Ref("FromExpressionElementSegment"), ), ) # Adding Teradata specific statements class StatementSegment(ansi.StatementSegment): """A generic segment, to any of its child subsegments.""" type = "statement" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("TdCollectStatisticsStatementSegment"), Ref("BteqStatementSegment"), Ref("TdRenameStatementSegment"), Ref("QualifyClauseSegment"), Ref("TdCommentStatementSegment"), Ref("DatabaseStatementSegment"), Ref("SetSessionStatementSegment"), Ref("SetQueryBandStatementSegment"), ], ) class QualifyClauseSegment(BaseSegment): """A `QUALIFY` clause like in `SELECT`.""" type = "qualify_clause" match_grammar = Sequence( "QUALIFY", Indent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class SelectStatementSegment(ansi.SelectStatementSegment): """A `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ match_grammar = ansi.SelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OrderByClauseSegment", optional=True), ) class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """An unordered `SELECT` statement. https://dev.mysql.com/doc/refman/5.7/en/select.html """ match_grammar = ansi.UnorderedSelectStatementSegment.match_grammar.copy( insert=[Ref("QualifyClauseSegment", optional=True)], before=Ref("OverlapsClauseSegment", optional=True), ) class SelectClauseSegment(ansi.SelectClauseSegment): """A group of elements in a select target statement. Remove OVERLAPS as a terminator as this can be part of SelectClauseModifierSegment """ match_grammar = ansi.SelectClauseSegment.match_grammar.copy( # Allow "SEL" in place of just "SELECT" insert=[OneOf("SELECT", "SEL")], before=Ref.keyword("SELECT"), remove=[Ref.keyword("SELECT")], terminators=[ "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), ], replace_terminators=True, ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. DEL[ETE] FROM <table name> 
[ WHERE ] """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar: Matchable = Sequence( OneOf("DELETE", "DEL"), Ref("FromClauseSegment"), Ref("WhereClauseSegment", optional=True), ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns. Adds NORMALIZE clause: https://docs.teradata.com/r/2_MC9vCtAJRlKle2Rpb0mA/UuxiA0mklFgv~33X5nyKMA """ type = "select_clause_modifier" match_grammar = OneOf( "DISTINCT", "ALL", Sequence( "TOP", Ref("ExpressionSegment"), Sequence("PERCENT", optional=True), Sequence("WITH", "TIES", optional=True), ), Sequence( "NORMALIZE", OneOf( Sequence( "ON", "MEETS", "OR", "OVERLAPS", ), Sequence( "ON", "OVERLAPS", ), Sequence( "ON", "OVERLAPS", "OR", "MEETS", ), optional=True, ), ), ) class DatabaseStatementSegment(BaseSegment): """A `DATABASE` statement. https://docs.teradata.com/r/Teradata-Database-SQL-Data-Definition-Language-Syntax-and-Examples/December-2015/Database-Statements/DATABASE """ type = "database_statement" match_grammar: Matchable = Sequence( "DATABASE", Ref("DatabaseReferenceSegment"), ) # Limited to SET SESSION DATABASE for now. # Many other session parameters may be set via SET SESSION. class SetSessionStatementSegment(BaseSegment): """A `SET SESSION` statement. https://docs.teradata.com/r/Teradata-Database-SQL-Data-Definition-Language-Syntax-and-Examples/December-2015/Session-Statements/SET-SESSION-DATABASE """ type = "set_session_statement" match_grammar: Matchable = Sequence( OneOf( Sequence("SET", "SESSION"), "SS", ), Ref("DatabaseStatementSegment"), ) class SetQueryBandStatementSegment(BaseSegment): """A `SET QUERY_BAND` statement. SET QUERY_BAND = { 'band_specification [...]' | NONE } [ UPDATE ] FOR { SESSION [VOLATILE] | TRANSACTION } [;] https://docs.teradata.com/r/Teradata-VantageTM-SQL-Data-Definition-Language-Syntax-and-Examples/July-2021/Session-Statements/SET-QUERY_BAND """ type = "set_query_band_statement" match_grammar: Matchable = Sequence( "SET", "QUERY_BAND", Ref("EqualsSegment"), OneOf(Ref("QuotedLiteralSegment"), "NONE"), Sequence("UPDATE", optional=True), "FOR", OneOf(Sequence("SESSION", Sequence("VOLATILE", optional=True)), "TRANSACTION"), ) class NotEqualToSegment_b(CompositeComparisonOperatorSegment): """The comparison operator extension NOT=. https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators """ match_grammar = Sequence( Ref("NotOperatorGrammar"), Ref("RawEqualsSegment"), allow_gaps=False ) class NotEqualToSegment_c(CompositeComparisonOperatorSegment): """The comparison operator extension ^=. https://www.docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/Supported-Comparison-Operators """ match_grammar = Sequence( Ref("BitwiseXorSegment"), Ref("RawEqualsSegment"), allow_gaps=False ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_trino.py000066400000000000000000000207471451700765000227550ustar00rootroot00000000000000"""The Trino dialect. 
See https://trino.io/docs/current/language.html """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, Anything, BaseSegment, Bracketed, Delimited, LiteralSegment, Matchable, Nothing, OneOf, Ref, Sequence, TypedParser, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_trino_keywords import ( trino_reserved_keywords, trino_unreserved_keywords, ) ansi_dialect = load_raw_dialect("ansi") trino_dialect = ansi_dialect.copy_as("trino") # Set the bare functions: https://trino.io/docs/current/functions/datetime.html trino_dialect.sets("bare_functions").update( ["current_date", "current_time", "current_timestamp", "localtime", "localtimestamp"] ) # Set keywords trino_dialect.sets("unreserved_keywords").clear() trino_dialect.update_keywords_set_from_multiline_string( "unreserved_keywords", trino_unreserved_keywords ) trino_dialect.sets("reserved_keywords").clear() trino_dialect.update_keywords_set_from_multiline_string( "reserved_keywords", trino_reserved_keywords ) trino_dialect.replace( DateTimeLiteralGrammar=OneOf( Sequence( OneOf("DATE", "TIME", "TIMESTAMP"), TypedParser( "single_quote", LiteralSegment, type="date_constructor_literal" ), ), Ref("IntervalExpressionSegment"), ), LikeGrammar=Sequence("LIKE"), # TODO: There are no custom SQL functions in Trino! How to handle this? MLTableExpressionSegment=Nothing(), FromClauseTerminatorGrammar=OneOf( "WHERE", "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "WINDOW", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("WithDataClauseSegment"), "FETCH", ), OrderByClauseTerminators=OneOf( "LIMIT", "HAVING", # For window functions "WINDOW", Ref("FrameClauseUnitGrammar"), "FETCH", ), SelectClauseTerminatorGrammar=OneOf( "FROM", "WHERE", Sequence("ORDER", "BY"), "LIMIT", Ref("SetOperatorSegment"), "FETCH", ), WhereClauseTerminatorGrammar=OneOf( "LIMIT", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", "WINDOW", "FETCH", ), HavingClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "WINDOW", "FETCH", ), GroupByClauseTerminatorGrammar=OneOf( Sequence("ORDER", "BY"), "LIMIT", "HAVING", "WINDOW", "FETCH", ), # NOTE: This block was copy/pasted from dialect_ansi.py with these changes made: # - "PRIOR" keyword removed Expression_A_Unary_Operator_Grammar=OneOf( Ref( "SignedSegmentGrammar", exclude=Sequence(Ref("QualifiedNumericLiteralSegment")), ), Ref("TildeSegment"), Ref("NotOperatorGrammar"), ), FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # Trim function Sequence( Ref("TrimParametersGrammar"), Ref("ExpressionSegment", optional=True, exclude=Ref.keyword("FROM")), "FROM", Ref("ExpressionSegment"), ), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref( "OrderByClauseSegment" ), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake).. 
# like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Ref("IgnoreRespectNullsGrammar"), Ref("IndexColumnDefinitionSegment"), Ref("EmptyStructLiteralSegment"), ), ) class DatatypeSegment(BaseSegment): """Data type segment. See https://trino.io/docs/current/language/types.html """ type = "data_type" match_grammar = OneOf( # Boolean "BOOLEAN", # Integer "TINYINT", "SMALLINT", "INTEGER", "BIGINT", # Floating-point "REAL", "DOUBLE", # Fixed-precision Sequence( "DECIMAL", Ref("BracketedArguments", optional=True), ), # String Sequence( OneOf("CHAR", "VARCHAR"), Ref("BracketedArguments", optional=True), ), "VARBINARY", "JSON", # Date and time "DATE", Sequence( OneOf("TIME", "TIMESTAMP"), Bracketed(Ref("NumericLiteralSegment"), optional=True), Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True), ), # Structural "ARRAY", "MAP", "ROW", # Others "IPADDRESS", "UUID", ) class OverlapsClauseSegment(BaseSegment): """An `OVERLAPS` clause like in `SELECT.""" type = "overlaps_clause" match_grammar: Matchable = Nothing() class UnorderedSelectStatementSegment(ansi.UnorderedSelectStatementSegment): """A `SELECT` statement without any ORDER clauses or later.""" match_grammar: Matchable = Sequence( Ref("SelectClauseSegment"), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), Ref("NamedWindowSegment", optional=True), ) class ValuesClauseSegment(ansi.ValuesClauseSegment): """A `VALUES` clause within in `WITH`, `SELECT`, `INSERT`.""" match_grammar = Sequence( "VALUES", Delimited(Ref("ExpressionSegment")), ) class IntervalExpressionSegment(BaseSegment): """An interval representing a span of time. https://trino.io/docs/current/language/types.html#interval-year-to-month https://trino.io/docs/current/functions/datetime.html#date-and-time-operators """ type = "interval_expression" match_grammar = Sequence( "INTERVAL", Ref("QuotedLiteralSegment"), OneOf("YEAR", "MONTH", "DAY", "HOUR", "MINUTE", "SECOND"), ) class FrameClauseSegment(BaseSegment): """A frame clause for window functions. https://trino.io/blog/2021/03/10/introducing-new-window-features.html """ type = "frame_clause" _frame_extent = OneOf( Sequence("CURRENT", "ROW"), Sequence( OneOf( Ref("NumericLiteralSegment"), Ref("DateTimeLiteralGrammar"), "UNBOUNDED" ), OneOf("PRECEDING", "FOLLOWING"), ), ) match_grammar: Matchable = Sequence( Ref("FrameClauseUnitGrammar"), OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)), ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Intersect or Except.""" type = "set_operator" match_grammar: Matchable = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), Sequence( OneOf( "INTERSECT", "EXCEPT", ), Ref.keyword("ALL", optional=True), ), exclude=Sequence("EXCEPT", Bracketed(Anything())), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( remove=[ Ref("TransactionStatementSegment"), ], ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_trino_keywords.py000066400000000000000000000045341451700765000247000ustar00rootroot00000000000000"""Keywords in the Trino Dialect. 
- https://trino.io/docs/current/language/reserved.html - https://github.com/trinodb/trino/blob/ master/core/trino-parser/src/main/antlr4/io/trino/sql/parser/SqlBase.g4 """ trino_reserved_keywords = """ALTER AND AS BETWEEN BY CASE CAST CONSTRAINT CREATE CROSS CUBE CURRENT_CATALOG CURRENT_DATE CURRENT_PATH CURRENT_ROLE CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_USER DEALLOCATE DELETE DESCRIBE DISTINCT DROP ELSE END ESCAPE EXCEPT EXECUTE EXISTS EXTRACT FALSE FOR FROM FULL GROUP GROUPING HAVING IN INNER INSERT INTERSECT INTO IS JOIN JSON_ARRAY JSON_EXISTS JSON_OBJECT JSON_QUERY JSON_TABLE JSON_VALUE LEFT LIKE LISTAGG LOCALTIME LOCALTIMESTAMP NATURAL NORMALIZE NOT NULL ON OR ORDER OUTER PREPARE RECURSIVE RIGHT ROLLUP SELECT SKIP TABLE THEN TRIM TRUE UESCAPE UNION UNNEST USING VALUES WHEN WHERE WITH """ trino_unreserved_keywords = """ABSENT ADD ADMIN AFTER ALL ANALYZE ANY ARRAY ASC AT AUTHORIZATION BERNOULLI BIGINT BOOLEAN BOTH CALL CASCADE CATALOG CATALOGS CHAR COLUMN COLUMNS COMMENT COMMIT COMMITTED CONDITIONAL COPARTITION COUNT CURRENT DATA DATE DAY DECIMAL DEFAULT DEFINE DEFINER DENY DESC DESCRIPTOR DISTRIBUTED DOUBLE EMPTY ENCODING ERROR EXCLUDING EXPLAIN FETCH FILTER FINAL FIRST FOLLOWING FORMAT FUNCTIONS GRACE GRANT GRANTED GRANTS GRAPHVIZ GROUPS HOUR IF IGNORE IMMEDIATE INCLUDING INITIAL INPUT INTEGER INTERVAL INVOKER IO IPADDRESS ISOLATION JSON KEEP KEY KEYS LAST LATERAL LEADING LEVEL LIMIT LOCAL LOGICAL MAP MATCH MATCHED MATCHES MATCH_RECOGNIZE MATERIALIZED MEASURES MERGE MINUTE MONTH NESTED NEXT NFC NFD NFKC NFKD NO NONE NULLIF NULLS OBJECT OF OFFSET OMIT ONE ONLY OPTION ORDINALITY OUTPUT OVER OVERFLOW PARTITION PARTITIONS PASSING PAST PATH PATTERN PER PERIOD PERMUTE PLAN POSITION PRECEDING PRECISION PRIVILEGES PROPERTIES PRUNE QUOTES RANGE READ REAL REFRESH RENAME REPEATABLE REPLACE RESET RESPECT RESTRICT RETURNING REVOKE ROLE ROLES ROLLBACK ROW ROWS RUNNING SCALAR SCHEMA SCHEMAS SECOND SECURITY SEEK SERIALIZABLE SESSION SET SETS SHOW SMALLINT SOME START STATS SUBSET SUBSTRING SYSTEM TABLES TABLESAMPLE TEXT TEXT_STRING TIES TIME TIMESTAMP TINYINT TO TRAILING TRANSACTION TRUNCATE TRY_CAST TYPE UNBOUNDED UNCOMMITTED UNCONDITIONAL UNIQUE UNKNOWN UNMATCHED UPDATE USE USER UTF16 UTF32 UTF8 UUID VALIDATE VALUE VARBINARY VARCHAR VERBOSE VERSION VIEW WINDOW WITHIN WITHOUT WORK WRAPPER WRITE YEAR ZONE """ sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_tsql.py000066400000000000000000005523421451700765000226060ustar00rootroot00000000000000"""The MSSQL T-SQL dialect. 
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/language-elements-transact-sql """ from sqlfluff.core.dialects import load_raw_dialect from sqlfluff.core.parser import ( AnyNumberOf, AnySetOf, BaseFileSegment, BaseSegment, Bracketed, CodeSegment, CommentSegment, CompositeComparisonOperatorSegment, Conditional, Dedent, Delimited, IdentifierSegment, ImplicitIndent, Indent, LiteralSegment, Matchable, MultiStringParser, NewlineSegment, Nothing, OneOf, OptionallyBracketed, ParseMode, Ref, RegexLexer, RegexParser, SegmentGenerator, Sequence, TypedParser, WhitespaceSegment, WordSegment, ) from sqlfluff.dialects import dialect_ansi as ansi from sqlfluff.dialects.dialect_tsql_keywords import ( FUTURE_RESERVED_KEYWORDS, RESERVED_KEYWORDS, UNRESERVED_KEYWORDS, ) ansi_dialect = load_raw_dialect("ansi") tsql_dialect = ansi_dialect.copy_as("tsql") tsql_dialect.sets("reserved_keywords").clear() tsql_dialect.sets("unreserved_keywords").clear() tsql_dialect.sets("future_reserved_keywords").clear() tsql_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS) tsql_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS) tsql_dialect.sets("future_reserved_keywords").update(FUTURE_RESERVED_KEYWORDS) # Set the datetime units tsql_dialect.sets("datetime_units").clear() tsql_dialect.sets("datetime_units").update( [ "D", "DAY", "DAYS", "DAYOFYEAR", "DD", "DW", "DY", "HH", "HOUR", "INFINITE", "M", "MCS", "MI", "MICROSECOND", "MILLISECOND", "MINUTE", "MM", "MONTH", "MONTHS", "MS", "N", "NANOSECOND", "NS", "Q", "QQ", "QUARTER", "S", "SECOND", "SS", "W", "WEEK", "WEEKS", "WEEKDAY", "WK", "WW", "YEAR", "YEARS", "Y", "YY", "YYYY", ] ) tsql_dialect.sets("date_part_function_name").clear() tsql_dialect.sets("date_part_function_name").update( ["DATEADD", "DATEDIFF", "DATEDIFF_BIG", "DATENAME", "DATEPART"] ) tsql_dialect.sets("date_format").clear() tsql_dialect.sets("date_format").update( [ "mdy", "dmy", "ymd", "myd", "dym", ] ) tsql_dialect.sets("bare_functions").update( ["system_user", "session_user", "current_user"] ) tsql_dialect.sets("sqlcmd_operators").clear() tsql_dialect.sets("sqlcmd_operators").update(["r", "setvar"]) tsql_dialect.sets("file_compression").clear() tsql_dialect.sets("file_compression").update( [ "'org.apache.hadoop.io.compress.GzipCodec'", "'org.apache.hadoop.io.compress.DefaultCodec'", "'org.apache.hadoop.io.compress.SnappyCodec'", ] ) tsql_dialect.sets("file_encoding").clear() tsql_dialect.sets("file_encoding").update( [ "'UTF8'", "'UTF16'", ] ) tsql_dialect.sets("serde_method").clear() tsql_dialect.sets("serde_method").update( [ "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'", "'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'", ] ) tsql_dialect.insert_lexer_matchers( [ RegexLexer( "atsign", r"[@][a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "var_prefix", r"[$][a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "square_quote", r"\[([^\[\]]*)*\]", CodeSegment, ), # T-SQL unicode strings RegexLexer( "single_quote_with_n", r"N'([^']|'')*'", CodeSegment, ), RegexLexer( "hash_prefix", r"[#][#]?[a-zA-Z0-9_]+", CodeSegment, ), RegexLexer( "unquoted_relative_sql_file_path", # currently there is no way to pass `regex.IGNORECASE` flag to `RegexLexer` r"[.\w\\/#-]+\.[sS][qQ][lL]", CodeSegment, ), ], before="back_quote", ) tsql_dialect.patch_lexer_matchers( [ # Patching single_quote to allow for TSQL-style escaped quotes RegexLexer( "single_quote", r"'([^']|'')*'", CodeSegment, ), # Patching comments to remove hash comments RegexLexer( "inline_comment", r"(--)[^\n]*", 
CommentSegment, segment_kwargs={"trim_start": ("--")}, ), # Patching block comments to account for nested blocks. # N.B. this syntax is only possible via the non-standard-library # (but still backwards compatible) `regex` package. # https://pypi.org/project/regex/ # Pattern breakdown: # /\* Match opening slash. # (?> Atomic grouping # (https://www.regular-expressions.info/atomic.html). # [^*/]+ Non forward-slash or asterisk characters. # |\*(?!\/) Negative lookahead assertion to match # asterisks not followed by a forward-slash. # |/[^*] Match lone forward-slashes not followed by an asterisk. # )* Match any number of the atomic group contents. # (?> # (?R) Recursively match the block comment pattern # to match nested block comments. # (?> # [^*/]+ # |\*(?!\/) # |/[^*] # )* # )* # \*/ Match closing slash. RegexLexer( "block_comment", r"/\*(?>[^*/]+|\*(?!\/)|/[^*])*(?>(?R)(?>[^*/]+|\*(?!\/)|/[^*])*)*\*/", CommentSegment, subdivider=RegexLexer( "newline", r"\r\n|\n", NewlineSegment, ), trim_post_subdivide=RegexLexer( "whitespace", r"[^\S\r\n]+", WhitespaceSegment, ), ), RegexLexer( "word", r"[0-9a-zA-Z_#@]+", WordSegment ), # overriding to allow hash mark and at-sign in code ] ) tsql_dialect.add( BracketedIdentifierSegment=TypedParser( "square_quote", IdentifierSegment, type="quoted_identifier" ), HashIdentifierSegment=TypedParser( "hash_prefix", IdentifierSegment, type="hash_identifier" ), VariableIdentifierSegment=TypedParser( "var_prefix", IdentifierSegment, type="variable_identifier" ), BatchDelimiterGrammar=Ref("GoStatementSegment"), QuotedLiteralSegmentWithN=TypedParser( "single_quote_with_n", LiteralSegment, type="quoted_literal" ), QuotedLiteralSegmentOptWithN=OneOf( Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), ), TransactionGrammar=OneOf( "TRANSACTION", "TRAN", ), SystemVariableSegment=RegexParser( r"@@[A-Za-z0-9_]+", CodeSegment, type="system_variable" ), StatementAndDelimiterGrammar=Sequence( Ref("StatementSegment"), Ref("DelimiterGrammar", optional=True), ), OneOrMoreStatementsGrammar=AnyNumberOf( Ref("StatementAndDelimiterGrammar"), min_times=1, ), TopPercentGrammar=Sequence( "TOP", OptionallyBracketed(Ref("ExpressionSegment")), Ref.keyword("PERCENT", optional=True), ), CursorNameGrammar=OneOf( Sequence(Ref.keyword("GLOBAL", optional=True), Ref("NakedIdentifierSegment")), Ref("ParameterNameSegment"), ), CredentialGrammar=Sequence( "IDENTITY", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), Sequence( Ref("CommaSegment"), "SECRET", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), AzureBlobStoragePath=RegexParser( r"'https://[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\.blob\.core\.windows\.net/[a-z0-9]" r"[a-z0-9\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="external_location", ), AzureDataLakeStorageGen2Path=RegexParser( r"'https://[a-z0-9][a-z0-9-]{1,61}[a-z0-9]\.dfs\.core\.windows\.net/[a-z0-9]" r"[a-z0-9\.-]{1,61}[a-z0-9](?:/.+)?'", CodeSegment, type="external_location", ), SqlcmdOperatorSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("sqlcmd_operators"), CodeSegment, type="sqlcmd_operator", ) ), SqlcmdFilePathSegment=TypedParser( "unquoted_relative_sql_file_path", CodeSegment, type="unquoted_relative_sql_file_path", ), FileCompressionSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("file_compression"), CodeSegment, type="file_compression", ) ), FileEncodingSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("file_encoding"), CodeSegment, type="file_encoding", ) ), 
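    # The file_compression / file_encoding / serde_method sets above back the
    # Azure Synapse external file format options, e.g. (an illustrative
    # fragment based on the MS docs, not a test fixture):
    #   CREATE EXTERNAL FILE FORMAT ff WITH (
    #       FORMAT_TYPE = DELIMITEDTEXT,
    #       DATA_COMPRESSION = 'org.apache.hadoop.io.compress.GzipCodec'
    #   )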
SerdeMethodSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("serde_method"), CodeSegment, type="serde_method", ) ), ProcedureParameterGrammar=Sequence( Ref("ParameterNameSegment", optional=True), Sequence("AS", optional=True), Ref("DatatypeSegment"), AnySetOf("VARYING", Sequence("NOT", optional=True), "NULL"), Sequence(Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True), ), DateFormatSegment=SegmentGenerator( lambda dialect: MultiStringParser( dialect.sets("date_format"), CodeSegment, type="date_format", ) ), ) tsql_dialect.replace( # Overriding to cover TSQL allowed identifier name characters # https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-identifiers?view=sql-server-ver15 NakedIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords lambda dialect: RegexParser( r"[A-Z_][A-Z0-9_@$#]*", IdentifierSegment, type="naked_identifier", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords") | dialect.sets("future_reserved_keywords") ) + r")$", ) ), # Overring ANSI BaseExpressionElement to remove Interval Expression Segment BaseExpressionElementGrammar=ansi_dialect.get_grammar( "BaseExpressionElementGrammar" ).copy( remove=[ Ref("IntervalExpressionSegment"), ] ), SingleIdentifierGrammar=OneOf( Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment"), Ref("BracketedIdentifierSegment"), Ref("HashIdentifierSegment"), Ref("ParameterNameSegment"), Ref("VariableIdentifierSegment"), ), LiteralGrammar=ansi_dialect.get_grammar("LiteralGrammar") .copy( insert=[ Ref("QuotedLiteralSegmentWithN"), ], before=Ref("NumericLiteralSegment"), remove=[ Ref("ArrayLiteralSegment"), Ref("ObjectLiteralSegment"), ], ) .copy( insert=[ Ref("ParameterNameSegment"), Ref("SystemVariableSegment"), ], ), ParameterNameSegment=RegexParser(r"@[A-Za-z0-9_]+", CodeSegment, type="parameter"), FunctionParameterGrammar=Sequence( Ref("ParameterNameSegment", optional=True), Sequence("AS", optional=True), Ref("DatatypeSegment"), Sequence("NULL", optional=True), Sequence(Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True), ), FunctionNameIdentifierSegment=SegmentGenerator( # Generate the anti template from the set of reserved keywords # minus the function names that are reserved words. 
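        # e.g. accepts "my_func" or "[my_func]" but, via the anti template,
        # rejects bare reserved words such as SELECT (examples illustrative;
        # matching is case-insensitive).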
lambda dialect: RegexParser( r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]", CodeSegment, type="function_name_identifier", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords") | dialect.sets("future_reserved_keywords") ) + r")$", ) ), # Override ANSI IsClauseGrammar to remove TSQL non-keyword NAN IsClauseGrammar=OneOf( "NULL", Ref("BooleanLiteralGrammar"), ), DatatypeIdentifierSegment=SegmentGenerator( # Generate the anti template reserved keywords lambda dialect: OneOf( RegexParser( r"[A-Z][A-Z0-9_]*|\[[A-Z][A-Z0-9_]*\]", CodeSegment, type="data_type_identifier", # anti_template=r"^(NOT)$", anti_template=r"^(" + r"|".join( dialect.sets("reserved_keywords") | dialect.sets("future_reserved_keywords") ) + r")$", # TODO - this is a stopgap until we implement explicit data types ), Ref("SingleIdentifierGrammar", exclude=Ref("NakedIdentifierSegment")), ), ), PrimaryKeyGrammar=Sequence( OneOf( Sequence( "PRIMARY", "KEY", ), "UNIQUE", ), OneOf( "CLUSTERED", "NONCLUSTERED", optional=True, ), ), FromClauseTerminatorGrammar=OneOf( "WHERE", Sequence("GROUP", "BY"), Sequence("ORDER", "BY"), "HAVING", Ref("SetOperatorSegment"), Ref("WithNoSchemaBindingClauseSegment"), Ref("DelimiterGrammar"), ), # Replace ANSI LikeGrammar to remove TSQL non-keywords RLIKE and ILIKE LikeGrammar=Sequence( "LIKE", ), # Replace ANSI FunctionContentsGrammar to remove TSQL non-keyword Separator # TODO: fully represent TSQL functionality FunctionContentsGrammar=AnyNumberOf( Ref("ExpressionSegment"), # A Cast-like function Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")), # An extract-like or substring-like function Sequence( OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")), "FROM", Ref("ExpressionSegment"), ), Sequence( # Allow an optional distinct keyword here. Ref.keyword("DISTINCT", optional=True), OneOf( # Most functions will be using the delimited route # but for COUNT(*) or similar we allow the star segment # here. Ref("StarSegment"), Delimited(Ref("FunctionContentsExpressionGrammar")), ), ), Ref("OrderByClauseSegment"), # used by string_agg (postgres), group_concat (exasol),listagg (snowflake)... # like a function call: POSITION ( 'QL' IN 'SQL') Sequence( OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), "IN", OneOf( Ref("QuotedLiteralSegment"), Ref("SingleIdentifierGrammar"), Ref("ColumnReferenceSegment"), ), ), Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"), ), JoinTypeKeywordsGrammar=Sequence( OneOf( "INNER", Sequence( OneOf( "FULL", "LEFT", "RIGHT", ), Ref.keyword("OUTER", optional=True), ), ), OneOf( "LOOP", "HASH", "MERGE", optional=True, ), optional=True, ), JoinKeywordsGrammar=OneOf("JOIN", "APPLY"), NaturalJoinKeywordsGrammar=Ref.keyword("CROSS"), ExtendedNaturalJoinKeywordsGrammar=Sequence("OUTER", "APPLY"), NestedJoinGrammar=Sequence( Indent, Ref("JoinClauseSegment"), Dedent, ), # Replace Expression_D_Grammar to remove casting syntax invalid in TSQL Expression_D_Grammar=Sequence( OneOf( Ref("BareFunctionSegment"), Ref("FunctionSegment"), Bracketed( OneOf( # We're using the expression segment here rather than the grammar so # that in the parsed structure we get nested elements. Ref("ExpressionSegment"), Ref("SelectableGrammar"), Delimited( Ref( "ColumnReferenceSegment" ), # WHERE (a,b,c) IN (select a,b,c FROM...) Ref( "FunctionSegment" ), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...) Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...) 
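                        # The tuple elements may also be mixed, e.g.
                        # WHERE (a, UPPER(b), 2) IN (SELECT x, y, z FROM ...)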
), ), parse_mode=ParseMode.GREEDY, ), # Allow potential select statement without brackets Ref("SelectStatementSegment"), Ref("LiteralGrammar"), Ref("ColumnReferenceSegment"), Ref("TypedArrayLiteralSegment"), Ref("ArrayLiteralSegment"), ), Ref("AccessorGrammar", optional=True), allow_gaps=True, ), MergeIntoLiteralGrammar=Sequence( "MERGE", Ref("TopPercentGrammar", optional=True), Ref.keyword("INTO", optional=True), ), TrimParametersGrammar=Nothing(), TemporaryGrammar=Nothing(), JoinLikeClauseGrammar=AnySetOf( Ref("PivotUnpivotStatementSegment"), min_times=1, ), CollateGrammar=Sequence("COLLATE", Ref("CollationReferenceSegment")), ) class StatementSegment(ansi.StatementSegment): """Overriding StatementSegment to allow for additional segment parsing.""" match_grammar = ansi.StatementSegment.match_grammar.copy( insert=[ Ref("IfExpressionStatement"), Ref("DeclareStatementSegment"), Ref("DeclareCursorStatementSegment"), Ref("SetStatementSegment"), Ref("AlterTableSwitchStatementSegment"), Ref("PrintStatementSegment"), Ref( "CreateTableAsSelectStatementSegment" ), # Azure Synapse Analytics specific Ref("RenameStatementSegment"), # Azure Synapse Analytics specific Ref("ExecuteScriptSegment"), Ref("DropStatisticsStatementSegment"), Ref("DropProcedureStatementSegment"), Ref("UpdateStatisticsStatementSegment"), Ref("BeginEndSegment"), Ref("TryCatchSegment"), Ref("MergeStatementSegment"), Ref("ThrowStatementSegment"), Ref("RaiserrorStatementSegment"), Ref("ReturnStatementSegment"), Ref("GotoStatement"), Ref("LabelStatementSegment"), Ref("DisableTriggerStatementSegment"), Ref("WhileExpressionStatement"), Ref("BreakStatement"), Ref("ContinueStatement"), Ref("WaitForStatementSegment"), Ref("OpenCursorStatementSegment"), Ref("CloseCursorStatementSegment"), Ref("DeallocateCursorStatementSegment"), Ref("FetchCursorStatementSegment"), Ref("CreateTypeStatementSegment"), Ref("CreateSynonymStatementSegment"), Ref("DropSynonymStatementSegment"), Ref("BulkInsertStatementSegment"), Ref("AlterIndexStatementSegment"), Ref("CreateDatabaseScopedCredentialStatementSegment"), Ref("CreateExternalDataSourceStatementSegment"), Ref("SqlcmdCommandSegment"), Ref("CreateExternalFileFormat"), Ref("CreateExternalTableStatementSegment"), Ref("DropExternalTableStatementSegment"), Ref("CopyIntoTableStatementSegment"), Ref("CreateFullTextIndexStatementSegment"), Ref("AtomicBeginEndSegment"), ], remove=[ Ref("CreateModelStatementSegment"), Ref("DropModelStatementSegment"), Ref("DescribeStatementSegment"), ], ) class GreaterThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator. N.B. Patching to add !< and to allow spaces between operators. """ match_grammar = OneOf( Sequence( Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawLessThanSegment"), ), ) class LessThanOrEqualToSegment(CompositeComparisonOperatorSegment): """Greater than or equal to operator. N.B. Patching to add !> and to allow spaces between operators. """ match_grammar = OneOf( Sequence( Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), ), Sequence( Ref("RawNotSegment"), Ref("RawGreaterThanSegment"), ), ) class NotEqualToSegment(CompositeComparisonOperatorSegment): """Not equal to operator. N.B. Patching to allow spaces between operators. 
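    For example `a != b`, `a <> b` and the spaced form `a ! = b` all match.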
""" match_grammar = OneOf( Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment")), Sequence(Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment")), ) class SelectClauseElementSegment(ansi.SelectClauseElementSegment): """An element in the targets of a select statement. Overriding ANSI to remove greedy logic which assumes statements have been delimited """ # Important to split elements before parsing, otherwise debugging is really hard. match_grammar = OneOf( # *, blah.*, blah.blah.*, etc. Ref("WildcardExpressionSegment"), Sequence( Ref("AltAliasExpressionSegment"), Ref("BaseExpressionElementGrammar"), ), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), ) class AltAliasExpressionSegment(BaseSegment): """An alternative alias clause as used by tsql using `=`.""" type = "alias_expression" match_grammar = Sequence( OneOf( Ref("SingleIdentifierGrammar"), Ref("SingleQuotedIdentifierSegment"), ), Ref("RawEqualsSegment"), ) class SelectClauseModifierSegment(BaseSegment): """Things that come after SELECT but before the columns.""" type = "select_clause_modifier" match_grammar = OneOf( "DISTINCT", "ALL", Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/queries/top-transact-sql?view=sql-server-ver15 "TOP", OptionallyBracketed(Ref("ExpressionSegment")), Sequence("PERCENT", optional=True), Sequence("WITH", "TIES", optional=True), ), ) class SelectClauseSegment(BaseSegment): """A group of elements in a select target statement. Overriding ANSI to remove greedy logic which assumes statements have been delimited """ type = "select_clause" match_grammar: Matchable = Sequence( "SELECT", Ref("SelectClauseModifierSegment", optional=True), Indent, # NOTE: Don't allow trailing. Delimited(Ref("SelectClauseElementSegment")), Dedent, # NOTE: In TSQL - this grammar is NOT greedy. ) class UnorderedSelectStatementSegment(BaseSegment): """A `SELECT` statement without any ORDER clauses or later. We need to change ANSI slightly to remove LimitClauseSegment and NamedWindowSegment which don't exist in T-SQL. We also need to get away from ANSI's use of terminators. There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "select_statement" match_grammar = Sequence( Ref("SelectClauseSegment"), Ref("IntoTableSegment", optional=True), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("GroupByClauseSegment", optional=True), Ref("HavingClauseSegment", optional=True), ) class InsertStatementSegment(BaseSegment): """An `INSERT` statement. Overriding ANSI definition to remove terminator logic that doesn't handle optional delimitation well. """ type = "insert_statement" match_grammar = Sequence( "INSERT", Ref.keyword("INTO", optional=True), Ref("TableReferenceSegment"), Ref("PostTableExpressionGrammar", optional=True), Ref("BracketedColumnReferenceListGrammar", optional=True), Ref("OutputClauseSegment", optional=True), OneOf( Ref("SelectableGrammar"), Ref("ExecuteScriptSegment"), Ref("DefaultValuesGrammar"), ), ) class BulkInsertStatementSegment(BaseSegment): """A `BULK INSERT` statement. 
https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql?view=sql-server-ver16 """ type = "bulk_insert_statement" match_grammar = Sequence( "BULK", "INSERT", Ref("TableReferenceSegment"), "FROM", Ref("QuotedLiteralSegment"), Ref("BulkInsertStatementWithSegment", optional=True), ) class BulkInsertStatementWithSegment(BaseSegment): """A `WITH` segment in the BULK INSERT statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/bulk-insert-transact-sql?view=sql-server-ver16 """ type = "bulk_insert_with_segment" match_grammar = Sequence( "WITH", Bracketed( Delimited( AnyNumberOf( Sequence( OneOf( "BATCHSIZE", "FIRSTROW", "KILOBYTES_PER_BATCH", "LASTROW", "MAXERRORS", "ROWS_PER_BATCH", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( OneOf( "CODEPAGE", "DATAFILETYPE", "DATA_SOURCE", "ERRORFILE", "ERRORFILE_DATA_SOURCE", "FORMATFILE_DATA_SOURCE", "ROWTERMINATOR", "FORMAT", "FIELDQUOTE", "FORMATFILE", "FIELDTERMINATOR", ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ORDER", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf("ASC", "DESC", optional=True), ), ), ), ), "CHECK_CONSTRAINTS", "FIRE_TRIGGERS", "KEEPIDENTITY", "KEEPNULLS", "TABLOCK", ) ) ), ) class WithCompoundStatementSegment(BaseSegment): """A `SELECT` statement preceded by a selection of `WITH` clauses. `WITH tab (col1,col2) AS (SELECT a,b FROM x)` Overriding ANSI to remove the greedy use of terminators. """ type = "with_compound_statement" # match grammar match_grammar = Sequence( "WITH", Ref.keyword("RECURSIVE", optional=True), Conditional(Indent, indented_ctes=True), Delimited( Ref("CTEDefinitionSegment"), terminators=["SELECT"], ), Conditional(Dedent, indented_ctes=True), OneOf( Ref("NonWithSelectableGrammar"), Ref("NonWithNonSelectableGrammar"), Ref("MergeStatementSegment"), ), ) class SelectStatementSegment(BaseSegment): """A `SELECT` statement. We need to change ANSI slightly to remove LimitClauseSegment and NamedWindowSegment which don't exist in T-SQL. We also need to get away from ANSI's use of terminators. There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "select_statement" # Remove the Limit and Window statements from ANSI match_grammar = UnorderedSelectStatementSegment.match_grammar.copy( insert=[ Ref("OrderByClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), Ref("ForClauseSegment", optional=True), ] ) class IntoTableSegment(BaseSegment): """`INTO` clause within `SELECT`. https://docs.microsoft.com/en-us/sql/t-sql/queries/select-into-clause-transact-sql?view=sql-server-ver15 """ type = "into_table_clause" match_grammar = Sequence("INTO", Ref("ObjectReferenceSegment")) class WhereClauseSegment(BaseSegment): """A `WHERE` clause like in `SELECT` or `INSERT`. Overriding ANSI in order to get away from the use of terminators. There's not a clean list of terminators that can be used to identify the end of a TSQL select statement. Semi-colon is optional. """ type = "where_clause" match_grammar = Sequence( "WHERE", ImplicitIndent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class CreateIndexStatementSegment(BaseSegment): """A `CREATE INDEX` or `CREATE STATISTICS` statement. 
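    e.g. `CREATE NONCLUSTERED INDEX ix_orders_date ON dbo.orders (order_date)
    WITH (FILLFACTOR = 90)` (illustrative; names are arbitrary).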
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/statements/create-statistics-transact-sql?view=sql-server-ver15 """ type = "create_index_statement" match_grammar = Sequence( "CREATE", Ref("OrReplaceGrammar", optional=True), Sequence("UNIQUE", optional=True), OneOf("CLUSTERED", "NONCLUSTERED", optional=True), OneOf("INDEX", "STATISTICS"), Ref("IfNotExistsGrammar", optional=True), Ref("IndexReferenceSegment"), Indent, "ON", Ref("TableReferenceSegment"), Ref("BracketedIndexColumnListGrammar"), Sequence( "INCLUDE", Ref("BracketedColumnReferenceListGrammar"), optional=True, ), Ref("WhereClauseSegment", optional=True), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), Ref("DelimiterGrammar", optional=True), Dedent, ) class CreateFullTextIndexStatementSegment(BaseSegment): """A `CREATE FULLTEXT INDEX` statement. https://learn.microsoft.com/fr-fr/sql/t-sql/statements/create-fulltext-index-transact-sql?view=sql-server-ver16 """ type = "create_fulltext_index_statement" _catalog_filegroup_option = Sequence( "ON", Delimited( AnySetOf( Ref("ObjectReferenceSegment"), Sequence( "FILEGROUP", Ref("ObjectReferenceSegment"), ), ), allow_trailing=True, ), optional=True, ) _with_option = Sequence( "WITH", Bracketed( OneOf( Sequence( "CHANGE_TRACKING", Ref("EqualsSegment", optional=True), OneOf( "MANUAL", "AUTO", Delimited( "OFF", Sequence( "NO", "POPULATION", optional=True, ), ), ), ), Sequence( "STOPLIST", Ref("EqualsSegment", optional=True), OneOf( "OFF", "SYSTEM", Ref("ObjectReferenceSegment"), ), ), Sequence( "SEARCH", "PROPERTY", "LIST", Ref("EqualsSegment", optional=True), Ref("ObjectReferenceSegment"), ), ), ), optional=True, ) match_grammar = Sequence( "CREATE", "FULLTEXT", "INDEX", "ON", Ref("TableReferenceSegment"), Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), AnySetOf( Sequence( "TYPE", "COLUMN", Ref("DatatypeSegment"), ), Sequence( "LANGUAGE", OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), optional=True, ), ), "STATISTICAL_SEMANTICS", ), ), ), ), Sequence( "KEY", "INDEX", Ref("ObjectReferenceSegment"), _catalog_filegroup_option, ), _with_option, ) class AlterIndexStatementSegment(BaseSegment): """An ALTER INDEX statement. As per. 
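    (e.g. `ALTER INDEX ALL ON dbo.orders REBUILD` and
    `ALTER INDEX ix_a ON dbo.orders REORGANIZE` are accepted forms;
    names are illustrative)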
https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-index-transact-sql?view=sql-server-ver15 """ type = "alter_index_statement" _low_priority_lock_wait = Sequence( "WAIT_AT_LOW_PRIORITY", Bracketed( Sequence( "MAX_DURATION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Ref("CommaSegment"), Sequence( "ABORT_AFTER_WAIT", Ref("EqualsSegment"), OneOf( "NONE", "SELF", "BLOCKERS", ), ), ), ) _on_partitions = Sequence( Sequence( "ON", "PARTITIONS", ), Bracketed( Delimited( Ref("NumericLiteralSegment"), ), Sequence( "TO", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ) _rebuild_index_option = AnyNumberOf( Sequence( OneOf( "PAD_INDEX", "SORT_IN_TEMPDB", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", "STATISTICS_INCREMENTAL", "RESUMABLE", "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( OneOf( "MAXDOP", "FILLFACTOR", "MAX_DURATION", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( _low_priority_lock_wait, optional=True, ), ), "OFF", ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", "COLUMNSTORE_ARCHIVE", ), _on_partitions, ), Sequence( "XML_COMPRESSION", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), _on_partitions, ), ) _single_partition_rebuild_index_option = AnyNumberOf( Sequence( OneOf( "XML_COMPRESSION", "SORT_IN_TEMPDB", "RESUMABLE", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( OneOf( "MAXDOP", "MAX_DURATION", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", "COLUMNSTORE_ARCHIVE", ), ), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( _low_priority_lock_wait, optional=True, ), ), "OFF", ), ), ) match_grammar = Sequence( "ALTER", "INDEX", OneOf( Ref("ObjectReferenceSegment"), "ALL", ), "ON", Ref("TableReferenceSegment"), OneOf( Sequence( "REBUILD", OneOf( Sequence( Sequence( "PARTITION", Ref("EqualsSegment"), "ALL", optional=True, ), Sequence( "WITH", Bracketed( Delimited( _rebuild_index_option, ) ), optional=True, ), ), Sequence( Sequence( "PARTITION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "WITH", Bracketed( Delimited( _single_partition_rebuild_index_option, ), ), optional=True, ), ), optional=True, ), ), "DISABLE", Sequence( "REORGANIZE", Sequence( "PARTITION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "WITH", Bracketed( Sequence( OneOf( "LOB_COMPACTION", "COMPRESS_ALL_ROW_GROUPS", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), ), optional=True, ), ), Sequence( "SET", Bracketed( Delimited( AnyNumberOf( Sequence( OneOf( "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), Sequence( "COMPRESSION_DELAY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), ), ), ), ), Sequence( "RESUME", Sequence( "WITH", Bracketed( Delimited( Sequence( OneOf( "MAX_DURATION", "MAXDOP", ), Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Ref.keyword("MINUTES", optional=True), ), _low_priority_lock_wait, ), ), optional=True, ), ), "PAUSE", "ABORT", ), ) class 
OnPartitionOrFilegroupOptionSegment(BaseSegment): """ON partition scheme or filegroup option. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ type = "on_partition_or_filegroup_statement" match_grammar = OneOf( Ref("PartitionSchemeClause"), Ref("FilegroupClause"), Ref("LiteralGrammar"), # for "default" value ) class FilestreamOnOptionSegment(BaseSegment): """FILESTREAM_ON index option in `CREATE INDEX` and 'CREATE TABLE' statements. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ type = "filestream_on_option_statement" match_grammar = Sequence( "FILESTREAM_ON", OneOf( Ref("FilegroupNameSegment"), Ref("PartitionSchemeNameSegment"), OneOf( "NULL", Ref("LiteralGrammar"), # for "default" value ), ), ) class TextimageOnOptionSegment(BaseSegment): """TEXTIMAGE ON option in `CREATE TABLE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ type = "textimage_on_option_statement" match_grammar = Sequence( "TEXTIMAGE_ON", OneOf( Ref("FilegroupNameSegment"), Ref("LiteralGrammar"), # for "default" value ), ) class TableOptionSegment(BaseSegment): """TABLE option in `CREATE TABLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ _ledger_view_option = Delimited( Sequence( OneOf( "TRANSACTION_ID_COLUMN_NAME", "SEQUENCE_NUMBER_COLUMN_NAME", "OPERATION_TYPE_COLUMN_NAME", "OPERATION_TYPE_DESC_COLUMN_NAME", ), Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), optional=True, ), ) _on_partitions = Sequence( Sequence( "ON", "PARTITIONS", ), Bracketed( Delimited( Ref("NumericLiteralSegment"), ), Sequence( "TO", Ref("NumericLiteralSegment"), optional=True, ), ), optional=True, ) type = "table_option_statement" match_grammar = Sequence( "WITH", Bracketed( Delimited( AnyNumberOf( Sequence("MEMORY_OPTIMIZED", Ref("EqualsSegment"), "ON"), Sequence( "DURABILITY", Ref("EqualsSegment"), OneOf("SCHEMA_ONLY", "SCHEMA_AND_DATA"), ), Sequence( "SYSTEM_VERSIONING", Ref("EqualsSegment"), "ON", Bracketed( Delimited( AnyNumberOf( Sequence( "HISTORY_TABLE", Ref("EqualsSegment"), Ref("TableReferenceSegment"), ), Sequence( "HISTORY_RETENTION_PERIOD", Ref("EqualsSegment"), OneOf( "INFINITE", Sequence( Ref( "NumericLiteralSegment", optional=True, ), OneOf( "DAYS", "WEEKS", "MONTHS", "YEARS", ), optional=True, ), ), ), Sequence( Ref("CommaSegment"), "DATA_CONSISTENCY_CHECK", Ref("EqualsSegment"), OneOf("ON", "OFF"), ), ), ), ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", ), _on_partitions, ), Sequence( "XML_COMPRESSION", Ref("EqualsSegment"), OneOf("ON", "OFF"), _on_partitions, ), Sequence( "FILETABLE_DIRECTORY", Ref("EqualsSegment"), Ref("LiteralGrammar"), ), Sequence( OneOf( "FILETABLE_COLLATE_FILENAME", "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME", "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME", "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME", ), Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "REMOTE_DATA_ARCHIVE", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( Delimited( Sequence( "FILTER_PREDICATE", Ref("EqualsSegment"), OneOf( "NULL", Ref("FunctionNameSegment"), ), optional=True, ), Sequence( "MIGRATION_STATE", 
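                                            # e.g. REMOTE_DATA_ARCHIVE = ON
                                            # (MIGRATION_STATE = OUTBOUND),
                                            # per the Stretch Database docs
                                            # (illustrative).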
Ref("EqualsSegment"), OneOf("OUTBOUND", "INBOUND", "PAUSED"), ), ), optional=True, ), ), Sequence( "OFF", Bracketed( "MIGRATION_STATE", Ref("EqualsSegment"), "PAUSED", ), ), ), ), Sequence( "DATA_DELETION", Ref("EqualsSegment"), "ON", Bracketed( "FILTER_COLUMN", Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), Ref("CommaSegment"), "RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), ), ), Sequence( "LEDGER", Ref("EqualsSegment"), OneOf( Sequence( "ON", Bracketed( Delimited( Sequence( "LEDGER_VIEW", Ref("EqualsSegment"), Ref("TableReferenceSegment"), Bracketed( _ledger_view_option, optional=True ), optional=True, ), Sequence( "APPEND_ONLY", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), ), optional=True, ), ), "OFF", ), ), ) ) ), ) class ReferencesConstraintGrammar(BaseSegment): """REFERENCES constraint option in `CREATE TABLE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ type = "references_constraint_grammar" match_grammar = Sequence( # REFERENCES reftable [ ( refcolumn) ] "REFERENCES", Ref("TableReferenceSegment"), # Foreign columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar", optional=True), Sequence( "ON", "DELETE", OneOf( Sequence("NO", "ACTION"), "CASCADE", Sequence("SET", "NULL"), Sequence("SET", "DEFAULT"), ), optional=True, ), Sequence( "ON", "UPDATE", OneOf( Sequence("NO", "ACTION"), "CASCADE", Sequence("SET", "NULL"), Sequence("SET", "DEFAULT"), ), optional=True, ), Sequence("NOT", "FOR", "REPLICATION", optional=True), ) class CheckConstraintGrammar(BaseSegment): """CHECK constraint option in `CREATE TABLE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 """ type = "check_constraint_grammar" match_grammar = Sequence( "CHECK", Sequence("NOT", "FOR", "REPLICATION", optional=True), Bracketed( Ref("ExpressionSegment"), ), ) class RelationalIndexOptionsSegment(BaseSegment): """A relational index options in `CREATE INDEX` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 """ type = "relational_index_options" match_grammar = Sequence( "WITH", OptionallyBracketed( Delimited( AnyNumberOf( Sequence( OneOf( "PAD_INDEX", "FILLFACTOR", "SORT_IN_TEMPDB", "IGNORE_DUP_KEY", "STATISTICS_NORECOMPUTE", "STATISTICS_INCREMENTAL", "DROP_EXISTING", "RESUMABLE", "ALLOW_ROW_LOCKS", "ALLOW_PAGE_LOCKS", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "MAXDOP", ), Ref("EqualsSegment"), OneOf( "ON", "OFF", Ref("LiteralGrammar"), ), ), Ref("MaxDurationSegment"), Sequence( "ONLINE", Ref("EqualsSegment"), OneOf( "OFF", Sequence( "ON", Bracketed( Sequence( "WAIT_AT_LOW_PRIORITY", Bracketed( Delimited( Ref("MaxDurationSegment"), Sequence( "ABORT_AFTER_WAIT", Ref("EqualsSegment"), OneOf( "NONE", "SELF", "BLOCKERS", ), ), ), ), ), optional=True, ), ), ), ), # for table constrains Sequence( "COMPRESSION_DELAY", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Sequence( "MINUTES", optional=True, ), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), OneOf( "NONE", "ROW", "PAGE", "COLUMNSTORE", # for table constrains "COLUMNSTORE_ARCHIVE", # for table constrains ), Ref("OnPartitionsSegment", optional=True), ), min_times=1, ), ), ), ) class MaxDurationSegment(BaseSegment): """A `MAX DURATION` clause. 
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 """ type = "max_duration" match_grammar = Sequence( "MAX_DURATION", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), Sequence( "MINUTES", optional=True, ), ) class DropIndexStatementSegment(ansi.DropIndexStatementSegment): """A `DROP INDEX` statement. Overriding ANSI to include required ON clause. """ match_grammar = Sequence( "DROP", "INDEX", Ref("IfExistsGrammar", optional=True), Ref("IndexReferenceSegment"), "ON", Ref("TableReferenceSegment"), Ref("DelimiterGrammar", optional=True), ) class DropStatisticsStatementSegment(BaseSegment): """A `DROP STATISTICS` statement.""" type = "drop_statement" # DROP INDEX [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE} match_grammar = Sequence( "DROP", OneOf("STATISTICS"), Ref("IndexReferenceSegment"), Ref("DelimiterGrammar", optional=True), ) class UpdateStatisticsStatementSegment(BaseSegment): """An `UPDATE STATISTICS` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/update-statistics-transact-sql?view=sql-server-ver15 """ type = "update_statistics_statement" match_grammar = Sequence( "UPDATE", "STATISTICS", Ref("ObjectReferenceSegment"), OneOf( Ref("SingleIdentifierGrammar"), Bracketed( Delimited( Ref("SingleIdentifierGrammar"), ), ), optional=True, ), Ref("DelimiterGrammar", optional=True), Sequence("WITH", OneOf("FULLSCAN", "RESAMPLE"), optional=True), ) class ObjectReferenceSegment(ansi.ObjectReferenceSegment): """A reference to an object. Update ObjectReferenceSegment to only allow dot separated SingleIdentifierGrammar So Square Bracketed identifiers can be matched. """ # match grammar (allow whitespace) match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar", optional=True), ), min_times=0, max_times=3, ), ) class TableReferenceSegment(ObjectReferenceSegment): """A reference to an table, CTE, subquery or alias. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "table_reference" class SchemaReferenceSegment(ObjectReferenceSegment): """A reference to a schema. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "schema_reference" class DatabaseReferenceSegment(ObjectReferenceSegment): """A reference to a database. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "database_reference" class IndexReferenceSegment(ObjectReferenceSegment): """A reference to an index. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "index_reference" class ExtensionReferenceSegment(ObjectReferenceSegment): """A reference to an extension. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "extension_reference" class ColumnReferenceSegment(ObjectReferenceSegment): """A reference to column, field or alias. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "column_reference" class SequenceReferenceSegment(ObjectReferenceSegment): """A reference to a sequence. Overriding to capture TSQL's override of ObjectReferenceSegment """ type = "sequence_reference" class PivotColumnReferenceSegment(ObjectReferenceSegment): """A reference to a PIVOT column. Used to differentiate it from a regular column reference. """ type = "pivot_column_reference" class PivotUnpivotStatementSegment(BaseSegment): """Declaration of a variable. 
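    Matches PIVOT / UNPIVOT table expressions in a FROM clause, e.g.
    `PIVOT (SUM(amount) FOR category IN ([a], [b])) AS p` (illustrative).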
https://docs.microsoft.com/en-us/sql/t-sql/queries/from-using-pivot-and-unpivot?view=sql-server-ver15 """ type = "from_pivot_expression" match_grammar = Sequence( OneOf( Sequence( "PIVOT", OptionallyBracketed( Sequence( OptionallyBracketed(Ref("FunctionSegment")), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))), ) ), ), Sequence( "UNPIVOT", OptionallyBracketed( Sequence( OptionallyBracketed(Ref("ColumnReferenceSegment")), "FOR", Ref("ColumnReferenceSegment"), "IN", Bracketed(Delimited(Ref("PivotColumnReferenceSegment"))), ) ), ), ), Sequence("AS", optional=True), Ref("TableReferenceSegment"), ) class DeclareStatementSegment(BaseSegment): """Declaration of a variable. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-local-variable-transact-sql?view=sql-server-ver15 """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Indent, Delimited( Sequence( Ref("ParameterNameSegment"), Sequence("AS", optional=True), OneOf( Sequence( Ref("DatatypeSegment"), Sequence( Ref("EqualsSegment"), Ref("ExpressionSegment"), optional=True, ), ), Sequence( "TABLE", Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), allow_trailing=True, ) ), ), ), ), ), Dedent, Ref("DelimiterGrammar", optional=True), ) class DeclareCursorStatementSegment(BaseSegment): """Declaration of a cursor. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/declare-cursor-transact-sql?view=sql-server-ver15 """ type = "declare_segment" match_grammar = Sequence( "DECLARE", Ref("NakedIdentifierSegment"), "CURSOR", OneOf("LOCAL", "GLOBAL", optional=True), OneOf("FORWARD_ONLY", "SCROLL", optional=True), OneOf("STATIC", "KEYSET", "DYNAMIC", "FAST_FORWARD", optional=True), OneOf("READ_ONLY", "SCROLL_LOCKS", "OPTIMISTIC", optional=True), Sequence("TYPE_WARNING", optional=True), "FOR", Ref("SelectStatementSegment"), ) class GoStatementSegment(BaseSegment): """GO signals the end of a batch of Transact-SQL statements. GO statements are not part of the TSQL language. They are used to signal batch statements so that clients know in how batches of statements can be executed. """ type = "go_statement" match_grammar = Ref.keyword("GO") class BracketedArguments(ansi.BracketedArguments): """A series of bracketed arguments. e.g. the bracketed part of numeric(1, 3) """ match_grammar = Bracketed( Delimited( OneOf( # TSQL allows optional MAX in some data types "MAX", Ref("ExpressionSegment"), ), # The brackets might be empty for some cases... optional=True, ), ) class DatatypeSegment(BaseSegment): """A data type segment. Updated for Transact-SQL to allow bracketed data types with bracketed schemas. """ type = "data_type" match_grammar = Sequence( # Some dialects allow optional qualification of data types with schemas Sequence( Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=False, optional=True, ), OneOf( Ref("DatatypeIdentifierSegment"), Bracketed(Ref("DatatypeIdentifierSegment"), bracket_type="square"), ), # Stop Gap until explicit Data Types as only relevent for character Ref.keyword("VARYING", optional=True), Ref("BracketedArguments", optional=True), Ref("CharCharacterSetGrammar", optional=True), ) class CreateSequenceOptionsSegment(BaseSegment): """Options for Create Sequence statement. 
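    Each match covers a single option, e.g. `START WITH 1` or `NO CYCLE`.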
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-sequence-transact-sql?view=sql-server-ver15 """ type = "create_sequence_options_segment" match_grammar = OneOf( Sequence( "AS", Ref("DatatypeSegment"), ), Sequence("START", "WITH", Ref("NumericLiteralSegment")), Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")), Sequence("MINVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MINVALUE"), Sequence("MAXVALUE", Ref("NumericLiteralSegment")), Sequence("NO", "MAXVALUE"), Sequence( Sequence("NO", optional=True), "CYCLE", ), Sequence( "CACHE", Ref("NumericLiteralSegment"), ), Sequence( "NO", "CACHE", ), ) class NextValueSequenceSegment(BaseSegment): """Segment to get next value from a sequence.""" type = "sequence_next_value" match_grammar = Sequence( "NEXT", "VALUE", "FOR", Ref("ObjectReferenceSegment"), ) class IfExpressionStatement(BaseSegment): """IF-ELSE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/if-else-transact-sql?view=sql-server-ver15 """ type = "if_then_statement" match_grammar = Sequence( Ref("IfClauseSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, AnyNumberOf( # ELSE IF included explicitly to allow for correct indentation Sequence( "ELSE", Ref("IfClauseSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, ), ), Sequence( "ELSE", Indent, Ref("StatementAndDelimiterGrammar"), Dedent, optional=True, ), ) class IfClauseSegment(BaseSegment): """IF clause.""" type = "if_clause" match_grammar = Sequence( "IF", Indent, Ref("ExpressionSegment"), Dedent, ) class WhileExpressionStatement(BaseSegment): """WHILE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/while-transact-sql?view=sql-server-ver15 """ type = "while_statement" match_grammar = Sequence( "WHILE", Ref("ExpressionSegment"), Indent, Ref("StatementAndDelimiterGrammar"), Dedent, ) class BreakStatement(BaseSegment): """BREAK statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/break-transact-sql?view=sql-server-ver15 """ type = "break_statement" match_grammar = Sequence( "BREAK", ) class ContinueStatement(BaseSegment): """CONTINUE statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/continue-transact-sql?view=sql-server-ver15 """ type = "continue_statement" match_grammar = Sequence( "CONTINUE", ) class WaitForStatementSegment(BaseSegment): """WAITFOR statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/waitfor-transact-sql?view=sql-server-ver15 Partially implemented, lacking Receive and Get Conversation Group statements for now. 
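    e.g. `WAITFOR DELAY '00:00:05'` (illustrative).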
""" type = "waitfor_statement" match_grammar = Sequence( "WAITFOR", OneOf( Sequence("DELAY", Ref("ExpressionSegment")), Sequence("TIME", Ref("ExpressionSegment")), ), Sequence("TIMEOUT", Ref("NumericLiteralSegment"), optional=True), ) class ColumnConstraintSegment(BaseSegment): """A column option; each CREATE TABLE column can have 0 or more.""" type = "column_constraint_segment" # Column constraint from # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 match_grammar = Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), OneOf( "FILESTREAM", Sequence( "COLLATE", Ref("CollationReferenceSegment") ), # [COLLATE collation_name] "SPARSE", Sequence( "MASKED", "WITH", Bracketed("FUNCTION", Ref("EqualsSegment"), Ref("LiteralGrammar")), ), Sequence( Sequence( "CONSTRAINT", Ref("ObjectReferenceSegment"), # Constraint name optional=True, ), # DEFAULT "DEFAULT", OptionallyBracketed( OneOf( OptionallyBracketed(Ref("LiteralGrammar")), # ((-1)) Ref("FunctionSegment"), Ref("NextValueSequenceSegment"), ), ), ), Ref("IdentityGrammar"), Sequence("NOT", "FOR", "REPLICATION"), Sequence( Sequence("GENERATED", "ALWAYS", "AS"), OneOf("ROW", "TRANSACTION_ID", "SEQUENCE_NUMBER"), OneOf("START", "END"), Ref.keyword("HIDDEN", optional=True), ), Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL "ROWGUIDCOL", Ref("EncryptedWithGrammar"), Ref("PrimaryKeyGrammar"), Ref("RelationalIndexOptionsSegment"), Ref("OnPartitionOrFilegroupOptionSegment"), "UNIQUE", # UNIQUE #can be removed as included in PrimaryKeyGrammar? Ref("ForeignKeyGrammar"), Ref("ReferencesConstraintGrammar"), Ref("CheckConstraintGrammar"), Ref("FilestreamOnOptionSegment", optional=True), # column_index Sequence( "INDEX", Ref("ObjectReferenceSegment"), # index name OneOf("CLUSTERED", "NONCLUSTERED", optional=True), # other optional blocks (RelationalIndexOptionsSegment, # OnIndexOptionSegment,FilestreamOnOptionSegment) are mentioned above ), # computed_column_definition Sequence("AS", Ref("ExpressionSegment")), Sequence("PERSISTED", Sequence("NOT", "NULL", optional=True)) # other optional blocks (RelationalIndexOptionsSegment, # OnIndexOptionSegment, ReferencesConstraintGrammar, CheckConstraintGrammar) # are mentioned above ), ) class FunctionParameterListGrammar(BaseSegment): """The parameters for a function ie. `(@city_name NVARCHAR(30), @postal_code NVARCHAR(15))`. Overriding ANSI (1) to optionally bracket and (2) remove Delimited """ type = "function_parameter_list" # Function parameter list match_grammar = Bracketed( Delimited( Sequence( Ref("FunctionParameterGrammar"), Sequence("READONLY", optional=True), ), optional=True, ), ) class CreateFunctionStatementSegment(BaseSegment): """A `CREATE FUNCTION` statement. This version in the TSQL dialect should be a "common subset" of the structure of the code for those dialects. Updated to include AS after declaration of RETURNS. Might be integrated in ANSI though. 
https://www.postgresql.org/docs/9.1/sql-createfunction.html https://docs.snowflake.com/en/sql-reference/sql/create-function.html https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions https://docs.microsoft.com/en-us/sql/t-sql/statements/create-function-transact-sql?view=sql-server-ver15 https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-function-transact-sql?view=sql-server-ver15 """ type = "create_function_statement" match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), "FUNCTION", Ref("ObjectReferenceSegment"), Ref("FunctionParameterListGrammar"), Sequence( # Optional function return type "RETURNS", OneOf( Ref("DatatypeSegment"), "TABLE", Sequence( Ref("ParameterNameSegment"), "TABLE", Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), ), ), ), ), ), optional=True, ), Ref("FunctionOptionSegment", optional=True), "AS", Ref("ProcedureDefinitionGrammar"), ) class FunctionOptionSegment(BaseSegment): """A function option segment.""" type = "function_option_segment" match_grammar = Sequence( "WITH", AnyNumberOf( "ENCRYPTION", "SCHEMABINDING", Sequence( OneOf( Sequence( "RETURNS", "NULL", ), "CALLED", ), "ON", "NULL", "INPUT", ), Ref("ExecuteAsClauseSegment"), Sequence( "INLINE", Ref("EqualsSegment"), OneOf( "ON", "OFF", ), ), min_times=1, ), ) class DropFunctionStatementSegment(BaseSegment): """A `DROP FUNCTION` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-function-transact-sql?view=sql-server-ver15 """ type = "drop_function_statement" match_grammar = Sequence( "DROP", "FUNCTION", Ref("IfExistsGrammar", optional=True), Delimited(Ref("FunctionNameSegment")), Ref("DelimiterGrammar", optional=True), ) class ReturnStatementSegment(BaseSegment): """A RETURN statement.""" type = "return_segment" match_grammar = Sequence( "RETURN", Ref("ExpressionSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class ExecuteAsClauseSegment(BaseSegment): """An EXECUTE AS clause. https://docs.microsoft.com/en-us/sql/t-sql/statements/execute-as-clause-transact-sql?view=sql-server-ver15 """ type = "execute_as_clause" match_grammar = Sequence( OneOf("EXEC", "EXECUTE"), "AS", OneOf( "CALLER", "SELF", "OWNER", Ref("QuotedLiteralSegment"), ), ) class SetStatementSegment(BaseSegment): """A Set statement. Setting an already declared variable or global variable. 
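    e.g. `SET @total += 1;` or `SET NOCOUNT ON;` (illustrative).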
https://docs.microsoft.com/en-us/sql/t-sql/statements/set-statements-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/language-elements/set-local-variable-transact-sql?view=sql-server-ver15 """ type = "set_segment" match_grammar = Sequence( "SET", Indent, Delimited( OneOf( Sequence( "TRANSACTION", "ISOLATION", "LEVEL", OneOf( "SNAPSHOT", "SERIALIZABLE", Sequence( "REPEATABLE", "READ", ), Sequence( "READ", OneOf( "COMMITTED", "UNCOMMITTED", ), ), ), ), Sequence( OneOf( "DATEFIRST", "DATEFORMAT", "DEADLOCK_PRIORITY", "LOCK_TIMEOUT", "CONCAT_NULL_YIELDS_NULL", "CURSOR_CLOSE_ON_COMMIT", "FIPS_FLAGGER", Sequence("IDENTITY_INSERT", Ref("TableReferenceSegment")), "LANGUAGE", "OFFSETS", "QUOTED_IDENTIFIER", "ARITHABORT", "ARITHIGNORE", "FMTONLY", "NOCOUNT", "NOEXEC", "NUMERIC_ROUNDABORT", "PARSEONLY", "QUERY_GOVERNOR_COST_LIMIT", "RESULT_SET_CACHING", # Azure Synapse Analytics specific "ROWCOUNT", "TEXTSIZE", "ANSI_DEFAULTS", "ANSI_NULL_DFLT_OFF", "ANSI_NULL_DFLT_ON", "ANSI_NULLS", "ANSI_PADDING", "ANSI_WARNINGS", "FORCEPLAN", "SHOWPLAN_ALL", "SHOWPLAN_TEXT", "SHOWPLAN_XML", Sequence( "STATISTICS", OneOf( "IO", "PROFILE", "TIME", "XML", ), ), "IMPLICIT_TRANSACTIONS", "REMOTE_PROC_TRANSACTIONS", "XACT_ABORT", ), OneOf( "ON", "OFF", Sequence( Ref("EqualsSegment"), Ref("ExpressionSegment"), ), # The below for https://learn.microsoft.com/en-us/sql/t-sql/statements/set-deadlock-priority-transact-sql?view=sql-server-ver16 # noqa "LOW", "NORMAL", "HIGH", Ref("ParameterNameSegment"), Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), ), ), Sequence( Ref("ParameterNameSegment"), Ref("AssignmentOperatorSegment"), OneOf( Ref("ExpressionSegment"), Ref("SelectableGrammar"), ), ), ), ), Dedent, Ref("DelimiterGrammar", optional=True), ) class AssignmentOperatorSegment(BaseSegment): """One of the assignment operators. Includes simpler equals but also +=, -=, etc. """ type = "assignment_operator" match_grammar = OneOf( Ref("RawEqualsSegment"), Sequence( OneOf( Ref("PlusSegment"), Ref("MinusSegment"), Ref("DivideSegment"), Ref("MultiplySegment"), Ref("ModuloSegment"), Ref("BitwiseAndSegment"), Ref("BitwiseOrSegment"), Ref("BitwiseXorSegment"), ), Ref("RawEqualsSegment"), allow_gaps=False, ), ) class ProcedureParameterListGrammar(BaseSegment): """The parameters for a procedure ie. `@city_name NVARCHAR(30), @postal_code NVARCHAR(15)`. """ type = "procedure_parameter_list" # Function parameter list match_grammar = OptionallyBracketed( Delimited( Sequence( Ref("ProcedureParameterGrammar"), OneOf("OUT", "OUTPUT", optional=True), Sequence("READONLY", optional=True), ), optional=True, ), ) class CreateProcedureStatementSegment(BaseSegment): """A `CREATE OR ALTER PROCEDURE` statement. 
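    e.g. `CREATE PROCEDURE dbo.usp_echo @x INT AS SELECT @x;`
    (illustrative; names are arbitrary).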
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-procedure-transact-sql?view=sql-server-ver15 https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-procedure-transact-sql?view=sql-server-ver15 """ type = "create_procedure_statement" _procedure_option = Sequence( "WITH", Delimited( AnySetOf( "ENCRYPTION", "RECOMPILE", "NATIVE_COMPILATION", # natively compiled stored procedure "SCHEMABINDING", # natively compiled stored procedure Ref("ExecuteAsClauseSegment", optional=True), ), ), optional=True, ) match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), OneOf("PROC", "PROCEDURE"), Ref("ObjectReferenceSegment"), # Not for natively compiled stored procedures Sequence( Ref("SemicolonSegment"), Ref("NumericLiteralSegment"), optional=True, ), Indent, Ref("ProcedureParameterListGrammar", optional=True), _procedure_option, Sequence("FOR", "REPLICATION", optional=True), Dedent, "AS", Ref("ProcedureDefinitionGrammar"), ) class DropProcedureStatementSegment(BaseSegment): """A `DROP PROCEDURE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-procedure-transact-sql?view=sql-server-ver15 """ type = "drop_procedure_statement" match_grammar = Sequence( "DROP", OneOf("PROCEDURE", "PROC"), Ref("IfExistsGrammar", optional=True), Delimited(Ref("ObjectReferenceSegment")), Ref("DelimiterGrammar", optional=True), ) class ProcedureDefinitionGrammar(BaseSegment): """This is the body of a `CREATE OR ALTER PROCEDURE AS` statement. This also handles the body of a `CREATE FUNCTION AS` statement. """ type = "procedure_statement" name = "procedure_statement" match_grammar = OneOf( Ref("OneOrMoreStatementsGrammar"), Ref("AtomicBeginEndSegment"), Sequence( "EXTERNAL", "NAME", Ref("ObjectReferenceSegment"), ), ) class CreateViewStatementSegment(BaseSegment): """A `CREATE VIEW` statement. Adjusted to allow CREATE OR ALTER instead of CREATE OR REPLACE. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-view-transact-sql?view=sql-server-ver15#examples https://learn.microsoft.com/en-us/sql/t-sql/statements/alter-view-transact-sql?view=sql-server-ver15#examples """ type = "create_view_statement" match_grammar = Sequence( OneOf("CREATE", "ALTER", Sequence("CREATE", "OR", "ALTER")), "VIEW", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ), optional=True, ), Sequence( "WITH", Delimited("ENCRYPTION", "SCHEMABINDING", "VIEW_METADATA"), optional=True, ), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Sequence("WITH", "CHECK", "OPTION", optional=True), Ref("DelimiterGrammar", optional=True), ) class MLTableExpressionSegment(BaseSegment): """An ML table expression. Not present in T-SQL. TODO: Consider whether this segment can be used to represent a PREDICT statement. """ type = "ml_table_expression" match_grammar = Nothing() class ConvertFunctionNameSegment(BaseSegment): """CONVERT function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf("CONVERT", "TRY_CONVERT") class CastFunctionNameSegment(BaseSegment): """CAST function name segment. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = Sequence("CAST") class RankFunctionNameSegment(BaseSegment): """Rank function name segment. 
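    e.g. the name part of `ROW_NUMBER() OVER (ORDER BY id)` (illustrative).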
Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf("DENSE_RANK", "NTILE", "RANK", "ROW_NUMBER") class ReservedKeywordFunctionNameSegment(BaseSegment): """Reserved keywords that are also functions. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "COALESCE", "LEFT", "NULLIF", "RIGHT", ) class ReservedKeywordBareFunctionNameSegment(BaseSegment): """Reserved keywords that are functions without parentheses. Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "CURRENT_TIMESTAMP", "CURRENT_USER", "SESSION_USER", "SYSTEM_USER", ) class WithinGroupFunctionNameSegment(BaseSegment): """WITHIN GROUP function name segment. For aggregation functions that use the WITHIN GROUP clause. https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver15 Need to be able to specify this as type function_name so that linting rules identify it properly """ type = "function_name" match_grammar = OneOf( "STRING_AGG", "PERCENTILE_CONT", "PERCENTILE_DISC", ) class WithinGroupClause(BaseSegment): """WITHIN GROUP clause. For a small set of aggregation functions. https://docs.microsoft.com/en-us/sql/t-sql/functions/string-agg-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/functions/percentile-cont-transact-sql?view=sql-server-ver15 """ type = "within_group_clause" match_grammar = Sequence( "WITHIN", "GROUP", Bracketed( Ref("OrderByClauseSegment"), ), Sequence( "OVER", Bracketed(Ref("PartitionClauseSegment")), optional=True, ), ) class PartitionClauseSegment(ansi.PartitionClauseSegment): """PARTITION BY clause. https://docs.microsoft.com/en-us/sql/t-sql/queries/select-over-clause-transact-sql?view=sql-server-ver15#partition-by """ type = "partitionby_clause" match_grammar = Sequence( "PARTITION", "BY", Delimited( OptionallyBracketed( OneOf( Ref("ColumnReferenceSegment"), Ref("ExpressionSegment"), ) ) ), ) class OnPartitionsSegment(BaseSegment): """ON PARTITIONS clause. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 """ type = "on_partitions_clause" match_grammar = Sequence( "ON", "PARTITIONS", Bracketed( Delimited( OneOf( Ref("NumericLiteralSegment"), Sequence( Ref("NumericLiteralSegment"), "TO", Ref("NumericLiteralSegment") ), ) ) ), ) class PartitionSchemeNameSegment(BaseSegment): """Partition Scheme Name.""" type = "partition_scheme_name" match_grammar = Ref("SingleIdentifierGrammar") class PartitionSchemeClause(BaseSegment): """Partition Scheme Clause segment. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-index-transact-sql?view=sql-server-ver15 """ type = "partition_scheme_clause" match_grammar = Sequence( "ON", Ref("PartitionSchemeNameSegment"), Bracketed(Ref("ColumnReferenceSegment")), ) class FunctionSegment(BaseSegment): """A scalar or aggregate function. Maybe in the future we should distinguish between aggregate functions and other functions. For now we treat them the same because they look the same for our purposes. 
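    Special-cased forms handled here include, for example, `CAST(x AS INT)`,
    `CONVERT(VARCHAR(10), x, 120)`, `DATEADD(day, 1, order_date)` and
    `ROW_NUMBER() OVER (ORDER BY id)` (illustrative).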
""" type = "function" match_grammar = OneOf( Ref("ReservedKeywordBareFunctionNameSegment"), Sequence( # Treat functions which take date parts separately # So those functions parse date parts as DatetimeUnitSegment # rather than identifiers. Ref("DatePartFunctionNameSegment"), Bracketed( Delimited( Ref("DatetimeUnitSegment"), Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), parse_mode=ParseMode.GREEDY, ), ), Sequence( Ref("RankFunctionNameSegment"), Bracketed( Ref("NumericLiteralSegment", optional=True), ), "OVER", Bracketed( Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment"), ), ), Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver15 Ref("ConvertFunctionNameSegment"), Bracketed( Ref("DatatypeSegment"), Bracketed(Ref("NumericLiteralSegment"), optional=True), Ref("CommaSegment"), Ref("ExpressionSegment"), Sequence( Ref("CommaSegment"), Ref("NumericLiteralSegment"), optional=True ), ), ), Sequence( # https://docs.microsoft.com/en-us/sql/t-sql/functions/cast-and-convert-transact-sql?view=sql-server-ver15 Ref("CastFunctionNameSegment"), Bracketed( Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment"), ), ), Sequence( Ref("WithinGroupFunctionNameSegment"), Bracketed( Delimited( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), ), parse_mode=ParseMode.GREEDY, ), Ref("WithinGroupClause", optional=True), ), Sequence( OneOf( Ref( "FunctionNameSegment", exclude=OneOf( Ref("ValuesClauseSegment"), # List of special functions handled differently Ref("CastFunctionNameSegment"), Ref("ConvertFunctionNameSegment"), Ref("DatePartFunctionNameSegment"), Ref("WithinGroupFunctionNameSegment"), Ref("RankFunctionNameSegment"), ), ), Ref("ReservedKeywordFunctionNameSegment"), ), Bracketed( Ref( "FunctionContentsGrammar", # The brackets might be empty for some functions... optional=True, ), parse_mode=ParseMode.GREEDY, ), Ref("PostFunctionGrammar", optional=True), ), ) class CreateTableStatementSegment(BaseSegment): """A `CREATE TABLE` statement.""" type = "create_table_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7 match_grammar = Sequence( "CREATE", "TABLE", Ref("TableReferenceSegment"), OneOf( # Columns and comment syntax: Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("TableIndexSegment"), Ref("PeriodSegment"), ), allow_trailing=True, ) ), ), # Create AS syntax: Sequence( "AS", OptionallyBracketed(Ref("SelectableGrammar")), ), # Create like syntax Sequence("LIKE", Ref("TableReferenceSegment")), ), Ref( "TableDistributionIndexClause", optional=True ), # Azure Synapse Analytics specific Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), Ref("TextimageOnOptionSegment", optional=True), Ref("TableOptionSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class AlterTableStatementSegment(BaseSegment): """An `ALTER TABLE` statement. 
https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql?view=sql-server-ver15 Overriding ANSI to remove TSQL non-keywords MODIFY, FIRST TODO: Flesh out TSQL-specific functionality """ type = "alter_table_statement" match_grammar = Sequence( "ALTER", "TABLE", Ref("TableReferenceSegment"), Delimited( OneOf( # Table options Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment", optional=True), OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")), ), Sequence( "ALTER", "COLUMN", Ref("ColumnDefinitionSegment"), ), Sequence( "ADD", Delimited( Ref("ColumnDefinitionSegment"), ), ), Sequence( "DROP", "COLUMN", Ref("IfExistsGrammar", optional=True), Ref("ColumnReferenceSegment"), ), Sequence( "ADD", Ref("ColumnConstraintSegment"), "FOR", Ref("ColumnReferenceSegment"), ), Sequence( Sequence( "WITH", "CHECK", optional=True, ), "ADD", Ref("TableConstraintSegment"), ), Sequence( OneOf( "CHECK", "DROP", ), "CONSTRAINT", Ref("ObjectReferenceSegment"), ), # Rename Sequence( "RENAME", OneOf("AS", "TO", optional=True), Ref("TableReferenceSegment"), ), Sequence( "SET", OneOf( Bracketed( Sequence( "FILESTREAM_ON", Ref("EqualsSegment"), OneOf( Ref("FilegroupNameSegment"), Ref("PartitionSchemeNameSegment"), OneOf( "NULL", Ref("LiteralGrammar"), # for "default" value ), ), ) ), Bracketed( Sequence( "SYSTEM_VERSIONING", Ref("EqualsSegment"), OneOf("ON", "OFF"), Sequence( Bracketed( "HISTORY_TABLE", Ref("EqualsSegment"), Ref("TableReferenceSegment"), Sequence( Ref("CommaSegment"), "DATA_CONSISTENCY_CHECK", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), Sequence( Ref("CommaSegment"), "HISTORY_RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), optional=True, ), ), optional=True, ), ) ), Bracketed( Sequence( "DATA_DELETION", Ref("EqualsSegment"), OneOf("ON", "OFF"), Sequence( Bracketed( "FILTER_COLUMN", Ref("EqualsSegment"), Ref("ColumnReferenceSegment"), Sequence( Ref("CommaSegment"), "RETENTION_PERIOD", Ref("EqualsSegment"), Ref("NumericLiteralSegment", optional=True), Ref("DatetimeUnitSegment"), optional=True, ), ), optional=True, ), ), ), ), ), ) ), ) class TableConstraintSegment(BaseSegment): """A table constraint, e.g. for CREATE TABLE.""" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 type = "table_constraint" match_grammar = Sequence( Sequence( # [ CONSTRAINT ] "CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True ), OneOf( Sequence( Ref("PrimaryKeyGrammar"), Ref("BracketedIndexColumnListGrammar"), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), ), Sequence( # FOREIGN KEY ( column_name [, ... ] ) # REFERENCES reftable [ ( refcolumn [, ... ] ) ] Ref("ForeignKeyGrammar"), # Local columns making up FOREIGN KEY constraint Ref("BracketedColumnReferenceListGrammar"), # REFERENCES reftable [ ( refcolumn) ] + ON DELETE/ON UPDATE Ref("ReferencesConstraintGrammar"), ), Ref("CheckConstraintGrammar", optional=True), ), ) class TableIndexSegment(BaseSegment): """A table index, e.g. 
for CREATE TABLE.""" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 type = "table_index_segment" match_grammar = Sequence( Sequence("INDEX", Ref("ObjectReferenceSegment"), optional=True), OneOf( Sequence( Sequence("UNIQUE", optional=True), OneOf("CLUSTERED", "NONCLUSTERED", optional=True), Ref("BracketedIndexColumnListGrammar"), ), Sequence("CLUSTERED", "COLUMNSTORE"), Sequence( Sequence("NONCLUSTERED", optional=True), "COLUMNSTORE", Ref("BracketedColumnReferenceListGrammar"), ), ), Ref("RelationalIndexOptionsSegment", optional=True), Ref("OnPartitionOrFilegroupOptionSegment", optional=True), Ref("FilestreamOnOptionSegment", optional=True), ) class BracketedIndexColumnListGrammar(BaseSegment): """list of columns used for CREATE INDEX, constraints.""" type = "bracketed_index_column_list_grammar" match_grammar = Sequence( Bracketed( Delimited( Ref("IndexColumnDefinitionSegment"), ) ) ) class FilegroupNameSegment(BaseSegment): """Filegroup Name Segment.""" type = "filegroup_name" match_grammar = Ref("SingleIdentifierGrammar") class FilegroupClause(BaseSegment): """Filegroup Clause segment. https://docs.microsoft.com/en-us/sql/relational-databases/databases/database-files-and-filegroups?view=sql-server-ver15 """ type = "filegroup_clause" match_grammar = Sequence( "ON", Ref("FilegroupNameSegment"), ) class IdentityGrammar(BaseSegment): """`IDENTITY (1,1)` in table schemas. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property?view=sql-server-ver15 """ type = "identity_grammar" match_grammar = Sequence( "IDENTITY", # optional (seed, increment) e.g. (1, 1) Bracketed( Sequence( Ref("NumericLiteralSegment"), Ref("CommaSegment"), Ref("NumericLiteralSegment"), ), optional=True, ), ) class EncryptedWithGrammar(BaseSegment): """ENCRYPTED WITH in table schemas. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql-identity-property?view=sql-server-ver15 """ type = "encrypted_with_grammar" match_grammar = Sequence( "ENCRYPTED", "WITH", Bracketed( Delimited( Sequence( "COLUMN_ENCRYPTION_KEY", Ref("EqualsSegment"), Ref("SingleIdentifierGrammar"), ), Sequence( "ENCRYPTION_TYPE", Ref("EqualsSegment"), OneOf("DETERMINISTIC", "RANDOMIZED"), ), Sequence( "ALGORITHM", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ), ) class TableDistributionIndexClause(BaseSegment): """`CREATE TABLE` distribution / index clause. This is specific to Azure Synapse Analytics. """ type = "table_distribution_index_clause" match_grammar = Sequence( "WITH", Bracketed( Delimited( Ref("TableDistributionClause"), Ref("TableIndexClause"), Ref("TableLocationClause"), ), ), ) class TableDistributionClause(BaseSegment): """`CREATE TABLE` distribution clause. This is specific to Azure Synapse Analytics. """ type = "table_distribution_clause" match_grammar = Sequence( "DISTRIBUTION", Ref("EqualsSegment"), OneOf( "REPLICATE", "ROUND_ROBIN", Sequence( "HASH", Bracketed(Ref("ColumnReferenceSegment")), ), ), ) class TableIndexClause(BaseSegment): """`CREATE TABLE` table index clause. This is specific to Azure Synapse Analytics. 
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-azure-sql-data-warehouse?view=aps-pdw-2016-au7#TableOptions """ type = "table_index_clause" match_grammar = Sequence( OneOf( "HEAP", Sequence( "CLUSTERED", "COLUMNSTORE", "INDEX", Sequence( "ORDER", Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), ), optional=True, ), ), Sequence( "CLUSTERED", "INDEX", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), OneOf( "ASC", "DESC", optional=True, ), ), ), ), ), ), ) class TableLocationClause(BaseSegment): """`CREATE TABLE` location clause. This is specific to Azure Synapse Analytics (deprecated) or to an external table. """ type = "table_location_clause" match_grammar = Sequence( "LOCATION", Ref("EqualsSegment"), OneOf( "USER_DB", # Azure Synapse Analytics specific Ref("QuotedLiteralSegmentOptWithN"), # External Table ), ) class AlterTableSwitchStatementSegment(BaseSegment): """An `ALTER TABLE SWITCH` statement.""" type = "alter_table_switch_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/alter-table-transact-sql?view=sql-server-ver15 # T-SQL's ALTER TABLE SWITCH grammar is different enough to core ALTER TABLE grammar # to merit its own definition match_grammar = Sequence( "ALTER", "TABLE", Ref("ObjectReferenceSegment"), "SWITCH", Sequence("PARTITION", Ref("NumericLiteralSegment"), optional=True), "TO", Ref("ObjectReferenceSegment"), Sequence( # Azure Synapse Analytics specific "WITH", Bracketed("TRUNCATE_TARGET", Ref("EqualsSegment"), OneOf("ON", "OFF")), optional=True, ), Ref("DelimiterGrammar", optional=True), ) class CreateTableAsSelectStatementSegment(BaseSegment): """A `CREATE TABLE AS SELECT` statement. This is specific to Azure Synapse Analytics. """ type = "create_table_as_select_statement" # https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-as-select-azure-sql-data-warehouse?toc=/azure/synapse-analytics/sql-data-warehouse/toc.json&bc=/azure/synapse-analytics/sql-data-warehouse/breadcrumb/toc.json&view=azure-sqldw-latest&preserve-view=true match_grammar = Sequence( "CREATE", "TABLE", Ref("TableReferenceSegment"), Ref("TableDistributionIndexClause"), "AS", OptionallyBracketed(Ref("SelectableGrammar")), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class TransactionStatementSegment(BaseSegment): """A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement.""" type = "transaction_statement" match_grammar = OneOf( # [ BEGIN | SAVE ] [ TRANSACTION | TRAN ] [ | ] # COMMIT [ TRANSACTION | TRAN | WORK ] # ROLLBACK [ TRANSACTION | TRAN | WORK ] [ | ] # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-transaction-transact-sql?view=sql-server-ver15 Sequence( "BEGIN", Sequence("DISTRIBUTED", optional=True), Ref("TransactionGrammar"), Ref("SingleIdentifierGrammar", optional=True), Sequence("WITH", "MARK", Ref("QuotedIdentifierSegment"), optional=True), Ref("DelimiterGrammar", optional=True), ), Sequence( OneOf("COMMIT", "ROLLBACK"), Ref("TransactionGrammar", optional=True), OneOf( Ref("SingleIdentifierGrammar"), Ref("VariableIdentifierSegment"), optional=True, ), Ref("DelimiterGrammar", optional=True), ), Sequence( OneOf("COMMIT", "ROLLBACK"), Sequence("WORK", optional=True), Ref("DelimiterGrammar", optional=True), ), Sequence( "SAVE", Ref("TransactionGrammar"), OneOf( Ref("SingleIdentifierGrammar"), Ref("VariableIdentifierSegment"), optional=True, ), Ref("DelimiterGrammar", optional=True), ), ) class BeginEndSegment(BaseSegment): """A `BEGIN/END` block. 
Encloses multiple statements into a single statement object. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-end-transact-sql?view=sql-server-ver15 """ type = "begin_end_block" match_grammar = Sequence( "BEGIN", Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, "END", ) class AtomicBeginEndSegment(BaseSegment): """A special `BEGIN/END` block with atomic options. This is only dedicated to natively compiled stored procedures. Encloses multiple statements into a single statement object. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/begin-end-transact-sql?view=sql-server-ver15 https://learn.microsoft.com/en-us/sql/t-sql/statements/create-procedure-transact-sql?view=sql-server-ver16#syntax """ type = "atomic_begin_end_block" match_grammar = Sequence( "BEGIN", Sequence( "ATOMIC", "WITH", Bracketed( Delimited( Sequence( "LANGUAGE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "TRANSACTION", "ISOLATION", "LEVEL", Ref("EqualsSegment"), OneOf( "SNAPSHOT", Sequence("REPEATABLE", "READ"), "SERIALIZABLE", ), ), Sequence( "DATEFIRST", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), optional=True, ), Sequence( "DATEFORMAT", Ref("EqualsSegment"), Ref("DateFormatSegment"), optional=True, ), Sequence( "DELAYED_DURABILITY", Ref("EqualsSegment"), OneOf("ON", "OFF"), optional=True, ), ), ), ), Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, Sequence("END", optional=True), ) class TryCatchSegment(BaseSegment): """A `TRY/CATCH` block pair. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/try-catch-transact-sql?view=sql-server-ver15 """ type = "try_catch" match_grammar = Sequence( "BEGIN", "TRY", Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, "END", "TRY", "BEGIN", "CATCH", Ref("DelimiterGrammar", optional=True), Indent, Ref("OneOrMoreStatementsGrammar"), Dedent, "END", "CATCH", ) class BatchSegment(BaseSegment): """A segment representing a GO batch within a file or script.""" type = "batch" match_grammar = OneOf( # Things that can be bundled Ref("OneOrMoreStatementsGrammar"), # Things that can't be bundled Ref("CreateProcedureStatementSegment"), ) class FileSegment(BaseFileSegment): """A segment representing a whole file or script. We override default as T-SQL allows concept of several batches of commands separated by GO as well as usual semicolon-separated statement lines. This is also the default "root" segment of the dialect, and so is usually instantiated directly. It therefore has no match_grammar. """ match_grammar = Sequence( AnyNumberOf(Ref("BatchDelimiterGrammar")), Delimited( Ref("BatchSegment"), delimiter=AnyNumberOf( Sequence( Ref("DelimiterGrammar", optional=True), Ref("BatchDelimiterGrammar") ), min_times=1, ), allow_gaps=True, allow_trailing=True, ), ) class OpenRowSetSegment(BaseSegment): """A `OPENROWSET` segment. 
https://docs.microsoft.com/en-us/sql/t-sql/functions/openrowset-transact-sql?view=sql-server-ver15 """ type = "openrowset_segment" match_grammar = Sequence( "OPENROWSET", Bracketed( OneOf( Sequence( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), OneOf( Sequence( Ref("QuotedLiteralSegment"), Ref("DelimiterGrammar"), Ref("QuotedLiteralSegment"), Ref("DelimiterGrammar"), Ref("QuotedLiteralSegment"), ), Ref("QuotedLiteralSegment"), ), Ref("CommaSegment"), OneOf( Ref("TableReferenceSegment"), Ref("QuotedLiteralSegment"), ), ), Sequence( "BULK", Ref("QuotedLiteralSegmentOptWithN"), Ref("CommaSegment"), OneOf( Sequence( "FORMATFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), Ref("CommaSegment"), Delimited( AnyNumberOf( Sequence( "DATASOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "ERRORFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "ERRORFILE_DATA_SOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "MAXERRORS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "FIRSTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "LASTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "CODEPAGE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDQUOTE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "FORMATFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "FORMATFILE_DATA_SOURCE", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), ), optional=True, ), ), "SINGLE_BLOB", "SINGLE_CLOB", "SINGLE_NCLOB", ), ), ), ), ) class DeleteStatementSegment(BaseSegment): """A `DELETE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/delete-transact-sql?view=sql-server-ver15 Overriding ANSI to remove greedy logic which assumes statements have been delimited and to allow for Azure Synapse Analytics-specific DELETE statements """ type = "delete_statement" # match grammar. This one makes sense in the context of knowing that it's # definitely a statement, we just don't know what type yet. match_grammar = Sequence( "DELETE", OneOf( Sequence( Ref("TopPercentGrammar", optional=True), Ref.keyword("FROM", optional=True), OneOf( Sequence( Sequence( "OPENDATASOURCE", Bracketed( Ref("QuotedLiteralSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), Ref("DotSegment"), optional=True, ), Ref("TableReferenceSegment"), Ref("PostTableExpressionGrammar", optional=True), ), Sequence( "OPENQUERY", Bracketed( Ref("NakedIdentifierSegment"), Ref("CommaSegment"), Ref("QuotedLiteralSegment"), ), ), Ref("OpenRowSetSegment"), ), Ref("OutputClauseSegment", optional=True), Ref("FromClauseSegment", optional=True), OneOf( Ref("WhereClauseSegment"), Sequence( "WHERE", "CURRENT", "OF", Ref("CursorNameGrammar"), ), optional=True, ), ), # Azure Synapse Analytics-specific Sequence( "FROM", Ref("TableReferenceSegment"), "JOIN", Ref("TableReferenceSegment"), Ref("JoinOnConditionSegment"), Ref("WhereClauseSegment", optional=True), ), ), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class FromClauseSegment(BaseSegment): """A `FROM` clause like in `SELECT`. NOTE: this is a delimited set of table expressions, with a variable number of optional join clauses with those table expressions. 
The delimited aspect is the higher of the two such that the following is valid (albeit unusual): ``` SELECT * FROM a JOIN b, c JOIN d ``` Overriding ANSI to remove Delimited logic which assumes statements have been delimited """ type = "from_clause" match_grammar = Sequence( "FROM", Delimited(Ref("FromExpressionSegment")), Ref("DelimiterGrammar", optional=True), ) get_eventual_aliases = ansi.FromClauseSegment.get_eventual_aliases class FromExpressionElementSegment(ansi.FromExpressionElementSegment): """FROM Expression Element Segment. Overriding ANSI to add Temporal Query. """ match_grammar = ansi.FromExpressionElementSegment.match_grammar.copy( insert=[ Ref("TemporalQuerySegment", optional=True), ], before=Ref( "AliasExpressionSegment", exclude=OneOf( Ref("SamplingExpressionSegment"), Ref("JoinLikeClauseGrammar"), ), optional=True, ), ) class TableExpressionSegment(BaseSegment): """The main table expression e.g. within a FROM clause. In SQL standard, as well as T-SQL, table expressions (`table reference` in SQL standard) can also be join tables, optionally bracketed, allowing for nested joins. """ type = "table_expression" match_grammar: Matchable = OneOf( Ref("ValuesClauseSegment"), Ref("BareFunctionSegment"), Ref("FunctionSegment"), Ref("OpenRowSetSegment"), Ref("OpenJsonSegment"), Ref("TableReferenceSegment"), Ref("StorageLocationSegment"), # Nested Selects Bracketed(Ref("SelectableGrammar")), Bracketed(Ref("MergeStatementSegment")), Bracketed( Sequence( Ref("TableExpressionSegment"), # TODO: Revisit this to make sure it's sensible. Conditional(Dedent, indented_joins=False), Conditional(Indent, indented_joins=True), OneOf(Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar")), Conditional(Dedent, indented_joins=True), Conditional(Indent, indented_joins=True), ) ), ) class GroupByClauseSegment(BaseSegment): """A `GROUP BY` clause like in `SELECT`. Overriding ANSI to remove Delimited logic which assumes statements have been delimited """ type = "groupby_clause" match_grammar = Sequence( "GROUP", "BY", Indent, OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), AnyNumberOf( Ref("CommaSegment"), OneOf( Ref("ColumnReferenceSegment"), # Can `GROUP BY 1` Ref("NumericLiteralSegment"), # Can `GROUP BY coalesce(col, 1)` Ref("ExpressionSegment"), ), ), Dedent, ) class HavingClauseSegment(BaseSegment): """A `HAVING` clause like in `SELECT`. Overriding ANSI to remove greedy terminator """ type = "having_clause" match_grammar = Sequence( "HAVING", Indent, OptionallyBracketed(Ref("ExpressionSegment")), Dedent, ) class OrderByClauseSegment(BaseSegment): """An `ORDER BY` clause like in `SELECT`. Overriding ANSI to remove Greedy logic which assumes statements have been delimited """ type = "orderby_clause" match_grammar = Sequence( "ORDER", "BY", Indent, Delimited( Sequence( OneOf( Ref("ColumnReferenceSegment"), # Can `ORDER BY 1` Ref("NumericLiteralSegment"), # Can order by an expression Ref("ExpressionSegment"), ), OneOf("ASC", "DESC", optional=True), ), ), Dedent, ) class RenameStatementSegment(BaseSegment): """`RENAME` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/rename-transact-sql?view=aps-pdw-2016-au7 Azure Synapse Analytics-specific.
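For example (illustrative object names only):

```
RENAME OBJECT dbo.old_orders TO orders_archive;
```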
""" type = "rename_statement" match_grammar = Sequence( "RENAME", "OBJECT", Ref("ObjectReferenceSegment"), "TO", Ref("SingleIdentifierGrammar"), Ref("DelimiterGrammar", optional=True), ) class DropTableStatementSegment(ansi.DropTableStatementSegment): """A `DROP TABLE` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropTableStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class DropViewStatementSegment(ansi.DropViewStatementSegment): """A `DROP VIEW` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropViewStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class DropUserStatementSegment(ansi.DropUserStatementSegment): """A `DROP USER` statement. Overriding ANSI to add optional delimiter. """ match_grammar = ansi.DropUserStatementSegment.match_grammar.copy( insert=[ Ref("DelimiterGrammar", optional=True), ], ) class UpdateStatementSegment(BaseSegment): """An `Update` statement. UPDATE
<table> SET <set clause list> [ WHERE <search condition> ] Overriding ANSI in order to allow for PostTableExpressionGrammar (table hints) """ type = "update_statement" match_grammar = Sequence( "UPDATE", OneOf(Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar")), Ref("PostTableExpressionGrammar", optional=True), Ref("SetClauseListSegment"), Ref("OutputClauseSegment", optional=True), Ref("FromClauseSegment", optional=True), Ref("WhereClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class SetClauseListSegment(BaseSegment): """set clause list. Overriding ANSI to remove Delimited """ type = "set_clause_list" match_grammar = Sequence( "SET", Indent, Ref("SetClauseSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("SetClauseSegment"), ), Dedent, ) class SetClauseSegment(BaseSegment): """Set clause. Overriding ANSI to allow for ExpressionSegment on the right """ type = "set_clause" match_grammar = Sequence( Ref("ColumnReferenceSegment"), Ref("AssignmentOperatorSegment"), Ref("ExpressionSegment"), ) class PrintStatementSegment(BaseSegment): """PRINT statement segment.""" type = "print_statement" match_grammar = Sequence( "PRINT", Ref("ExpressionSegment"), Ref("DelimiterGrammar", optional=True), ) class OptionClauseSegment(BaseSegment): """Query Hint clause. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15 """ type = "option_clause" match_grammar = Sequence( "OPTION", Bracketed( Delimited(Ref("QueryHintSegment")), ), ) class QueryHintSegment(BaseSegment): """Query Hint segment. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query?view=sql-server-ver15 """ type = "query_hint_segment" match_grammar = OneOf( Sequence( # Azure Synapse Analytics specific "LABEL", Ref("EqualsSegment"), Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( OneOf("HASH", "ORDER"), "GROUP", ), Sequence(OneOf("MERGE", "HASH", "CONCAT"), "UNION"), Sequence(OneOf("LOOP", "MERGE", "HASH"), "JOIN"), Sequence("EXPAND", "VIEWS"), Sequence( OneOf( "FAST", "MAXDOP", "MAXRECURSION", "QUERYTRACEON", Sequence( OneOf( "MAX_GRANT_PERCENT", "MIN_GRANT_PERCENT", ), Ref("EqualsSegment"), ), ), Ref("NumericLiteralSegment"), ), Sequence("FORCE", "ORDER"), Sequence( OneOf("FORCE", "DISABLE"), OneOf("EXTERNALPUSHDOWN", "SCALEOUTEXECUTION"), ), Sequence( OneOf( "KEEP", "KEEPFIXED", "ROBUST", ), "PLAN", ), "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX", "NO_PERFORMANCE_SPOOL", Sequence( "OPTIMIZE", "FOR", OneOf( "UNKNOWN", Bracketed( Ref("ParameterNameSegment"), OneOf( "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")) ), AnyNumberOf( Ref("CommaSegment"), Ref("ParameterNameSegment"), OneOf( "UNKNOWN", Sequence(Ref("EqualsSegment"), Ref("LiteralGrammar")), ), ), ), ), ), Sequence("PARAMETERIZATION", OneOf("SIMPLE", "FORCED")), "RECOMPILE", Sequence( "USE", "HINT", Bracketed( Ref("QuotedLiteralSegment"), AnyNumberOf(Ref("CommaSegment"), Ref("QuotedLiteralSegment")), ), ), Sequence( "USE", "PLAN", Ref("QuotedLiteralSegmentOptWithN"), ), Sequence( "TABLE", "HINT", Ref("ObjectReferenceSegment"), Delimited(Ref("TableHintSegment")), ), ) class PostTableExpressionGrammar(BaseSegment): """Table Hint clause. Overloading the PostTableExpressionGrammar to implement table hints.
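For example, the `WITH (NOLOCK)` in the following (the table name is
illustrative):

```
SELECT order_id
FROM dbo.orders WITH (NOLOCK);
```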
https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15 """ type = "post_table_expression" match_grammar = Sequence( Sequence("WITH", optional=True), Bracketed( Ref("TableHintSegment"), AnyNumberOf( Ref("CommaSegment"), Ref("TableHintSegment"), ), ), ) class TableHintSegment(BaseSegment): """Table Hint segment. https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver15 """ type = "query_hint_segment" match_grammar = OneOf( "NOEXPAND", Sequence( "INDEX", Bracketed( Delimited( OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")), ), ), ), Sequence( "INDEX", Ref("EqualsSegment"), Bracketed( OneOf(Ref("IndexReferenceSegment"), Ref("NumericLiteralSegment")), ), ), "KEEPIDENTITY", "KEEPDEFAULTS", Sequence( "FORCESEEK", Bracketed( Ref("IndexReferenceSegment"), Bracketed( Ref("SingleIdentifierGrammar"), AnyNumberOf(Ref("CommaSegment"), Ref("SingleIdentifierGrammar")), ), optional=True, ), ), "FORCESCAN", "HOLDLOCK", "IGNORE_CONSTRAINTS", "IGNORE_TRIGGERS", "NOLOCK", "NOWAIT", "PAGLOCK", "READCOMMITTED", "READCOMMITTEDLOCK", "READPAST", "READUNCOMMITTED", "REPEATABLEREAD", "ROWLOCK", "SERIALIZABLE", "SNAPSHOT", Sequence( "SPATIAL_WINDOW_MAX_CELLS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), "TABLOCK", "TABLOCKX", "UPDLOCK", "XLOCK", ) class SetOperatorSegment(BaseSegment): """A set operator such as Union, Except or Intersect. Override ANSI to remove TSQL non-keyword MINUS. """ type = "set_operator" match_grammar = OneOf( Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)), "INTERSECT", "EXCEPT", ) class SetExpressionSegment(BaseSegment): """A set expression with either Union, Minus, Except or Intersect. Overriding ANSI to include OPTION clause. """ type = "set_expression" # match grammar match_grammar = Sequence( Ref("NonSetSelectableGrammar"), AnyNumberOf( Sequence( Ref("SetOperatorSegment"), Ref("NonSetSelectableGrammar"), ), min_times=1, ), Ref("OrderByClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), Ref("DelimiterGrammar", optional=True), ) class ForClauseSegment(BaseSegment): """A For Clause segment for TSQL. This is used to format results into XML or JSON """ type = "for_clause" _common_directives_for_xml = Sequence( Sequence( "BINARY", "BASE64", ), "TYPE", Sequence( "ROOT", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), optional=True, ) _elements = Sequence("ELEMENTS", OneOf("XSINIL", "ABSENT", optional=True)) match_grammar = Sequence( "FOR", OneOf( "BROWSE", Sequence( "JSON", Delimited( OneOf( "AUTO", "PATH", ), Sequence( "ROOT", Bracketed( Ref("LiteralGrammar"), optional=True, ), optional=True, ), Ref.keyword("INCLUDE_NULL_VALUES", optional=True), Ref.keyword("WITHOUT_ARRAY_WRAPPER", optional=True), ), ), Sequence( "XML", OneOf( Delimited( Sequence( "PATH", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), _common_directives_for_xml, _elements, ), Delimited( "EXPLICIT", _common_directives_for_xml, Ref.keyword("XMLDATA", optional=True), ), Delimited( OneOf( "AUTO", Sequence( "RAW", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), ), _common_directives_for_xml, _elements, Sequence( OneOf( "XMLDATA", Sequence( "XMLSCHEMA", Bracketed( Ref("LiteralGrammar"), optional=True, ), ), ), optional=True, ), ), ), ), ), ) class ExecuteScriptSegment(BaseSegment): """`EXECUTE` statement. Matching segment name and type from exasol. 
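For example (a sketch; the procedure and parameter names are invented):

```
EXEC dbo.usp_load_orders @load_date = '2021-06-01', @rows_loaded = @rows OUTPUT;
```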
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/execute-transact-sql?view=sql-server-ver15 """ type = "execute_script_statement" match_grammar = Sequence( OneOf("EXEC", "EXECUTE"), Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True), OptionallyBracketed( OneOf(Ref("ObjectReferenceSegment"), Ref("QuotedLiteralSegment")) ), Indent, Sequence( Sequence(Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True), OneOf( "DEFAULT", Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("SingleIdentifierGrammar"), ), Sequence("OUTPUT", optional=True), AnyNumberOf( Ref("CommaSegment"), Sequence( Ref("ParameterNameSegment"), Ref("EqualsSegment"), optional=True ), OneOf( "DEFAULT", Ref("LiteralGrammar"), Ref("ParameterNameSegment"), Ref("SingleIdentifierGrammar"), ), Sequence("OUTPUT", optional=True), ), optional=True, ), Dedent, Ref("DelimiterGrammar", optional=True), ) class CreateSchemaStatementSegment(BaseSegment): """A `CREATE SCHEMA` statement. Overriding ANSI to allow for AUTHORIZATION clause https://docs.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver15 Not yet implemented: proper schema_element parsing. Once we have an AccessStatementSegment that works for TSQL, this definition should be tweaked to include schema elements. """ type = "create_schema_statement" match_grammar = Sequence( "CREATE", "SCHEMA", Ref("SchemaReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), Ref( "DelimiterGrammar", optional=True, ), ) class MergeStatementSegment(ansi.MergeStatementSegment): """Contains dialect specific `MERGE` statement.""" type = "merge_statement" match_grammar = Sequence( Ref("MergeIntoLiteralGrammar"), Indent, Ref("TableReferenceSegment"), Sequence( "WITH", Bracketed( Delimited( Ref("TableHintSegment", optional=True), ) ), optional=True, ), Ref("AliasExpressionSegment", optional=True), Dedent, "USING", Indent, OneOf( Ref("TableReferenceSegment"), Ref("AliasedTableReferenceGrammar"), Sequence( Bracketed( Ref("SelectableGrammar"), ), Ref("AliasExpressionSegment", optional=True), ), ), Dedent, Conditional(Indent, indented_using_on=True), Ref("JoinOnConditionSegment"), Conditional(Dedent, indented_using_on=True), Ref("MergeMatchSegment"), ) class MergeMatchSegment(BaseSegment): """Contains dialect specific merge operations.""" type = "merge_match" match_grammar = Sequence( AnyNumberOf( Ref("MergeMatchedClauseSegment"), Ref("MergeNotMatchedClauseSegment"), min_times=1, ), Ref("OutputClauseSegment", optional=True), Ref("OptionClauseSegment", optional=True), ) class MergeMatchedClauseSegment(BaseSegment): """The `WHEN MATCHED` clause within a `MERGE` statement.""" type = "merge_when_matched_clause" match_grammar = Sequence( "WHEN", "MATCHED", Sequence( "AND", Ref("ExpressionSegment"), optional=True, ), Indent, "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ) class MergeNotMatchedClauseSegment(BaseSegment): """The `WHEN NOT MATCHED` clause within a `MERGE` statement.""" type = "merge_when_not_matched_clause" match_grammar = OneOf( Sequence( "WHEN", "NOT", "MATCHED", Sequence("BY", "TARGET", optional=True), Sequence("AND", Ref("ExpressionSegment"), optional=True), Indent, "THEN", Ref("MergeInsertClauseSegment"), Dedent, ), Sequence( "WHEN", "NOT", "MATCHED", "BY", "SOURCE", Sequence("AND", Ref("ExpressionSegment"), optional=True), Indent, "THEN", OneOf( Ref("MergeUpdateClauseSegment"), Ref("MergeDeleteClauseSegment"), ), Dedent, ), ) 
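# As a rough illustration (object and column names invented), the matched /
# not-matched clauses above combine to support statements such as:
#
#     MERGE INTO dbo.target AS t
#     USING dbo.source AS s
#         ON t.id = s.id
#     WHEN MATCHED THEN UPDATE SET t.val = s.val
#     WHEN NOT MATCHED BY TARGET THEN INSERT (id, val) VALUES (s.id, s.val)
#     WHEN NOT MATCHED BY SOURCE THEN DELETE;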
class MergeInsertClauseSegment(BaseSegment): """`INSERT` clause within the `MERGE` statement.""" type = "merge_insert_clause" match_grammar = Sequence( "INSERT", Indent, Ref("BracketedColumnReferenceListGrammar", optional=True), Dedent, "VALUES", Indent, OneOf( Bracketed( Delimited( AnyNumberOf( Ref("ExpressionSegment"), ), ), ), Sequence( "DEFAULT", "VALUES", ), ), Dedent, ) class OutputClauseSegment(BaseSegment): """OUTPUT Clause used within DELETE, INSERT, UPDATE, MERGE. https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql?view=sql-server-ver15 """ type = "output_clause" match_grammar = AnyNumberOf( Sequence( "OUTPUT", Indent, Delimited( AnyNumberOf( Ref("WildcardExpressionSegment"), Sequence( Ref("BaseExpressionElementGrammar"), Ref("AliasExpressionSegment", optional=True), ), Ref("SingleIdentifierGrammar"), terminators=[Ref.keyword("INTO")], ), ), Dedent, Sequence( "INTO", Indent, Ref("TableReferenceSegment"), Bracketed( Delimited( Ref("ColumnReferenceSegment"), ), optional=True, ), Dedent, optional=True, ), ), ) class ThrowStatementSegment(BaseSegment): """A THROW statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/throw-transact-sql?view=sql-server-ver15 """ type = "throw_statement" match_grammar = Sequence( "THROW", Sequence( OneOf( # error_number Ref("NumericLiteralSegment"), Ref("ParameterNameSegment"), ), Ref("CommaSegment"), OneOf( # message Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), Ref("ParameterNameSegment"), ), Ref("CommaSegment"), OneOf( # state Ref("NumericLiteralSegment"), Ref("ParameterNameSegment"), ), optional=True, ), ) class RaiserrorStatementSegment(BaseSegment): """RAISERROR statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/raiserror-transact-sql?view=sql-server-ver15 """ type = "raiserror_statement" match_grammar = Sequence( "RAISERROR", Bracketed( Delimited( OneOf( Ref("NumericLiteralSegment"), Ref("QuotedLiteralSegment"), Ref("QuotedLiteralSegmentWithN"), Ref("ParameterNameSegment"), ), OneOf( Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), Ref("ParameterNameSegment"), ), OneOf( Ref("NumericLiteralSegment"), Ref("QualifiedNumericLiteralSegment"), Ref("ParameterNameSegment"), ), AnyNumberOf( Ref("LiteralGrammar"), Ref("ParameterNameSegment"), min_times=0, max_times=20, ), ), ), Sequence( "WITH", Delimited( "LOG", "NOWAIT", "SETERROR", ), optional=True, ), ) class WindowSpecificationSegment(BaseSegment): """Window specification within OVER(...). Overriding ANSI to remove window name option not supported by TSQL """ type = "window_specification" match_grammar = Sequence( Ref("PartitionClauseSegment", optional=True), Ref("OrderByClauseSegment", optional=True), Ref("FrameClauseSegment", optional=True), optional=True, ) class GotoStatement(BaseSegment): """GOTO statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15 """ type = "goto_statement" match_grammar = Sequence("GOTO", Ref("SingleIdentifierGrammar")) class ExecuteAsClause(BaseSegment): """EXECUTE AS Clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/execute-as-clause-transact-sql?view=sql-server-ver16 """ type = "execute_as_clause" match_grammar = Sequence( "EXECUTE", "AS", Ref("SingleQuotedIdentifierSegment"), ) class CreateTriggerStatementSegment(BaseSegment): """Create Trigger Statement. 
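For example (a sketch; all object names are invented):

```
CREATE TRIGGER dbo.trg_orders_audit
ON dbo.orders
AFTER INSERT, UPDATE
AS
INSERT INTO dbo.orders_audit (order_id)
SELECT order_id FROM inserted;
```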
https://docs.microsoft.com/en-us/sql/t-sql/statements/create-trigger-transact-sql?view=sql-server-ver15 """ type = "create_trigger" match_grammar: Matchable = Sequence( "CREATE", "TRIGGER", Ref("TriggerReferenceSegment"), "ON", OneOf( Ref("TableReferenceSegment"), Sequence("ALL", "SERVER"), "DATABASE", ), Sequence( "WITH", AnySetOf( # NOTE: Technically, ENCRYPTION can't be combined with the other two, # but this slightly more generous parsing is ok for SQLFluff. Ref.keyword("ENCRYPTION"), Ref.keyword("NATIVE_COMPILATION"), Ref.keyword("SCHEMABINDING"), ), Ref("ExecuteAsClause", optional=True), optional=True, ), OneOf( Sequence("FOR", Delimited(Ref("SingleIdentifierGrammar"), optional=True)), "AFTER", Sequence("INSTEAD", "OF"), optional=True, ), Delimited( "INSERT", "UPDATE", "DELETE", optional=True, ), Sequence("WITH", "APPEND", optional=True), Sequence("NOT", "FOR", "REPLICATION", optional=True), "AS", Ref("OneOrMoreStatementsGrammar"), # TODO: EXTERNAL NAME ) class DropTriggerStatementSegment(BaseSegment): """Drop Trigger Statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/drop-trigger-transact-sql?view=sql-server-ver15 """ type = "drop_trigger" match_grammar: Matchable = Sequence( "DROP", "TRIGGER", Ref("IfExistsGrammar", optional=True), Delimited(Ref("TriggerReferenceSegment")), Sequence("ON", OneOf("DATABASE", Sequence("ALL", "SERVER")), optional=True), ) class DisableTriggerStatementSegment(BaseSegment): """Disable Trigger Statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/disable-trigger-transact-sql?view=sql-server-ver15 """ type = "disable_trigger" match_grammar: Matchable = Sequence( "DISABLE", "TRIGGER", OneOf( Delimited(Ref("TriggerReferenceSegment")), "ALL", ), Sequence( "ON", OneOf(Ref("ObjectReferenceSegment"), "DATABASE", Sequence("ALL", "SERVER")), optional=True, ), ) class LabelStatementSegment(BaseSegment): """Label Statement, for a GOTO statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/goto-transact-sql?view=sql-server-ver15 """ type = "label_segment" match_grammar: Matchable = Sequence( Ref("NakedIdentifierSegment"), Ref("ColonSegment"), allow_gaps=False ) class AccessStatementSegment(BaseSegment): """A `GRANT` or `REVOKE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/grant-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/statements/deny-transact-sql?view=sql-server-ver15 https://docs.microsoft.com/en-us/sql/t-sql/statements/revoke-transact-sql?view=sql-server-ver15 """ type = "access_statement" # Privileges that can be set on the account (specific to snowflake) _global_permissions = OneOf( Sequence( "CREATE", OneOf( "ROLE", "USER", "WAREHOUSE", "DATABASE", "INTEGRATION", ), ), Sequence("APPLY", "MASKING", "POLICY"), "EXECUTE", ) _schema_object_names = [ "TABLE", "VIEW", "FUNCTION", "PROCEDURE", "SEQUENCE", ] _schema_object_types = OneOf( *_schema_object_names, Sequence("EXTERNAL", "TABLE"), Sequence("FILE", "FORMAT"), ) # We reuse the object names above and simply append an `S` to the end of them to get # plurals _schema_object_types_plural = OneOf( *[f"{object_name}S" for object_name in _schema_object_names] ) _permissions = Sequence( OneOf( "ALTER", "CONTROL", "DELETE", "EXECUTE", "INSERT", "RECEIVE", "REFERENCES", "SELECT", Sequence("TAKE", "OWNERSHIP"), "UPDATE", Sequence("VIEW", "CHANGE", "TRACKING"), Sequence("VIEW", "DEFINITION"), ), Ref("BracketedColumnReferenceListGrammar", optional=True), ) # All of the object types that we can grant permissions on.
# This list will contain ansi sql objects as well as dialect specific ones. _objects = Sequence( OneOf( "DATABASE", "LANGUAGE", "SCHEMA", "ROLE", "TYPE", Sequence( "FOREIGN", OneOf("SERVER", Sequence("DATA", "WRAPPER")), ), Sequence("ALL", "SCHEMAS", "IN", "DATABASE"), _schema_object_types, Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"), optional=True, ), Delimited(Ref("ObjectReferenceSegment"), terminators=["TO", "FROM"]), Ref("FunctionParameterListGrammar", optional=True), ) match_grammar: Matchable = OneOf( # Based on https://www.postgresql.org/docs/13/sql-grant.html # and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html Sequence( "GRANT", OneOf( Sequence( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, "TO", Delimited( OneOf(Ref("RoleReferenceSegment"), Ref("FunctionSegment")), ), OneOf( Sequence("WITH", "GRANT", "OPTION"), optional=True, ), Sequence( "AS", Ref("ObjectReferenceSegment"), optional=True, ), ), Sequence( "DENY", OneOf( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, OneOf("TO"), Delimited( Ref("RoleReferenceSegment"), ), Sequence( Ref.keyword("CASCADE", optional=True), Ref("ObjectReferenceSegment", optional=True), optional=True, ), ), Sequence( "REVOKE", Sequence("GRANT", "OPTION", "FOR", optional=True), OneOf( Delimited( OneOf(_global_permissions, _permissions), terminators=["ON"], ), Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)), ), "ON", Sequence( OneOf("LOGIN", "DATABASE", "OBJECT", "ROLE", "SCHEMA", "USER"), Ref("CastOperatorSegment"), optional=True, ), _objects, OneOf("TO", "FROM"), Delimited( Ref("RoleReferenceSegment"), ), Sequence( Ref.keyword("CASCADE", optional=True), Ref("ObjectReferenceSegment", optional=True), optional=True, ), ), ) class CreateTypeStatementSegment(BaseSegment): """A `CREATE TYPE` statement. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-type-transact-sql?view=sql-server-ver15 """ type = "create_type_statement" match_grammar: Matchable = Sequence( "CREATE", "TYPE", Ref("ObjectReferenceSegment"), OneOf( Sequence("FROM", Ref("ObjectReferenceSegment")), Sequence( "AS", "TABLE", Sequence( Bracketed( Delimited( OneOf( Ref("TableConstraintSegment"), Ref("ColumnDefinitionSegment"), Ref("TableIndexSegment"), ), allow_trailing=True, ) ), ), ), ), ) class OpenCursorStatementSegment(BaseSegment): """An `OPEN` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/open-transact-sql?view=sql-server-ver15 """ type = "open_cursor_statement" match_grammar: Matchable = Sequence( "OPEN", Ref("CursorNameGrammar"), ) class CloseCursorStatementSegment(BaseSegment): """A `CLOSE` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/close-transact-sql?view=sql-server-ver15 """ type = "close_cursor_statement" match_grammar: Matchable = Sequence( "CLOSE", Ref("CursorNameGrammar"), ) class DeallocateCursorStatementSegment(BaseSegment): """A `DEALLOCATE` cursor statement. 
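For example, as the final step of a typical cursor lifecycle (the cursor and
variable names are invented):

```
OPEN order_cursor;
FETCH NEXT FROM order_cursor INTO @order_id;
CLOSE order_cursor;
DEALLOCATE order_cursor;
```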
https://docs.microsoft.com/en-us/sql/t-sql/language-elements/deallocate-transact-sql?view=sql-server-ver15 """ type = "deallocate_cursor_statement" match_grammar: Matchable = Sequence( "DEALLOCATE", Ref("CursorNameGrammar"), ) class FetchCursorStatementSegment(BaseSegment): """A `FETCH` cursor statement. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/fetch-transact-sql?view=sql-server-ver15 """ type = "fetch_cursor_statement" match_grammar: Matchable = Sequence( "FETCH", OneOf("NEXT", "PRIOR", "FIRST", "LAST", optional=True), "FROM", Ref("CursorNameGrammar"), Sequence("INTO", Delimited(Ref("ParameterNameSegment")), optional=True), ) class ConcatSegment(ansi.CompositeBinaryOperatorSegment): """Concat operator.""" match_grammar: Matchable = Ref("PlusSegment") class CreateSynonymStatementSegment(BaseSegment): """A `CREATE SYNONYM` statement.""" type = "create_synonym_statement" # https://learn.microsoft.com/en-us/sql/t-sql/statements/create-synonym-transact-sql match_grammar: Matchable = Sequence( "CREATE", "SYNONYM", Ref("SynonymReferenceSegment"), "FOR", Ref("ObjectReferenceSegment"), ) class DropSynonymStatementSegment(BaseSegment): """A `DROP SYNONYM` statement.""" type = "drop_synonym_statement" # https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-synonym-transact-sql match_grammar: Matchable = Sequence( "DROP", "SYNONYM", Ref("IfExistsGrammar", optional=True), Ref("SynonymReferenceSegment"), ) class SynonymReferenceSegment(ansi.ObjectReferenceSegment): """A reference to a synonym. A synonym may only (optionally) specify a schema. It may not specify a server or database name. """ type = "synonym_reference" # match grammar (allow whitespace) match_grammar: Matchable = Sequence( Ref("SingleIdentifierGrammar"), AnyNumberOf( Sequence( Ref("DotSegment"), Ref("SingleIdentifierGrammar", optional=True), ), min_times=0, max_times=1, ), ) class SamplingExpressionSegment(ansi.SamplingExpressionSegment): """Override ANSI to use TSQL TABLESAMPLE expression.""" type = "sample_expression" match_grammar: Matchable = Sequence( "TABLESAMPLE", Sequence("SYSTEM", optional=True), Bracketed( Sequence( Ref("NumericLiteralSegment"), OneOf("PERCENT", "ROWS", optional=True) ) ), Sequence( OneOf("REPEATABLE"), Bracketed(Ref("NumericLiteralSegment")), optional=True, ), ) class TemporalQuerySegment(BaseSegment): """A segment that allows Temporal Queries to be run. https://learn.microsoft.com/en-us/sql/relational-databases/tables/temporal-tables?view=sql-server-ver16 """ type = "temporal_query" match_grammar: Matchable = Sequence( "FOR", "SYSTEM_TIME", OneOf( "ALL", Sequence( "AS", "OF", Ref("QuotedLiteralSegment"), ), Sequence( "FROM", Ref("QuotedLiteralSegment"), "TO", Ref("QuotedLiteralSegment"), ), Sequence( "BETWEEN", Ref("QuotedLiteralSegment"), "AND", Ref("QuotedLiteralSegment"), ), Sequence( "CONTAINED", "IN", Bracketed( Delimited( Ref("QuotedLiteralSegment"), ) ), ), ), ) class CreateDatabaseScopedCredentialStatementSegment(BaseSegment): """A statement to create a database scoped credential. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-database-scoped-credential-transact-sql?view=sql-server-ver16 """ type = "create_database_scoped_credential_statement" match_grammar: Matchable = Sequence( "CREATE", "DATABASE", "SCOPED", "CREDENTIAL", Ref("ObjectReferenceSegment"), "WITH", Ref("CredentialGrammar"), ) class CreateExternalDataSourceStatementSegment(BaseSegment): """A statement to create an external data source. 
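For example (a sketch; the name, location and credential are invented):

```
CREATE EXTERNAL DATA SOURCE my_source
WITH (
    LOCATION = 'https://myaccount.blob.core.windows.net/mycontainer',
    CREDENTIAL = my_scoped_credential
);
```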
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-data-source-transact-sql?view=sql-server-ver16&tabs=dedicated#syntax """ type = "create_external_data_source_statement" match_grammar: Matchable = Sequence( "CREATE", "EXTERNAL", "DATA", "SOURCE", Ref("ObjectReferenceSegment"), "WITH", Bracketed( Delimited( Ref("TableLocationClause"), Sequence( "CONNECTION_OPTIONS", Ref("EqualsSegment"), AnyNumberOf(Ref("QuotedLiteralSegmentOptWithN")), ), Sequence( "CREDENTIAL", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "PUSHDOWN", Ref("EqualsSegment"), OneOf("ON", "OFF"), ), ), ), ) class PeriodSegment(BaseSegment): """A `PERIOD FOR SYSTEM_TIME` for `CREATE TABLE` of temporal tables. https://docs.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver15 https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver16#generated-always-as--row--transaction_id--sequence_number----start--end---hidden---not-null- """ type = "period_segment" match_grammar = Sequence( "PERIOD", "FOR", "SYSTEM_TIME", Bracketed( Delimited( Ref("ColumnReferenceSegment"), Ref("ColumnReferenceSegment"), ), ), ) class SqlcmdCommandSegment(BaseSegment): """A `sqlcmd` command. Microsoft allows professional CI/CD deployment through so-called 'SQL Database Projects'. There are proprietary `sqlcmd Commands` that can be part of an SQL file. https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility?view=sql-server-ver16#sqlcmd-commands """ type = "sqlcmd_command_segment" match_grammar: Matchable = OneOf( Sequence( Sequence( Ref("ColonSegment"), Ref("SqlcmdOperatorSegment"), # `:r` allow_gaps=False, ), Ref("SqlcmdFilePathSegment"), ), Sequence( Sequence( Ref("ColonSegment"), Ref("SqlcmdOperatorSegment"), # `:setvar` allow_gaps=False, ), Ref("ObjectReferenceSegment"), Ref("CodeSegment"), ), ) class ExternalFileFormatDelimitedTextFormatOptionClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` Delimited text `FORMAT_OPTIONS` clause.""" type = "external_file_delimited_text_format_options_clause" match_grammar = OneOf( Sequence( OneOf( "FIELD_TERMINATOR", "STRING_DELIMITER", "DATE_FORMAT", "PARSER_VERSION" ), Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIRST_ROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "USE_TYPE_DEFAULT", Ref("EqualsSegment"), Ref("BooleanLiteralGrammar"), ), Sequence( "ENCODING", Ref("EqualsSegment"), Ref("FileEncodingSegment"), ), ) class ExternalFileFormatDelimitedTextClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Delimited text* clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delimited#syntax """ type = "external_file_delimited_text_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "DELIMITEDTEXT", ), Sequence( "FORMAT_OPTIONS", Bracketed( Delimited( Ref("ExternalFileFormatDelimitedTextFormatOptionClause"), ), ), optional=True, ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatRcClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Record Columnar file format (RcFile)* clause.
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=rc#syntax """ type = "external_file_rc_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "RCFILE", ), Sequence( "SERDE_METHOD", Ref("EqualsSegment"), Ref("SerdeMethodSegment"), ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatOrcClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Optimized Row Columnar (ORC)* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=orc#syntax """ type = "external_file_orc_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "ORC", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatParquetClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *PARQUET* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=parquet#syntax """ type = "external_file_parquet_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "PARQUET", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatJsonClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *JSON* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=json#syntax """ type = "external_file_json_clause" match_grammar = Delimited( Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "JSON", ), Sequence( "DATA_COMPRESSION", Ref("EqualsSegment"), Ref("FileCompressionSegment"), optional=True, ), ) class ExternalFileFormatDeltaClause(BaseSegment): """`CREATE EXTERNAL FILE FORMAT` *Delta Lake* format clause. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delta#syntax """ type = "external_file_delta_clause" match_grammar = Sequence( "FORMAT_TYPE", Ref("EqualsSegment"), "DELTA", ) class CreateExternalFileFormat(BaseSegment): """A statement to create an `EXTERNAL FILE FORMAT` object. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delta#syntax """ type = "create_external_file_format" match_grammar: Matchable = Sequence( "CREATE", "EXTERNAL", "FILE", "FORMAT", Ref("ObjectReferenceSegment"), "WITH", Bracketed( OneOf( Ref("ExternalFileFormatDelimitedTextClause"), Ref("ExternalFileFormatRcClause"), Ref("ExternalFileFormatOrcClause"), Ref("ExternalFileFormatParquetClause"), Ref("ExternalFileFormatJsonClause"), Ref("ExternalFileFormatDeltaClause"), ), ), ) class OpenJsonWithClauseSegment(BaseSegment): """A `WITH` clause of an `OPENJSON()` table-valued function. https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#with_clause """ type = "openjson_with_clause" match_grammar = Sequence( "WITH", Bracketed( Delimited( Sequence( Ref("ColumnReferenceSegment"), Ref("DatatypeSegment"), Ref("QuotedLiteralSegment", optional=True), # column_path Sequence( "AS", "JSON", optional=True, ), ), ), ), ) class OpenJsonSegment(BaseSegment): """An `OPENJSON()` table-valued function. 
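For example (a sketch; the variable and JSON paths are invented):

```
SELECT items.*
FROM OPENJSON(@json, '$.orders')
    WITH (
        order_id INT '$.id',
        customer_name NVARCHAR(50) '$.customer.name'
    ) AS items;
```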
https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#syntax """ type = "openjson_segment" match_grammar = Sequence( "OPENJSON", Bracketed( Delimited( Ref("QuotedLiteralSegmentOptWithN"), # jsonExpression Ref("ColumnReferenceSegment"), Ref("ParameterNameSegment"), Ref("QuotedLiteralSegment"), # path ), ), Ref("OpenJsonWithClauseSegment", optional=True), ) class CreateExternalTableStatementSegment(BaseSegment): """A `CREATE EXTERNAL TABLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-table-transact-sql?view=sql-server-ver16&tabs=dedicated """ type = "create_external_table_statement" match_grammar = Sequence( "CREATE", "EXTERNAL", "TABLE", Ref("ObjectReferenceSegment"), Bracketed( Delimited( Ref("ColumnDefinitionSegment"), ), ), "WITH", Bracketed( Delimited( Ref("TableLocationClause"), Sequence( "DATA_SOURCE", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "REJECT_TYPE", Ref("EqualsSegment"), OneOf("VALUE", "PERCENTAGE"), ), Sequence( "REJECT_VALUE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "REJECT_SAMPLE_VALUE", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "REJECTED_ROW_LOCATION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ), ), ) class CreateRoleStatementSegment(ansi.CreateRoleStatementSegment): """A `CREATE ROLE` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/create-role-transact-sql?view=sql-server-ver16 """ type = "create_role_statement" match_grammar = Sequence( "CREATE", "ROLE", Ref("RoleReferenceSegment"), Sequence( "AUTHORIZATION", Ref("RoleReferenceSegment"), optional=True, ), ) class DropExternalTableStatementSegment(BaseSegment): """A `DROP EXTERNAL TABLE ...` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-external-table-transact-sql """ type = "drop_external_table_statement" match_grammar = Sequence( "DROP", "EXTERNAL", "TABLE", Ref("TableReferenceSegment"), ) class StorageLocationSegment(BaseSegment): """A tsql external storage location. https://learn.microsoft.com/en-us/sql/t-sql/statements/copy-into-transact-sql?view=azure-sqldw-latest#external-locations """ type = "storage_location" match_grammar = OneOf( Ref("AzureBlobStoragePath"), Ref("AzureDataLakeStorageGen2Path"), ) class CopyIntoTableStatementSegment(BaseSegment): """A tsql `COPY INTO <table>
` statement. https://learn.microsoft.com/en-us/sql/t-sql/statements/copy-into-transact-sql?view=azure-sqldw-latest """ type = "copy_into_table_statement" match_grammar = Sequence( "COPY", "INTO", Ref("TableReferenceSegment"), Bracketed(Delimited(Ref("ColumnDefinitionSegment")), optional=True), Ref("FromClauseSegment"), Sequence( "WITH", Bracketed( Delimited( AnySetOf( Sequence( "FILE_TYPE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FILE_FORMAT", Ref("EqualsSegment"), Ref("ObjectReferenceSegment"), ), Sequence( "CREDENTIAL", Ref("EqualsSegment"), Bracketed(Ref("CredentialGrammar")), ), Sequence( "ERRORFILE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ERRORFILE_CREDENTIAL", Ref("EqualsSegment"), Bracketed(Ref("CredentialGrammar")), ), Sequence( "MAXERRORS", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "COMPRESSION", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDQUOTE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIELDTERMINATOR", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ROWTERMINATOR", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "FIRSTROW", Ref("EqualsSegment"), Ref("NumericLiteralSegment"), ), Sequence( "DATEFORMAT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "ENCODING", Ref("EqualsSegment"), Ref("FileEncodingSegment"), ), Sequence( "IDENTITY_INSERT", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), Sequence( "AUTO_CREATE_TABLE", Ref("EqualsSegment"), Ref("QuotedLiteralSegment"), ), ) ) ), optional=True, ), ) sqlfluff-2.3.5/src/sqlfluff/dialects/dialect_tsql_keywords.py000066400000000000000000000305531451700765000245300ustar00rootroot00000000000000r"""A list of all SQL key words. https://docs.microsoft.com/en-us/sql/t-sql/language-elements/reserved-keywords-transact-sql?view=sql-server-ver16 Run the script in a browser console to extract all reserved keywords: ```js (function () { const xpathResult = document.evaluate( '//div[@class=\'column\']/p[not(descendant::strong)]', document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null ); const list = new Set(); for (let index = 0; index < xpathResult.snapshotLength; ++index) { const node = xpathResult.snapshotItem(index); list.add(node.textContent.trim()); } console.log([...list].sort().map(value => ` "${value}"`).join(',\n')); })(); ``` Be careful, some keywords are present in `UNRESERVED_KEYWORDS`. 
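Broadly, the split between the lists below determines parsing behaviour: reserved keywords are excluded from matching as bare (naked) identifiers, while unreserved keywords are not. For example (illustrative), `SELECT json FROM dbo.documents` can parse because `JSON` is unreserved, whereas using `TABLE` as a bare column name cannot, because `TABLE` is reserved.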
""" RESERVED_KEYWORDS = [ "ADD", "ALL", "ALTER", "AND", "ANY", "APPEND", "AS", "ASC", "AUTHORIZATION", "BACKUP", "BATCHSIZE", "BEGIN", "BETWEEN", "BREAK", "BROWSE", "BULK", "BY", "CASCADE", "CASE", "CHECK", "CHECKPOINT", "CHECK_CONSTRAINTS", "CLOSE", "CLUSTERED", "COALESCE", "COLLATE", "COLUMN", "COMMIT", "COMPUTE", "CONSTRAINT", "CONTAINS", "CONTAINSTABLE", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT", "CURRENT_CATALOG", # *future* "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP", # *future* "CURRENT_PATH", # *future* "CURRENT_ROLE", # *future* "CURRENT_SCHEMA", # *future* "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", # *future* "CURRENT_USER", "CURSOR", "DATABASE", "DBCC", "DEALLOCATE", "DECLARE", "DEFAULT", "DELETE", "DENY", "DESC", "DISTINCT", "DISTRIBUTED", "DOUBLE", "DROP", "DYNAMIC", "ELSE", "END", "ERRLVL", "ESCAPE", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXIT", "EXTERNAL", "FAST_FORWARD", "FETCH", "FILE", "FILLFACTOR", "FOR", "FORWARD_ONLY", "FOREIGN", "FREETEXT", "FREETEXTTABLE", "FROM", "FULL", "FULLSCAN", "FUNCTION", "GLOBAL", "GO", "GOTO", "GRANT", "GROUP", "HAVING", "HOLDLOCK", "IDENTITY_INSERT", "IDENTITY", "IDENTITYCOL", "IF", "IN", "INDEX", "INNER", "INSERT", "INTERSECT", "INTO", "IS", "JOIN", "KEY", "KEYSET", "KILL", "LEFT", "LIKE", "LINENO", "LIST", "LOCAL", "MERGE", "NATIONAL", "NATIVE_COMPILATION", "NOCHECK", "NONCLUSTERED", "NOT", "NULL", "NULLIF", "OF", "OFF", "OFFSETS", "ON", "OPEN", "OPENDATASOURCE", "OPENQUERY", "OPENROWSET", "OPENXML", "OPTIMISTIC", "OPTION", "OR", "ORDER", "OUTER", "OVER", "OVERLAY", # *future* "PERCENT", "PIVOT", "PLAN", "PRIMARY", "PRINT", "PROC", "PROCEDURE", "PROPERTY", "PUBLIC", "RAISERROR", "READ", "READ_ONLY", "READTEXT", "RECONFIGURE", "REFERENCES", "REPLICATION", "RESAMPLE", "RESTORE", "RESTRICT", "RETURN", "REVERT", "REVOKE", "RIGHT", "ROLLBACK", "ROWCOUNT", "ROWGUIDCOL", "RULE", "SAVE", "SCHEMA", "SCROLL", "SCROLL_LOCKS", "SELECT", "SEMANTICKEYPHRASETABLE", "SEMANTICSIMILARITYDETAILSTABLE", "SEMANTICSIMILARITYTABLE", "SESSION_USER", "SET", "SETUSER", "SHUTDOWN", "SOME", "STATIC", "STATISTICS", "SYSTEM_USER", "TABLE", "TABLESAMPLE", "TEXTSIZE", "THEN", "TO", "TOP", "TRAN", "TRANSACTION", "TRAN", "TRIGGER", "TRUNCATE", "TRY_CONVERT", "TSEQUAL", "TYPE_WARNING", "UNION", "UNIQUE", "UNPIVOT", "UPDATE", "UPDATETEXT", "USE", "USER", "VALUES", "VARYING", "VIEW", "WAITFOR", "WHEN", "WHERE", "WHILE", "WITH", "WRITETEXT", ] # Future reserved keywords extracted from the documentation FUTURE_RESERVED_KEYWORDS = [ "ALIAS", "ARRAY", "CLASS", "DESTROY", "END-EXEC", "EVERY", "LIKE_REGEX", ] UNRESERVED_KEYWORDS = [ "ABORT", "ABORT_AFTER_WAIT", "ABSENT", "ATOMIC", "AFTER", "ALGORITHM", "ALLOWED", "ALLOW_PAGE_LOCKS", "ALLOW_ROW_LOCKS", "ALWAYS", "ANSI_DEFAULTS", "ANSI_NULL_DFLT_OFF", "ANSI_NULL_DFLT_ON", "ANSI_NULLS", "ANSI_PADDING", "ANSI_WARNINGS", "APPEND_ONLY", "APPLY", "ARITHABORT", "ARITHIGNORE", "AT", "AUTO_CREATE_TABLE", "AUTO", "BEFORE", # *future* "BERNOULLI", "BINARY", "BLOCKERS", "BREAK", "CACHE", "CALLED", "CALLER", "CAST", "CATCH", "CHANGE_TRACKING", "CODEPAGE", "COLUMN_ENCRYPTION_KEY", "COLUMNSTORE_ARCHIVE", "COLUMNSTORE", "COMMITTED", "COMPRESS_ALL_ROW_GROUPS", "COMPRESSION_DELAY", "COMPRESSION", "CONCAT_NULL_YIELDS_NULL", "CONCAT", "CONNECTION_OPTIONS", "CONTAINED", "CONTINUE", "CONTROL", "CREDENTIAL", "COPY", "CURSOR_CLOSE_ON_COMMIT", "CYCLE", "DATA_COMPRESSION", "DATA_CONSISTENCY_CHECK", "DATA_DELETION", "DATA_SOURCE", "DATA", "DATAFILETYPE", "DATASOURCE", "DATE_FORMAT", "DATE", "DATEFIRST", 
"DATEFORMAT", "DAY", "DAYS", "DEADLOCK_PRIORITY", "DELAY", "DELAYED_DURABILITY", "DELIMITEDTEXT", "DELTA", "DENSE_RANK", "DETERMINISTIC", "DISABLE", "DISK", # listed as reserved but functionally unreserved "DISTRIBUTION", # Azure Synapse Analytics specific "DROP_EXISTING", "DUMP", # listed as reserved but functionally unreserved "DURABILITY", "ELEMENT", # *future* "ELEMENTS", "ENCODING", "ENCRYPTED", "ENCRYPTION_TYPE", "ENCRYPTION", "ERRORFILE_CREDENTIAL", "ERRORFILE_DATA_SOURCE", "ERRORFILE", "EXPAND", "EXPLAIN", # Azure Synapse Analytics specific "EXPLICIT", "EXTERNALPUSHDOWN", "FAST", "FIELD_TERMINATOR", "FIELDQUOTE", "FIELDTERMINATOR", "FILE_FORMAT", "FILEGROUP", "FILESTREAM", "FILESTREAM_ON", "FILESTREAM", "FILE_TYPE", "FILETABLE_COLLATE_FILENAME", "FILETABLE_DIRECTORY", "FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME", "FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME", "FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME", "FILTER_COLUMN", "FILTER_PREDICATE", "FILTER", "FIPS_FLAGGER", "FIRE_TRIGGERS", "FIRST_ROW", "FIRST", "FIRSTROW", "FMTONLY", "FOLLOWING", "FORCE", "FORCED", "FORCEPLAN", "FORCESCAN", "FORCESEEK", "FORMAT_OPTIONS", "FORMAT_TYPE", "FORMAT", "FORMATFILE_DATA_SOURCE", "FORMATFILE", "FULLTEXT", "GENERATED", "HASH", "HEAP", # Azure Synapse Analytics specific "HIDDEN", "HIGH", "HINT", "HISTORY_RETENTION_PERIOD", "HISTORY_TABLE", "IGNORE_CONSTRAINTS", "IGNORE_DUP_KEY", "IGNORE_NONCLUSTERED_COLUMNSTORE_INDEX", "IGNORE_TRIGGERS", "IGNORE", "IMPLICIT_TRANSACTIONS", "INBOUND", "INCLUDE_NULL_VALUES", "INCLUDE", "INCREMENT", "INFINITE", "INLINE", "INSTEAD", "INTERVAL", "IO", "ISOLATION", "JSON", "KEEP", "KEEPDEFAULTS", "KEEPFIXED", "KEEPIDENTITY", "KEEPNULLS", "KILOBYTES_PER_BATCH", "LABEL", # *reserved* keyword in Azure Synapse; but would break TSQL parsing "LANGUAGE", "LAST", "LASTROW", "LEDGER", "LEDGER_VIEW", "LEGACY_CARDINALITY_ESTIMATION", "LEVEL", "LOAD", # listed as reserved but functionally unreserved "LOB_COMPACTION", "LOCATION", "LOCK_TIMEOUT", "LOG", "LOGIN", "LOOP", "LOW", "MANUAL", "MASKED", "MATCHED", "MAX_DURATION", "MAX_GRANT_PERCENT", "MAX", "MAXDOP", "MAXERRORS", "MAXRECURSION", "MAXVALUE", "MEMORY_OPTIMIZED", "MIGRATION_STATE", "MIN_GRANT_PERCENT", "MINUTES", "MINVALUE", "MONTH", "MONTHS", "NAME", "NEXT", "NO_PERFORMANCE_SPOOL", "NO", "NOCOUNT", "NOEXEC", "NOEXPAND", "NOLOCK", "NONE", "NORMAL", "NOWAIT", "NTILE", "NUMERIC_ROUNDABORT", "OBJECT", "OFFSET", "ONLINE", "OPENJSON", "OPERATION_TYPE_COLUMN_NAME", "OPERATION_TYPE_DESC_COLUMN_NAME", "OPTIMIZE_FOR_SEQUENTIAL_KEY", "OPTIMIZE", "ORC", "OUT", "OUTBOUND", "OUTPUT", "OWNER", "PAD_INDEX", "PAGE", "PAGLOCK", "PARAMETER", "PARAMETERS", # *future* "PARAMETERIZATION", "PARQUET", "PARSEONLY", "PARSER_VERSION", "PARTIAL", # *future* "PARTITION", "PARTITIONS", "PATH", "PAUSE", "PAUSED", "PERCENTAGE", "PERCENTILE_CONT", "PERCENTILE_DISC", "PERIOD", "PERSISTED", "POPULATION", "PRECEDING", "PRECISION", # listed as reserved but functionally unreserved "PRIOR", "PROFILE", "PUSHDOWN", "QUERY_GOVERNOR_COST_LIMIT", "QUERYTRACEON", "QUOTED_IDENTIFIER", "R", # sqlcmd command "RANDOMIZED", "RANGE", "RANK", "RAW", "RCFILE", "READCOMMITTED", "READCOMMITTEDLOCK", "READONLY", "READPAST", "READUNCOMMITTED", "REBUILD", "RECEIVE", "RECOMPILE", "RECURSIVE", "REGR_AVGX", # *future* "REGR_AVGY", # *future* "REGR_COUNT", # *future* "REGR_INTERCEPT", # *future* "REGR_R2", # *future* "REGR_SLOPE", # *future* "REGR_SXX", # *future* "REGR_SXY", # *future* "REGR_SYY", # *future* "REJECTED_ROW_LOCATION", "REJECT_SAMPLE_VALUE", "REJECT_TYPE", "REJECT_VALUE", 
"REMOTE_DATA_ARCHIVE", "REMOTE_PROC_TRANSACTIONS", "RENAME", # Azure Synapse Analytics specific "REORGANIZE", "REPEATABLE", "REPEATABLEREAD", "REPLACE", "REPLICATE", # Azure Synapse Analytics "RESPECT", "RESULT_SET_CACHING", # Azure Synapse Analytics specific "RESUMABLE", "RESUME", "RETENTION_PERIOD", "RETURNS", "ROBUST", "ROLE", "ROOT", "ROUND_ROBIN", # Azure Synapse Analytics specific "ROW_NUMBER", "ROW", "ROWGUIDCOL", "ROWLOCK", "ROWS_PER_BATCH", "ROWS", "ROWTERMINATOR", "S", "SCALEOUTEXECUTION", "SCHEMA_AND_DATA", "SCHEMA_ONLY", "SCHEMABINDING", "SCOPED", "SEARCH", "SECRET", "SECURITYAUDIT", # listed as reserved but functionally unreserved "SELF", "SEQUENCE_NUMBER_COLUMN_NAME", "SEQUENCE_NUMBER", "SEQUENCE", "SERDE_METHOD", "SERIALIZABLE", "SERVER", "SETERROR", "SETVAR", # sqlcmd command "SHOWPLAN_ALL", "SHOWPLAN_TEXT", "SHOWPLAN_XML", "SINGLE_BLOB", "SINGLE_CLOB", "SINGLE_NCLOB", "SNAPSHOT", "SORT_IN_TEMPDB", "SOURCE", "SPARSE", "SPATIAL_WINDOW_MAX_CELLS", "START", "STATISTICAL_SEMANTICS", "STATISTICS_INCREMENTAL", "STATISTICS_NORECOMPUTE", "STOPLIST", "STRING_AGG", "STRING_DELIMITER", "SWITCH", "SYNONYM", "SYSTEM_TIME", "SYSTEM_VERSIONING", "SYSTEM", "TABLOCK", "TABLOCKX", "TAKE", "TARGET", "TEXTIMAGE_ON", "THROW", "TIES", "TIME", "TIMEOUT", "TIMESTAMP", "TRANSACTION_ID_COLUMN_NAME", "TRANSACTION_ID", "TRUNCATE_TARGET", # Azure Synapse Analytics specific "TRY", "TYPE", "UNBOUNDED", "UNCOMMITTED", "UNKNOWN", "UPDLOCK", "USE_TYPE_DEFAULT", "USER_DB", # Azure Synapse Analytics specific, deprecated "USING", "VALUE", "VIEW_METADATA", "WAIT_AT_LOW_PRIORITY", "WAITFOR", "WEEK", "WEEKS", "WHILE", "WITHIN", "WITHOUT_ARRAY_WRAPPER", "WORK", "XACT_ABORT", "XLOCK", "XML", "XMLAGG", # *future* "XMLATTRIBUTES", # *future* "XMLBINARY", # *future* "XMLCAST", # *future* "XMLCOMMENT", # *future* "XMLCONCAT", # *future* "XMLDATA", "XMLDOCUMENT", # *future* "XMLELEMENT", # *future* "XMLEXISTS", # *future* "XMLFOREST", # *future* "XMLITERATE", # *future* "XMLNAMESPACES", # *future* "XMLPARSE", # *future* "XMLPI", # *future* "XMLQUERY", # *future* "XMLSCHEMA", "XMLSERIALIZE", # *future* "XMLTABLE", # *future* "XMLTEXT", # *future* "XMLVALIDATE", # *future* "XML_COMPRESSION", "XSINIL", "YEAR", "YEARS", "ZONE", ] sqlfluff-2.3.5/src/sqlfluff/diff_quality_plugin.py000066400000000000000000000110061451700765000223670ustar00rootroot00000000000000"""This module integrates SQLFluff with diff_cover's "diff-quality" tool.""" import copy import json import logging import os import pathlib import sys import tempfile from typing import List from diff_cover.command_runner import execute, run_command_for_code from diff_cover.hook import hookimpl as diff_cover_hookimpl from diff_cover.violationsreporters.base import ( QualityDriver, QualityReporter, Violation, ) logger = logging.getLogger(__name__) class SQLFluffDriver(QualityDriver): """SQLFluff driver for use by SQLFluffViolationReporter.""" def __init__(self) -> None: super().__init__( [sys.executable, "-m", "sqlfluff.cli.commands"], [".sql"], [ s.encode(sys.getfilesystemencoding()) for s in ["sqlfluff", "lint", "--format=json"] ], exit_codes=[0, 1], ) def parse_reports(self, reports) -> None: # pragma: no cover """Parse report output. 
Not used by SQLFluff.""" pass def installed(self) -> bool: """Check if SQLFluff is installed.""" return run_command_for_code("sqlfluff") == 0 class SQLFluffViolationReporter(QualityReporter): """Class that implements diff-quality integration.""" supported_extensions = ["sql"] def __init__(self, **kw) -> None: """Calls the base class constructor to set the object's name.""" super().__init__(SQLFluffDriver(), **kw) def violations_batch(self, src_paths): """Return a dictionary of Violations recorded in `src_paths`.""" # Check if SQLFluff is installed. if self.driver_tool_installed is None: self.driver_tool_installed = self.driver.installed() if not self.driver_tool_installed: # pragma: no cover raise OSError(f"{self.driver.name} is not installed") if src_paths: output = self.reports if self.reports else self._run_sqlfluff(src_paths) for o in output: # Load and parse SQLFluff JSON output. try: report = json.loads(o) except json.JSONDecodeError as e: # pragma: no cover print(f"Error parsing JSON output ({e}): {repr(o)}") raise else: for file in report: self.violations_dict[file["filepath"]] = [ Violation(v["line_no"], v["description"]) for v in file["violations"] ] else: logger.warning("Not running SQLFluff: No files to check") return self.violations_dict def _run_sqlfluff(self, src_paths) -> List[str]: # Prepare the SQLFluff command to run. command = copy.deepcopy(self.driver.command) if self.options: for arg in self.options.split(): command.append(arg) for src_path in src_paths: if src_path.endswith(".sql") and os.path.exists(src_path): command.append(src_path.encode(sys.getfilesystemencoding())) with tempfile.NamedTemporaryFile( prefix="sqlfluff-", suffix=".json", delete=False ) as f: f.close() try: # Write output to a temporary file. This avoids issues where # extraneous SQLFluff or dbt output results in the JSON output # being invalid. command += ["--write-output", f.name] # Run SQLFluff. printable_command = " ".join( [ c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c for c in command ] ) logger.warning(f"{printable_command}") execute(command, self.driver.exit_codes) return [pathlib.Path(f.name).read_text()] finally: os.remove(f.name) def measured_lines(self, src_path: str) -> None: # pragma: no cover """Return list of the lines in src_path that were measured.""" @diff_cover_hookimpl def diff_cover_report_quality(**kw) -> SQLFluffViolationReporter: """Returns the SQLFluff plugin. This function is registered as a diff_cover entry point. diff-quality calls it in order to "discover" the SQLFluff plugin. 
:return: Object that implements the BaseViolationReporter ABC """ return SQLFluffViolationReporter(**kw) sqlfluff-2.3.5/src/sqlfluff/py.typed000066400000000000000000000000001451700765000174460ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/000077500000000000000000000000001451700765000171135ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/__init__.py000066400000000000000000000000551451700765000212240ustar00rootroot00000000000000"""Standard Rules packaged with sqlfluff.""" sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/000077500000000000000000000000001451700765000207025ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL01.py000066400000000000000000000074621451700765000217220ustar00rootroot00000000000000"""Implementation of Rule AL01.""" from typing import Optional, Tuple, cast from sqlfluff.core.parser import ( BaseSegment, KeywordSegment, RawSegment, ) from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.reflow import ReflowSequence class Rule_AL01(BaseRule): """Implicit/explicit aliasing of table. Aliasing of table to follow preference (requiring an explicit ``AS`` is the default). **Anti-pattern** In this example, the alias ``voo`` is implicit. .. code-block:: sql SELECT voo.a FROM foo voo **Best practice** Add ``AS`` to make it explicit. .. code-block:: sql SELECT voo.a FROM foo AS voo """ name = "aliasing.table" aliases = ("L011",) groups: Tuple[str, ...] = ("all", "aliasing") config_keywords = ["aliasing"] crawl_behaviour = SegmentSeekerCrawler({"alias_expression"}, provide_raw_stack=True) is_fix_compatible = True _target_parent_types: Tuple[str, ...] = ( "from_expression_element", "merge_statement", ) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Implicit aliasing of table/column not allowed. Use explicit `AS` clause. We look for the alias segment, and then evaluate its parent and whether it contains an AS keyword. This is the _eval function for both AL01 and AL02. """ # Config type hints self.aliasing: str assert context.segment.is_type("alias_expression") if context.parent_stack[-1].is_type(*self._target_parent_types): # Search for an AS keyword. as_keyword: Optional[BaseSegment] for as_keyword in context.segment.segments: if as_keyword.raw_upper == "AS": break else: as_keyword = None if as_keyword: if self.aliasing == "implicit": self.logger.debug("Removing AS keyword and respacing.") return LintResult( anchor=as_keyword, # Generate the fixes to remove and respace accordingly. fixes=ReflowSequence.from_around_target( as_keyword, context.parent_stack[0], config=context.config, ) .without(cast(RawSegment, as_keyword)) .respace() .get_fixes(), ) elif self.aliasing != "implicit": self.logger.debug("Inserting AS keyword and respacing.") for identifier in context.segment.raw_segments: if identifier.is_code: break else: # pragma: no cover raise NotImplementedError( "Failed to find identifier. Raise this as a bug on GitHub." ) return LintResult( anchor=context.segment, # Work out the insertion and reflow fixes. fixes=ReflowSequence.from_around_target( identifier, context.parent_stack[0], config=context.config, # Only reflow before, otherwise we catch too much. 
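# (Illustrative: for `FROM foo foo_alias`, restricting the reflow to the
# segment before the identifier means only the inserted `AS` and its
# surrounding whitespace get respaced, leaving everything after the
# alias untouched.)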
sides="before", ) .insert( KeywordSegment("AS"), target=identifier, pos="before", ) .respace() .get_fixes(), ) return None sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL02.py000066400000000000000000000025021451700765000217110ustar00rootroot00000000000000"""Implementation of Rule AL02.""" from typing import Optional from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.rules.aliasing.AL01 import Rule_AL01 from sqlfluff.utils.functional import FunctionalContext class Rule_AL02(Rule_AL01): """Implicit/explicit aliasing of columns. Aliasing of columns to follow preference (explicit using an ``AS`` clause is default). **Anti-pattern** In this example, the alias for column ``a`` is implicit. .. code-block:: sql SELECT a alias_col FROM foo **Best practice** Add ``AS`` to make it explicit. .. code-block:: sql SELECT a AS alias_col FROM foo """ name = "aliasing.column" aliases = ("L012",) groups = ("all", "core", "aliasing") config_keywords = ["aliasing"] # NB: crawl_behaviour is the same as Rule AL01 _target_parent_types = ("select_clause_element",) def _eval(self, context: RuleContext) -> Optional[LintResult]: # T-SQL supports alternative alias expressions for AL02 # select alias = value # instead of # select value as alias # Recognise this and exit early if FunctionalContext(context).segment.children()[-1].raw == "=": return None return super()._eval(context) sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL03.py000066400000000000000000000104251451700765000217150ustar00rootroot00000000000000"""Implementation of Rule AL03.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_AL03(BaseRule): """Column expression without alias. Use explicit `AS` clause. **Anti-pattern** In this example, there is no alias for both sums. .. code-block:: sql SELECT sum(a), sum(b) FROM foo **Best practice** Add aliases. .. code-block:: sql SELECT sum(a) AS a_sum, sum(b) AS b_sum FROM foo """ name = "aliasing.expression" aliases = ("L013",) groups = ("all", "core", "aliasing") config_keywords = ["allow_scalar"] crawl_behaviour = SegmentSeekerCrawler({"select_clause_element"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Column expression without alias. Use explicit `AS` clause. We look for the select_clause_element segment, and then evaluate whether it has an alias segment or not and whether the expression is complicated enough. `parent_stack` is to assess how many other elements there are. 
""" functional_context = FunctionalContext(context) segment = functional_context.segment children = segment.children() # If we have an alias its all good if children.any(sp.is_type("alias_expression")): return None # Ignore if it's a function with EMITS clause as EMITS is equivalent to AS if ( children.select(sp.is_type("function")) .children() .select(sp.is_type("emits_segment")) ): return None # Ignore if it's a cast_expression with non-function enclosed children # For example, we do not want to ignore something like func()::type # but we can ignore something like a::type if children.children().select( sp.is_type("cast_expression") ) and not children.children().select( sp.is_type("cast_expression") ).children().any( sp.is_type("function") ): return None parent_stack = functional_context.parent_stack # Ignore if it is part of a CTE with column names if ( parent_stack.last(sp.is_type("common_table_expression")) .children() .any(sp.is_type("cte_column_list")) ): return None select_clause_children = children.select(sp.not_(sp.is_type("star"))) is_complex_clause = _recursively_check_is_complex(select_clause_children) if not is_complex_clause: return None # No fixes, because we don't know what the alias should be, # the user should document it themselves. if self.allow_scalar: # type: ignore # Check *how many* elements/columns there are in the select # statement. If this is the only one, then we won't # report an error. immediate_parent = parent_stack.last() elements = immediate_parent.children(sp.is_type("select_clause_element")) num_elements = len(elements) if num_elements > 1: return LintResult(anchor=context.segment) return None return LintResult(anchor=context.segment) def _recursively_check_is_complex(select_clause_or_exp_children: Segments) -> bool: forgiveable_types = [ "whitespace", "newline", "column_reference", "wildcard_expression", "bracketed", ] selector = sp.not_(sp.is_type(*forgiveable_types)) filtered = select_clause_or_exp_children.select(selector) remaining_count = len(filtered) # Once we have removed the above if nothing remains, # then this statement/expression was simple if remaining_count == 0: return False first_el = filtered.first() # Anything except a single expression seg remains # Then it was complex if remaining_count > 1 or not first_el.all(sp.is_type("expression")): return True # If we have just an expression check if it was simple return _recursively_check_is_complex(first_el.children()) sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL04.py000066400000000000000000000077611451700765000217270ustar00rootroot00000000000000"""Implementation of Rule AL04.""" import itertools from typing import List, Optional, Tuple from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment from sqlfluff.utils.analysis.select import get_select_statement_info class Rule_AL04(BaseRule): """Table aliases should be unique within each clause. Reusing table aliases is very likely a coding error. **Anti-pattern** In this example, the alias ``t`` is reused for two different tables: .. code-block:: sql SELECT t.a, t.b FROM foo AS t, bar AS t -- This can also happen when using schemas where the -- implicit alias is the table name: SELECT a, b FROM 2020.foo, 2021.foo **Best practice** Make all tables have a unique alias. .. 
code-block:: sql SELECT f.a, b.b FROM foo AS f, bar AS b -- Also use explicit aliases when referencing two tables -- with the same name from two different schemas. SELECT f1.a, f2.b FROM 2020.foo AS f1, 2021.foo AS f2 """ name = "aliasing.unique.table" aliases = ("L020",) groups: Tuple[str, ...] = ("all", "core", "aliasing", "aliasing.unique") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _lint_references_and_aliases( self, table_aliases: List[AliasInfo], standalone_aliases: List[str], references: List[ObjectReferenceSegment], col_aliases: List[ColumnAliasInfo], using_cols: List[str], parent_select: Optional[BaseSegment], ) -> Optional[List[LintResult]]: """Check whether any aliases are duplicates. NB: Subclasses of this error should override this function. """ # Are any of the aliases the same? duplicate = set() for a1, a2 in itertools.combinations(table_aliases, 2): # Compare the strings if a1.ref_str == a2.ref_str and a1.ref_str: duplicate.add(a2) if duplicate: return [ LintResult( # Reference the element, not the string. anchor=aliases.segment, description=( "Duplicate table alias {!r}. Table " "aliases should be unique." ).format(aliases.ref_str), ) for aliases in duplicate ] else: return None def _eval(self, context: RuleContext) -> EvalResultType: """Get References and Aliases and allow linting. This rule covers a lot of potential cases of odd usages of references, see the code for each of the potential cases. Subclasses of this rule should override the `_lint_references_and_aliases` method. """ assert context.segment.is_type("select_statement") select_info = get_select_statement_info(context.segment, context.dialect) if not select_info: return None # Work out if we have a parent select function parent_select = None for seg in reversed(context.parent_stack): if seg.is_type("select_statement"): parent_select = seg break # Pass them all to the function that does all the work. # NB: Subclasses of this rules should override the function below return self._lint_references_and_aliases( select_info.table_aliases, select_info.standalone_aliases, select_info.reference_buffer, select_info.col_aliases, select_info.using_cols, parent_select, ) sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL05.py000066400000000000000000000176161451700765000217300ustar00rootroot00000000000000"""Implementation of Rule AL05.""" from dataclasses import dataclass, field from typing import List, Set, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser.segments import BaseSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import Segments, sp @dataclass class AL05Query(Query): """Query subclass with custom AL05 info.""" aliases: List[AliasInfo] = field(default_factory=list) tbl_refs: Set[str] = field(default_factory=set) class Rule_AL05(BaseRule): """Tables should not be aliased if that alias is not used. **Anti-pattern** .. code-block:: sql SELECT a FROM foo AS zoo **Best practice** Use the alias or remove it. An unused alias makes code harder to read without changing any functionality. .. code-block:: sql SELECT zoo.a FROM foo AS zoo -- Alternatively... 
SELECT a FROM foo """ name = "aliasing.unused" aliases = ("L025",) groups = ("all", "core", "aliasing") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) _dialects_requiring_alias_for_values_clause = [ "snowflake", "tsql", ] is_fix_compatible = True def _eval(self, context: RuleContext) -> EvalResultType: violations: List[LintResult] = [] assert context.segment.is_type("select_statement") # Exit early if the SELECT does not define any aliases. select_info = get_select_statement_info(context.segment, context.dialect) if not select_info or not select_info.table_aliases: return None # Analyze the SELECT. alias: AliasInfo query = AL05Query.from_segment(context.segment, dialect=context.dialect) self._analyze_table_aliases(query, context.dialect) if context.dialect.name == "redshift": # Redshift supports un-nesting using aliases. # Detect that situation and ignore. # https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unnest # Do any references refer to aliases in the same list? references = set() aliases = set() for alias in query.aliases: aliases.add(alias.ref_str) if not alias.object_reference: continue # pragma: no cover for seg in alias.object_reference.segments: if seg.is_type("identifier"): references.add(seg.raw) # If there's any overlap between aliases and reference if aliases.intersection(references): self.logger.debug( "Overlapping references found. Assuming redshift semi-structured." ) return None for alias in query.aliases: # Skip alias if it's required (some dialects require aliases for # VALUES clauses). if alias.from_expression_element and self._is_alias_required( alias.from_expression_element, context.dialect.name ): continue if alias.aliased and alias.ref_str not in query.tbl_refs: # Unused alias. Report and fix. violations.append(self._report_unused_alias(alias)) return violations or None @classmethod def _is_alias_required( cls, from_expression_element: BaseSegment, dialect_name: str ) -> bool: """Given an alias, is it REQUIRED to be present? Aliases are required in SOME, but not all dialects when there's a VALUES clause. """ # Look for a table_expression (i.e. VALUES clause) as a descendant of # the FROM expression, potentially nested inside brackets. The reason we # allow nesting in brackets is that in some dialects (e.g. TSQL), this # is actually *required* in order for SQL Server to parse it. for segment in from_expression_element.iter_segments(expanding=("bracketed",)): if segment.is_type("table_expression"): # Found a table expression. Does it have a VALUES clause? if segment.get_child("values_clause"): # Found a VALUES clause. Is this a dialect that requires # VALUE clauses to be aliased? return ( dialect_name in cls._dialects_requiring_alias_for_values_clause ) elif any( seg.is_type( "select_statement", "set_expression", "with_compound_statement" ) for seg in segment.iter_segments(expanding=("bracketed",)) ): # The FROM expression is a derived table, i.e. a nested # SELECT. In this case, the alias is required in every # dialect we checked (MySQL, Postgres, T-SQL). # https://pganalyze.com/docs/log-insights/app-errors/U115 return True else: # None of the special cases above applies, so the alias is # not required. return False # This should never happen. Return False just to be safe. return False # pragma: no cover @classmethod def _analyze_table_aliases(cls, query: AL05Query, dialect: Dialect) -> None: # Get table aliases defined in query. 
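# (Illustrative: for `SELECT f.a FROM foo AS f` the alias `f` is recorded
# below, and the reference `f.a` later marks it as used via `tbl_refs`.)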
for selectable in query.selectables: select_info = selectable.select_info if select_info: # Record the aliases. query.aliases += select_info.table_aliases # Look at each table reference; if it's an alias reference, # resolve the alias: could be an alias defined in "query" # itself or an "ancestor" query. for r in select_info.reference_buffer: for tr in r.extract_possible_references( level=r.ObjectReferenceLevel.TABLE ): # This function walks up the query's parent stack if necessary. cls._resolve_and_mark_reference(query, tr.part) # Visit children. for child in query.children: cls._analyze_table_aliases(cast(AL05Query, child), dialect) @classmethod def _resolve_and_mark_reference(cls, query: AL05Query, ref: str) -> None: # Does this query define the referenced alias? if any(ref == a.ref_str for a in query.aliases): # Yes. Record the reference. query.tbl_refs.add(ref) elif query.parent: # No. Recursively check the query's parent hierarchy. cls._resolve_and_mark_reference(cast(AL05Query, query.parent), ref) @classmethod def _report_unused_alias(cls, alias: AliasInfo) -> LintResult: fixes = [LintFix.delete(alias.alias_expression)] # type: ignore # Walk back to remove indents/whitespaces to_delete = ( Segments(*alias.from_expression_element.segments) .reversed() .select( start_seg=alias.alias_expression, # Stop once we reach an other, "regular" segment. loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), ) ) fixes += [LintFix.delete(seg) for seg in to_delete] return LintResult( anchor=alias.segment, description="Alias {!r} is never used in SELECT statement.".format( alias.ref_str ), fixes=fixes, ) sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL06.py000066400000000000000000000100431451700765000217140ustar00rootroot00000000000000"""Implementation of Rule AL06.""" from typing import List, Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext class Rule_AL06(BaseRule): """Enforce table alias lengths in from clauses and join conditions. **Anti-pattern** In this example, alias ``o`` is used for the orders table. .. code-block:: sql SELECT SUM(o.amount) as order_amount, FROM orders as o **Best practice** Avoid aliases. Avoid short aliases when aliases are necessary. See also: :class:`Rule_AL07`. .. code-block:: sql SELECT SUM(orders.amount) as order_amount, FROM orders SELECT replacement_orders.amount, previous_orders.amount FROM orders AS replacement_orders JOIN orders AS previous_orders ON replacement_orders.id = previous_orders.replacement_id """ name = "aliasing.length" aliases = ("L066",) groups = ("all", "core", "aliasing") config_keywords = ["min_alias_length", "max_alias_length"] crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Identify aliases in from clause and join conditions. Find base table, table expressions in join, and other expressions in select clause and decide if it's needed to report them. """ self.min_alias_length: Optional[int] self.max_alias_length: Optional[int] assert context.segment.is_type("select_statement") children = FunctionalContext(context).segment.children() from_expression_elements = children.recursive_crawl("from_expression_element") return self._lint_aliases(from_expression_elements) or None def _lint_aliases(self, from_expression_elements) -> Optional[List[LintResult]]: """Lint all table aliases.""" # A buffer to keep any violations. 
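# (Illustrative: with `min_alias_length = 3`, `FROM orders AS o` is
# flagged as too short; with `max_alias_length = 10`,
# `FROM orders AS order_lines_enriched` is flagged as too long.)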
violation_buff = [] # For each table, check whether it is aliased, and if so check the # lengths. for from_expression_element in from_expression_elements: table_expression = from_expression_element.get_child("table_expression") table_ref = ( table_expression.get_child("object_reference") if table_expression else None ) # If the from_expression_element has no object_reference - skip it # An example case is a lateral flatten, where we have a function segment # instead of a table_reference segment. if not table_ref: continue # If there's no alias expression - skip it alias_exp_ref = from_expression_element.get_child("alias_expression") if alias_exp_ref is None: continue alias_identifier_ref = alias_exp_ref.get_child("identifier") if self.min_alias_length is not None: if len(alias_identifier_ref.raw) < self.min_alias_length: violation_buff.append( LintResult( anchor=alias_identifier_ref, description=( "Aliases should be at least {} character(s) long." ).format(self.min_alias_length), ) ) if self.max_alias_length is not None: if len(alias_identifier_ref.raw) > self.max_alias_length: violation_buff.append( LintResult( anchor=alias_identifier_ref, description=( "Aliases should be no more than {} character(s) long." ).format(self.max_alias_length), ) ) return violation_buff or None sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL07.py000066400000000000000000000254201451700765000217220ustar00rootroot00000000000000"""Implementation of Rule AL07.""" from collections import Counter, defaultdict from typing import Generator, List, NamedTuple, Optional from sqlfluff.core.parser import BaseSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import IdentifierSegment from sqlfluff.utils.functional import FunctionalContext, sp class TableAliasInfo(NamedTuple): """Structure yielded by_filter_table_expressions().""" table_ref: BaseSegment whitespace_ref: BaseSegment alias_exp_ref: BaseSegment alias_identifier_ref: BaseSegment class Rule_AL07(BaseRule): """Avoid table aliases in from clauses and join conditions. .. note:: This rule was taken from the `dbt Style Guide `_ which notes that: Avoid table aliases in join conditions (especially initialisms) - it's harder to understand what the table called "c" is compared to "customers". This rule is controversial and for many larger databases avoiding alias is neither realistic nor desirable. In particular for BigQuery due to the complexity of backtick requirements and determining whether a name refers to a project or dataset so automated fixes can potentially break working SQL code. For most users :class:`Rule_AL06` is likely a more appropriate linting rule to drive a sensible behaviour around aliasing. The stricter treatment of aliases in this rule may be useful for more focused projects, or temporarily as a refactoring tool because the :code:`fix` routine of the rule can remove aliases. This rule is disabled by default for all dialects it can be enabled with the ``force_enable = True`` flag. **Anti-pattern** In this example, alias ``o`` is used for the orders table, and ``c`` is used for ``customers`` table. .. code-block:: sql SELECT COUNT(o.customer_id) as order_amount, c.name FROM orders as o JOIN customers as c on o.id = c.user_id **Best practice** Avoid aliases. .. 
code-block:: sql SELECT COUNT(orders.customer_id) as order_amount, customers.name FROM orders JOIN customers on orders.id = customers.user_id -- Self-join will not raise issue SELECT table1.a, table_alias.b, FROM table1 LEFT JOIN table1 AS table_alias ON table1.foreign_key = table_alias.foreign_key """ name = "aliasing.forbid" aliases = ("L031",) groups = ("all", "aliasing") config_keywords = ["force_enable"] crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Identify aliases in from clause and join conditions. Find base table, table expressions in join, and other expressions in select clause and decide if it's needed to report them. """ # Config type hints self.force_enable: bool # Issue 2810: BigQuery has some tricky expectations (apparently not # documented, but subject to change, e.g.: # https://www.reddit.com/r/bigquery/comments/fgk31y/new_in_bigquery_no_more_backticks_around_table/) # about whether backticks are required (and whether the query is valid # or not, even with them), depending on whether the GCP project name is # present, or just the dataset name. Since SQLFluff doesn't have access # to BigQuery when it is looking at the query, it would be complex for # this rule to do the right thing. For now, the rule simply disables # itself. if not self.force_enable: return None assert context.segment.is_type("select_statement") children = FunctionalContext(context).segment.children() from_clause_segment = children.select(sp.is_type("from_clause")).first() base_table = ( from_clause_segment.children(sp.is_type("from_expression")) .first() .children(sp.is_type("from_expression_element")) .first() .children(sp.is_type("table_expression")) .first() .children(sp.is_type("object_reference")) .first() ) if not base_table: return None # A buffer for all table expressions in join conditions from_expression_elements = [] column_reference_segments = [] after_from_clause = children.select(start_seg=from_clause_segment[0]) for clause in from_clause_segment + after_from_clause: for from_expression_element in clause.recursive_crawl( "from_expression_element" ): from_expression_elements.append(from_expression_element) for column_reference in clause.recursive_crawl("column_reference"): column_reference_segments.append(column_reference) return ( self._lint_aliases_in_join( base_table[0] if base_table else None, from_expression_elements, column_reference_segments, context.segment, ) or None ) @classmethod def _filter_table_expressions( cls, base_table, from_expression_elements ) -> Generator[TableAliasInfo, None, None]: for from_expression in from_expression_elements: table_expression = from_expression.get_child("table_expression") if not table_expression: continue # pragma: no cover table_ref = table_expression.get_child("object_reference") # If the from_expression_element has no object_references - skip it # An example case is a lateral flatten, where we have a function segment # instead of a table_reference segment. 
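# (e.g. a Snowflake-style `FROM foo, LATERAL FLATTEN(input => foo.col) f`,
# where the flattened element carries no object_reference to compare.)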
if not table_ref: continue # If this is self-join - skip it if ( base_table and base_table.raw == table_ref.raw and base_table != table_ref ): continue whitespace_ref = from_expression.get_child("whitespace") # If there's no alias expression - skip it alias_exp_ref = from_expression.get_child("alias_expression") if alias_exp_ref is None: continue alias_identifier_ref = alias_exp_ref.get_child("identifier") yield TableAliasInfo( table_ref, whitespace_ref, alias_exp_ref, alias_identifier_ref ) def _lint_aliases_in_join( self, base_table, from_expression_elements, column_reference_segments, segment ) -> Optional[List[LintResult]]: """Lint and fix all aliases in joins - except for self-joins.""" # A buffer to keep any violations. violation_buff = [] to_check = list( self._filter_table_expressions(base_table, from_expression_elements) ) # How many times does each table appear in the FROM clause? table_counts = Counter(ai.table_ref.raw for ai in to_check) # What is the set of aliases used for each table? (We are mainly # interested in the NUMBER of different aliases used.) table_aliases = defaultdict(set) for ai in to_check: if ai and ai.table_ref and ai.alias_identifier_ref: table_aliases[ai.table_ref.raw].add(ai.alias_identifier_ref.raw) # For each aliased table, check whether to keep or remove it. for alias_info in to_check: # If the same table appears more than once in the FROM clause with # different alias names, do not consider removing its aliases. # The aliases may have been introduced simply to make each # occurrence of the table independent within the query. if ( table_counts[alias_info.table_ref.raw] > 1 and len(table_aliases[alias_info.table_ref.raw]) > 1 ): continue select_clause = segment.get_child("select_clause") ids_refs = [] # Find all references to alias in select clause if alias_info.alias_identifier_ref: alias_name = alias_info.alias_identifier_ref.raw for alias_with_column in select_clause.recursive_crawl( "object_reference" ): used_alias_ref = alias_with_column.get_child("identifier") if used_alias_ref and used_alias_ref.raw == alias_name: ids_refs.append(used_alias_ref) # Find all references to alias in column references for exp_ref in column_reference_segments: used_alias_ref = exp_ref.get_child("identifier") # exp_ref.get_child('dot') ensures that the column reference includes a # table reference if ( used_alias_ref and used_alias_ref.raw == alias_name and exp_ref.get_child("dot") ): ids_refs.append(used_alias_ref) # Fixes for deleting ` as sth` and for editing references to aliased tables. # Note: unparsable errors have caused the delete to fail (see #2484), # so check each `d` is truthy before doing deletes.
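# (Illustrative: for `FROM orders AS o`, the fixes below delete ` AS o`
# and rewrite each reference such as `o.id` back to `orders.id`.)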
fixes: List[LintFix] = [] fixes += [ LintFix.delete(d) for d in [alias_info.alias_exp_ref, alias_info.whitespace_ref] if d ] for alias in [alias_info.alias_identifier_ref, *ids_refs]: if alias: identifier_parts = alias_info.table_ref.raw.split(".") edits: List[BaseSegment] = [] for part in identifier_parts: if edits: edits.append(SymbolSegment(".", type="dot")) edits.append(IdentifierSegment(part, type="naked_identifier")) fixes.append( LintFix.replace( alias, edits, source=[alias_info.table_ref], ) ) violation_buff.append( LintResult( anchor=alias_info.alias_identifier_ref, description="Avoid aliases in from clauses and join conditions.", fixes=fixes, ) ) return violation_buff or None sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/AL08.py000066400000000000000000000105701451700765000217230ustar00rootroot00000000000000"""Implementation of Rule AL08.""" from typing import Dict, Optional, Tuple from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AL08(BaseRule): """Column aliases should be unique within each clause. Reusing column aliases is very likely a coding error. Note that while in many dialects, quoting an identifier makes it case-sensitive this rule always compares in a case-insensitive way. This is because columns with the same name, but different case, are still confusing and potentially ambiguous to other readers. In situations where it is *necessary* to have columns with the same name (whether they differ in case or not) we recommend disabling this rule for either just the line, or the whole file. **Anti-pattern** In this example, the alias ``foo`` is reused for two different columns: .. code-block:: sql SELECT a as foo, b as foo FROM tbl; -- This can also happen when referencing the same column -- column twice, or aliasing an expression to the same -- name as a column: SELECT foo, foo, a as foo FROM tbl; **Best practice** Make all columns have a unique alias. .. code-block:: sql SELECT a as foo, b as bar FROM tbl; -- Avoid also using the same column twice unless aliased: SELECT foo as foo1, foo as foo2, a as foo3 FROM tbl; """ name = "aliasing.unique.column" aliases = () groups: Tuple[str, ...] = ("all", "core", "aliasing", "aliasing.unique") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) def _eval(self, context: RuleContext) -> EvalResultType: """Walk through select clauses, looking for matching identifiers.""" assert context.segment.is_type("select_clause") used_aliases: Dict[str, BaseSegment] = {} violations = [] # Work through each of the elements for clause_element in context.segment.get_children("select_clause_element"): # Is there an alias expression? alias_expression = clause_element.get_child("alias_expression") column_alias: Optional[BaseSegment] = None if alias_expression: # Get the alias (it will be the next code element after AS) seg: Optional[BaseSegment] = None for seg in alias_expression.segments: if not seg or not seg.is_code or seg.raw_upper == "AS": continue break assert seg column_alias = seg # No alias, the only other thing we'll track are column references. else: column_reference = clause_element.get_child("column_reference") if column_reference: # We don't want the whole reference, just the last section. # If it is qualified, take the last bit. Otherwise, we still # take the last bit but it shouldn't make a difference. 
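# (Illustrative: for a qualified reference like `tbl.col`, only the final
# segment `col` is used as the tracked alias key.)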
column_alias = column_reference.segments[-1] # If we don't have an alias to work with, just skip this element if not column_alias: continue # NOTE: Always case insensitive, see docstring for why. _key = column_alias.raw_upper # Strip any quote tokens _key = _key.strip("\"'`") # Otherwise check whether it's been used before if _key in used_aliases: # It has. previous = used_aliases[_key] assert previous.pos_marker violations.append( LintResult( anchor=column_alias, description=( "Reuse of column alias " f"{column_alias.raw!r} from line " f"{previous.pos_marker.line_no}." ), ) ) else: # It's not, save it to check against others. used_aliases[_key] = clause_element return violations sqlfluff-2.3.5/src/sqlfluff/rules/aliasing/__init__.py000066400000000000000000000016561451700765000230230ustar00rootroot00000000000000"""The aliasing plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.aliasing.AL01 import Rule_AL01 from sqlfluff.rules.aliasing.AL02 import Rule_AL02 from sqlfluff.rules.aliasing.AL03 import Rule_AL03 from sqlfluff.rules.aliasing.AL04 import Rule_AL04 from sqlfluff.rules.aliasing.AL05 import Rule_AL05 from sqlfluff.rules.aliasing.AL06 import Rule_AL06 from sqlfluff.rules.aliasing.AL07 import Rule_AL07 from sqlfluff.rules.aliasing.AL08 import Rule_AL08 return [ Rule_AL01, Rule_AL02, Rule_AL03, Rule_AL04, Rule_AL05, Rule_AL06, Rule_AL07, Rule_AL08, ] sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/000077500000000000000000000000001451700765000211065ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM01.py000066400000000000000000000036241451700765000221230ustar00rootroot00000000000000"""Implementation of Rule AM01.""" from typing import Optional, Tuple from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_AM01(BaseRule): """Ambiguous use of ``DISTINCT`` in a ``SELECT`` statement with ``GROUP BY``. When using ``GROUP BY``, a ``DISTINCT`` clause should not be necessary as every non-distinct ``SELECT`` clause must be included in the ``GROUP BY`` clause. **Anti-pattern** ``DISTINCT`` and ``GROUP BY`` are conflicting. .. code-block:: sql SELECT DISTINCT a FROM foo GROUP BY a **Best practice** Remove ``DISTINCT`` or ``GROUP BY``. In our case, removing ``GROUP BY`` is better. .. code-block:: sql SELECT DISTINCT a FROM foo """ name = "ambiguous.distinct" aliases = ("L021",) groups: Tuple[str, ...]
= ("all", "core", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"select_statement"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Ambiguous use of DISTINCT in select statement with GROUP BY.""" segment = FunctionalContext(context).segment # We know it's a select_statement from the seeker crawler assert segment.all(sp.is_type("select_statement")) # Do we have a group by clause if segment.children(sp.is_type("groupby_clause")): # Do we have the "DISTINCT" keyword in the select clause distinct = ( segment.children(sp.is_type("select_clause")) .children(sp.is_type("select_clause_modifier")) .children(sp.is_type("keyword")) .select(sp.is_keyword("distinct")) ) if distinct: return LintResult(anchor=distinct[0]) return None sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM02.py000066400000000000000000000062621451700765000221250ustar00rootroot00000000000000"""Implementation of Rule AM02.""" from typing import Tuple from sqlfluff.core.parser import ( KeywordSegment, WhitespaceSegment, ) from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AM02(BaseRule): """``UNION [DISTINCT|ALL]`` is preferred over just ``UNION``. .. note:: This rule is only enabled for dialects that support ``UNION`` and ``UNION DISTINCT`` (``ansi``, ``hive``, ``mysql``, and ``redshift``). **Anti-pattern** In this example, ``UNION DISTINCT`` should be preferred over ``UNION``, because explicit is better than implicit. .. code-block:: sql SELECT a, b FROM table_1 UNION SELECT a, b FROM table_2 **Best practice** Specify ``DISTINCT`` or ``ALL`` after ``UNION`` (note that ``DISTINCT`` is the default behavior). .. code-block:: sql SELECT a, b FROM table_1 UNION DISTINCT SELECT a, b FROM table_2 """ name = "ambiguous.union" aliases = ("L033",) groups: Tuple[str, ...] = ("all", "core", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"set_operator"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Look for UNION keyword not immediately followed by DISTINCT or ALL. Note that UNION DISTINCT is valid, rule only applies to bare UNION. The function does this by looking for a segment of type set_operator which has a UNION but no DISTINCT or ALL. Note only some dialects have concept of UNION DISTINCT, so rule is only applied to dialects that are known to support this syntax. 
""" if context.dialect.name not in [ "ansi", "hive", "mysql", "redshift", ]: return LintResult() assert context.segment.is_type("set_operator") if "union" in context.segment.raw and not ( "ALL" in context.segment.raw.upper() or "DISTINCT" in context.segment.raw.upper() ): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment.segments[0], [ KeywordSegment("union"), WhitespaceSegment(), KeywordSegment("distinct"), ], ) ], ) elif "UNION" in context.segment.raw.upper() and not ( "ALL" in context.segment.raw.upper() or "DISTINCT" in context.segment.raw.upper() ): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment.segments[0], [ KeywordSegment("UNION"), WhitespaceSegment(), KeywordSegment("DISTINCT"), ], ) ], ) return LintResult() sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM03.py000066400000000000000000000074111451700765000221230ustar00rootroot00000000000000"""Implementation of Rule AM03.""" from typing import List, NamedTuple, Optional, Tuple from sqlfluff.core.parser import BaseSegment, KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class OrderByColumnInfo(NamedTuple): """For AM03, segment that ends an ORDER BY column and any order provided.""" column_reference: BaseSegment order: Optional[str] # One of 'ASC'/'DESC'/None class Rule_AM03(BaseRule): """Ambiguous ordering directions for columns in order by clause. **Anti-pattern** .. code-block:: sql SELECT a, b FROM foo ORDER BY a, b DESC **Best practice** If any columns in the ``ORDER BY`` clause specify ``ASC`` or ``DESC``, they should all do so. .. code-block:: sql SELECT a, b FROM foo ORDER BY a ASC, b DESC """ name = "ambiguous.order_by" aliases = ("L037",) groups: Tuple[str, ...] = ("all", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"orderby_clause"}) is_fix_compatible = True @staticmethod def _get_orderby_info(segment: BaseSegment) -> List[OrderByColumnInfo]: assert segment.is_type("orderby_clause") result = [] column_reference = None ordering_reference = None for child_segment in segment.segments: if child_segment.is_type("column_reference"): column_reference = child_segment elif child_segment.is_type("keyword") and child_segment.raw_upper in ( "ASC", "DESC", ): ordering_reference = child_segment.raw_upper if column_reference and child_segment.raw == ",": result.append( OrderByColumnInfo( column_reference=column_reference, order=ordering_reference ) ) # Reset findings column_reference = None ordering_reference = None # Special handling for last column if column_reference: result.append( OrderByColumnInfo( column_reference=column_reference, order=ordering_reference ) ) return result def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Ambiguous ordering directions for columns in order by clause. This rule checks if some ORDER BY columns explicitly specify ASC or DESC and some don't. """ # We only trigger on orderby_clause lint_fixes = [] orderby_spec = self._get_orderby_info(context.segment) order_types = {o.order for o in orderby_spec} # If ALL columns or NO columns explicitly specify ASC/DESC, all is # well. if None not in order_types or order_types == {None}: return None # There's a mix of explicit and default sort order. Make everything # explicit. 
for col_info in orderby_spec: if not col_info.order: # Since ASC is default in SQL, add in ASC for fix lint_fixes.append( LintFix.create_after( col_info.column_reference, [WhitespaceSegment(), KeywordSegment("ASC")], ) ) return [ LintResult( anchor=context.segment, fixes=lint_fixes, description=( "Ambiguous order by clause. Order by clauses should specify " "order direction for ALL columns or NO columns." ), ) ] sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM04.py000066400000000000000000000140371451700765000221260ustar00rootroot00000000000000"""Implementation of Rule AM04.""" from typing import Optional, Tuple from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query _START_TYPES = ["select_statement", "set_expression", "with_compound_statement"] class RuleFailure(Exception): """Exception class for reporting lint failure inside deeply nested code.""" def __init__(self, anchor: BaseSegment): self.anchor: BaseSegment = anchor class Rule_AM04(BaseRule): """Query produces an unknown number of result columns. **Anti-pattern** Querying all columns using ``*`` produces a query result where the number or ordering of columns changes if the upstream table's schema changes. This should generally be avoided because it can cause slow performance, cause important schema changes to go undetected, or break production code. For example: * If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``, and ``c``, the actual columns returned will be wrong/different if columns are added to or deleted from the input table. * ``UNION`` and ``DIFFERENCE`` clauses require that the inputs have the same number of columns (and compatible types). * ``JOIN`` queries may break due to new column name conflicts, e.g. the query references a column ``c`` which initially existed in only one input table but a column of the same name is added to another table. * ``CREATE TABLE (<>) AS SELECT *`` .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT * FROM cte UNION SELECT a, b FROM t **Best practice** Somewhere along the "path" to the source data, specify columns explicitly. .. code-block:: sql WITH cte AS ( SELECT * FROM foo ) SELECT a, b FROM cte UNION SELECT a, b FROM t """ name = "ambiguous.column_count" aliases = ("L044",) groups: Tuple[str, ...] = ("all", "ambiguous") # Only evaluate the outermost query. crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) def _handle_alias(self, selectable, alias_info, query) -> None: select_info_target = next( query.crawl_sources(alias_info.from_expression_element, True) ) if isinstance(select_info_target, str): # It's an alias to an external table whose # number of columns could vary without our # knowledge. Thus, warn. self.logger.debug( f"Query target {select_info_target} is external. Generating warning." ) raise RuleFailure(selectable.selectable) else: # Handle nested SELECT. self._analyze_result_columns(select_info_target) def _analyze_result_columns(self, query: Query) -> None: """Given info on a list of SELECTs, determine whether to warn.""" # Recursively walk from the given query (select_info_list) to any # wildcard columns in the select targets. If every wildcard eventually # resolves to a query without wildcards, all is well. Otherwise, warn.
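# (Illustrative: `WITH cte AS (SELECT a, b FROM foo) SELECT * FROM cte`
# resolves cleanly, whereas `SELECT * FROM some_external_table` cannot
# be resolved and raises RuleFailure.)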
if not query.selectables: return None # pragma: no cover for selectable in query.selectables: self.logger.debug(f"Analyzing query: {selectable.selectable.raw}") for wildcard in selectable.get_wildcard_info(): if wildcard.tables: for wildcard_table in wildcard.tables: self.logger.debug( f"Wildcard: {wildcard.segment.raw} has target " f"{wildcard_table}" ) # Is it an alias? alias_info = selectable.find_alias(wildcard_table) if alias_info: # Found the alias matching the wildcard. Recurse, # analyzing the query associated with that alias. self._handle_alias(selectable, alias_info, query) else: # Not an alias. Is it a CTE? cte = query.lookup_cte(wildcard_table) if cte: # Wildcard refers to a CTE. Analyze it. self._analyze_result_columns(cte) else: # Not CTE, not table alias. Presumably an # external table. Warn. self.logger.debug( f"Query target {wildcard_table} is external. " "Generating warning." ) raise RuleFailure(selectable.selectable) else: # No table was specified with the wildcard. Assume we're # querying from a nested select in FROM. for o in query.crawl_sources(query.selectables[0].selectable, True): if isinstance(o, Query): self._analyze_result_columns(o) return None self.logger.debug( f'Query target "{query.selectables[0].selectable.raw}" has no ' "targets. Generating warning." ) raise RuleFailure(query.selectables[0].selectable) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Outermost query should produce known number of columns.""" query: Query = Query.from_segment(context.segment, context.dialect) try: # Begin analysis at the outer query. self._analyze_result_columns(query) return None except RuleFailure as e: return LintResult(anchor=e.anchor) sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM05.py000066400000000000000000000061321451700765000221240ustar00rootroot00000000000000"""Implementation of Rule AM05.""" from typing import Optional, Tuple from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_AM05(BaseRule): """Join clauses should be fully qualified. By default this rule is configured to enforce fully qualified ``INNER JOIN`` clauses, but not ``[LEFT/RIGHT/FULL] OUTER JOIN``. If you prefer a stricter lint then this is configurable. **Anti-pattern** A join is used without specifying the **kind** of join. .. code-block:: sql :force: SELECT foo FROM bar JOIN baz; **Best practice** Use ``INNER JOIN`` rather than ``JOIN``. .. code-block:: sql :force: SELECT foo FROM bar INNER JOIN baz; """ name = "ambiguous.join" aliases = ("L051",) groups: Tuple[str, ...] = ("all", "ambiguous") config_keywords = ["fully_qualify_join_types"] crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Fully qualify JOINs.""" # Config type hints self.fully_qualify_join_types: str # We are only interested in JOIN clauses. assert context.segment.is_type("join_clause") join_clause_keywords = [ segment for segment in context.segment.segments if segment.type == "keyword" ] # Identify LEFT/RIGHT/OUTER JOIN and if the next keyword is JOIN.
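# (Illustrative: with `fully_qualify_join_types = both`, a bare
# `LEFT JOIN` gains an `OUTER` keyword to become `LEFT OUTER JOIN`.)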
if ( self.fully_qualify_join_types in ["outer", "both"] and join_clause_keywords[0].raw_upper in ["RIGHT", "LEFT", "FULL"] and join_clause_keywords[1].raw_upper == "JOIN" ): # Define basic-level OUTER capitalization based on JOIN outer_kw = ("outer", "OUTER")[join_clause_keywords[1].raw == "JOIN"] # Insert OUTER after LEFT/RIGHT/FULL return LintResult( context.segment.segments[0], fixes=[ LintFix.create_after( context.segment.segments[0], [WhitespaceSegment(), KeywordSegment(outer_kw)], ) ], ) # Identify lone JOIN by looking at first child segment. if ( self.fully_qualify_join_types in ["inner", "both"] and join_clause_keywords[0].raw_upper == "JOIN" ): # Define basic-level INNER capitalization based on JOIN inner_kw = ("inner", "INNER")[join_clause_keywords[0].raw == "JOIN"] # Replace lone JOIN with INNER JOIN. return LintResult( context.segment.segments[0], fixes=[ LintFix.create_before( context.segment.segments[0], [KeywordSegment(inner_kw), WhitespaceSegment()], ) ], ) return None sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM06.py000066400000000000000000000125711451700765000221310ustar00rootroot00000000000000"""Implementation of Rule AM06.""" from typing import List, Optional, Tuple from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_AM06(BaseRule): """Inconsistent column references in ``GROUP BY/ORDER BY`` clauses. .. note:: ``ORDER BY`` clauses from ``WINDOW`` clauses are ignored by this rule. **Anti-pattern** A mix of implicit and explicit column references are used in a ``GROUP BY`` clause. .. code-block:: sql :force: SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, 2; -- The same also applies to column -- references in ORDER BY clauses. SELECT foo, bar FROM fake_table ORDER BY 1, bar; **Best practice** Reference all ``GROUP BY``/``ORDER BY`` columns either by name or by position. .. code-block:: sql :force: -- GROUP BY: Explicit SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; -- ORDER BY: Explicit SELECT foo, bar FROM fake_table ORDER BY foo, bar; -- GROUP BY: Implicit SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; -- ORDER BY: Implicit SELECT foo, bar FROM fake_table ORDER BY 1, 2; """ name = "ambiguous.column_references" aliases = ("L054",) groups: Tuple[str, ...] = ("all", "core", "ambiguous") config_keywords = ["group_by_and_order_by_style"] crawl_behaviour = SegmentSeekerCrawler( {"groupby_clause", "orderby_clause", "grouping_expression_list"} ) _ignore_types: List[str] = [ "withingroup_clause", "window_specification", "aggregate_order_by", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Inconsistent column references in GROUP BY/ORDER BY clauses.""" # Config type hints self.group_by_and_order_by_style: str # We only care about GROUP BY/ORDER BY clauses. assert context.segment.is_type( "groupby_clause", "orderby_clause", "grouping_expression_list" ) # Ignore Windowing clauses if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)): return LintResult(memory=context.memory) # Look at child segments and map column references to either the implicit or # explicit category. # N.B. segment names are used as the numeric literal type is 'raw', so best to # be specific with the name. 
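        # As a sketch of the categorisation (hypothetical clause): in
        #   GROUP BY foo, 2
        # ``foo`` is a column_reference ("explicit") and ``2`` is a
        # numeric_literal ("implicit"), so the set built below becomes
        # {"explicit", "implicit"} and the clause is flagged under the
        # "consistent" style.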
column_reference_category_map = { "column_reference": "explicit", "expression": "explicit", "numeric_literal": "implicit", } column_reference_category_set = { column_reference_category_map[segment.get_type()] for segment in context.segment.segments if segment.is_type(*column_reference_category_map.keys()) } # If there are no column references then just return if not column_reference_category_set: # pragma: no cover return LintResult(memory=context.memory) if self.group_by_and_order_by_style == "consistent": # If consistent naming then raise lint error if either: if len(column_reference_category_set) > 1: # 1. Both implicit and explicit column references are found in the same # clause. return LintResult( anchor=context.segment, memory=context.memory, ) else: # 2. A clause is found to contain column name references that # contradict the precedent set in earlier clauses. current_group_by_order_by_convention = ( column_reference_category_set.pop() ) prior_group_by_order_by_convention = context.memory.get( "prior_group_by_order_by_convention" ) if prior_group_by_order_by_convention and ( prior_group_by_order_by_convention != current_group_by_order_by_convention ): return LintResult( anchor=context.segment, memory=context.memory, ) context.memory[ "prior_group_by_order_by_convention" ] = current_group_by_order_by_convention else: # If explicit or implicit naming then raise lint error # if the opposite reference type is detected. if any( category != self.group_by_and_order_by_style for category in column_reference_category_set ): return LintResult( anchor=context.segment, memory=context.memory, ) # Return memory for later clauses. return LintResult(memory=context.memory) sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/AM07.py000066400000000000000000000206751451700765000221360ustar00rootroot00000000000000"""Implementation of Rule AM07.""" from typing import Optional, Set, Tuple from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import ( Query, Selectable, WildcardInfo, ) class Rule_AM07(BaseRule): """Queries within set query produce different numbers of columns. **Anti-pattern** When writing set expressions, all queries must return the same number of columns. .. code-block:: sql WITH cte AS ( SELECT a, b FROM foo ) SELECT * FROM cte UNION SELECT c, d, e FROM t **Best practice** Always specify columns when writing set queries and ensure that they all select the same number of columns .. code-block:: sql WITH cte AS ( SELECT a, b FROM foo ) SELECT a, b FROM cte UNION SELECT c, d FROM t """ name = "ambiguous.set_columns" aliases = ("L068",) groups: Tuple[str, ...] = ("all", "ambiguous") crawl_behaviour = SegmentSeekerCrawler({"set_expression"}, provide_raw_stack=True) def __resolve_wild_query( self, query: Query, ) -> Tuple[int, bool]: """Attempt to resolve a full query which may contain wildcards. NOTE: This requires a ``Query`` as input rather than just a ``Selectable`` and will delegate to ``__resolve_selectable`` once any Selectables have been identified. This method is *not* called on the initial set expression as that is evaluated as a series of Selectables. This method is only called on any subqueries (which may themselves be SELECT, WITH or set expressions) found during the resolution of any wildcards.
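        For illustration (a hypothetical case): resolving the wildcard in
        ``SELECT * FROM (SELECT a, b FROM t)`` delegates to the inner query,
        whose first selectable resolves to two columns.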
""" self.logger.debug("Resolving query of type %s", query.query_type) for s in query.selectables: self.logger.debug(" ...with selectable %r", s.selectable.raw) # if one of the source queries for a query within the set is a # set expression, just use the first query. If that first query isn't # reflective of the others, that will be caught when that segment # is processed. We'll know if we're in a set based on whether there # is more than one selectable. i.e. Just take the first selectable. return self.__resolve_selectable(query.selectables[0], query) def __resolve_selectable_wildcard( self, wildcard: WildcardInfo, selectable: Selectable, root_query: Query ) -> Tuple[int, bool]: """Attempt to resolve a single wildcard (*) within a Selectable. NOTE: This means resolving the number of columns implied by a single *. This method would be run multiple times if there are multiple wildcards in a single selectable. """ resolved = True # If there is no table specified, it is likely a subquery. # Handle that first. if not wildcard.tables: # Crawl the Query looking for the subquery, probably in the FROM. for o in root_query.crawl_sources(selectable.selectable): if isinstance(o, Query): return self.__resolve_wild_query(o) # We should find one. This is not an expected path to be in. return 0, False # pragma: no cover # There might be multiple tables referenced in some wildcard cases. num_cols = 0 for wildcard_table in wildcard.tables: cte_name = wildcard_table # Get the AliasInfo for the table referenced in the wildcard # expression. alias_info = selectable.find_alias(wildcard_table) # attempt to resolve alias or table name to a cte if alias_info: # Crawl inside the FROM expression looking for something to # resolve to. select_info_target = next( root_query.crawl_sources(alias_info.from_expression_element) ) if isinstance(select_info_target, str): cte_name = select_info_target else: _cols, _resolved = self.__resolve_wild_query(select_info_target) num_cols += _cols resolved = resolved and _resolved continue cte = root_query.lookup_cte(cte_name) if cte: _cols, _resolved = self.__resolve_wild_query(cte) num_cols += _cols resolved = resolved and _resolved else: # Unable to resolve resolved = False return num_cols, resolved def __resolve_selectable( self, selectable: Selectable, root_query: Query ) -> Tuple[int, bool]: """Resolve the number of columns in a single Selectable. The selectable may or may not have wildcard (*) expressions. If it does, we attempt to resolve them. """ self.logger.debug("Resolving selectable: %r", selectable.selectable.raw) assert selectable.select_info wildcard_info = selectable.get_wildcard_info() # Start with the number of non-wild columns. num_cols = len(selectable.select_info.select_targets) - len(wildcard_info) # If there's no wildcard, just count the columns and move on. if not wildcard_info: # if there is no wildcard in the query use the count of select targets self.logger.debug("Resolved N=%s: %r", num_cols, selectable.selectable.raw) return num_cols, True resolved = True # If the set query contains on or more wildcards, attempt to resolve it to a # list of select targets that can be counted. for wildcard in wildcard_info: _cols, _resolved = self.__resolve_selectable_wildcard( wildcard, selectable, root_query ) resolved = resolved and _resolved # Add on the number of columns which the wildcard resolves to. 
num_cols += _cols self.logger.debug( "%s N=%s: %r", "Resolved" if resolved else "Unresolved", num_cols, selectable.selectable.raw, ) return num_cols, resolved def _get_select_target_counts(self, query: Query) -> Tuple[Set[int], bool]: """Given a set expression, get the number of select targets in each query. We keep track of the number of columns in each selectable using a ``set``. Ideally at the end there is only one item in the set, showing that all selectables have the same size. Importantly we can't guarantee that we can always resolve any wildcards (*), so we also return a flag to indicate whether any present have been fully resolved. """ select_target_counts = set() resolved_wildcard = True for selectable in query.selectables: cnt, res = self.__resolve_selectable(selectable, query) if not res: resolved_wildcard = False select_target_counts.add(cnt) return select_target_counts, resolved_wildcard def _eval(self, context: RuleContext) -> Optional[LintResult]: """All queries in set expression should return the same number of columns.""" assert context.segment.is_type("set_expression") root = context.segment # Is the parent of the set expression a WITH expression? # NOTE: Backward slice to work outward. for parent in context.parent_stack[::-1]: if parent.is_type("with_compound_statement"): # If it is, work from there instead. root = parent break query: Query = Query.from_segment(root, dialect=context.dialect) set_segment_select_sizes, resolve_wildcard = self._get_select_target_counts( query ) self.logger.info( "Resolved select sizes (resolved wildcard: %s) : %s", resolve_wildcard, set_segment_select_sizes, ) # if queries had different select target counts # and all wildcards have been resolved; fail if len(set_segment_select_sizes) > 1 and resolve_wildcard: return LintResult(anchor=context.segment) return LintResult() sqlfluff-2.3.5/src/sqlfluff/rules/ambiguous/__init__.py000066400000000000000000000015501451700765000232200ustar00rootroot00000000000000"""The ambiguous plugin bundle. NOTE: Yes the title of this bundle is ...ambiguous. 😁 """ from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
""" from sqlfluff.rules.ambiguous.AM01 import Rule_AM01 from sqlfluff.rules.ambiguous.AM02 import Rule_AM02 from sqlfluff.rules.ambiguous.AM03 import Rule_AM03 from sqlfluff.rules.ambiguous.AM04 import Rule_AM04 from sqlfluff.rules.ambiguous.AM05 import Rule_AM05 from sqlfluff.rules.ambiguous.AM06 import Rule_AM06 from sqlfluff.rules.ambiguous.AM07 import Rule_AM07 return [Rule_AM01, Rule_AM02, Rule_AM03, Rule_AM04, Rule_AM05, Rule_AM06, Rule_AM07] sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/000077500000000000000000000000001451700765000221175ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/CP01.py000066400000000000000000000263111451700765000231370ustar00rootroot00000000000000"""Implementation of Rule CP01.""" from typing import List, Optional, Tuple import regex from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.config_info import get_config_info from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler def is_capitalizable(character: str) -> bool: """Does the character have differing lower and upper-case versions?""" if character.lower() == character.upper(): return False return True class Rule_CP01(BaseRule): """Inconsistent capitalisation of keywords. **Anti-pattern** In this example, ``select`` is in lower-case whereas ``FROM`` is in upper-case. .. code-block:: sql select a FROM foo **Best practice** Make all keywords either in upper-case or in lower-case. .. code-block:: sql SELECT a FROM foo -- Also good select a from foo """ name = "capitalisation.keywords" aliases = ("L010",) groups: Tuple[str, ...] = ("all", "core", "capitalisation") is_fix_compatible = True lint_phase = "post" # Binary operators behave like keywords too. crawl_behaviour = SegmentSeekerCrawler({"keyword", "binary_operator", "date_part"}) # Skip literals (which are also keywords) as they have their own rule (CP04) _exclude_types: Tuple[str, ...] = ("literal",) _exclude_parent_types: Tuple[str, ...] = ( "data_type", "datetime_type_identifier", "primitive_type", ) config_keywords = ["capitalisation_policy", "ignore_words", "ignore_words_regex"] # Human readable target elem for description _description_elem = "Keywords" def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Inconsistent capitalisation of keywords. We use the `memory` feature here to keep track of cases known to be INconsistent with what we've seen so far as well as the top choice for what the possible case is. """ # NOTE: Given the dialect structure we can assume the targets have a parent. parent: BaseSegment = context.parent_stack[-1] if context.segment.is_type(*self._exclude_types) or parent.is_type( *self._exclude_parent_types ): return [LintResult(memory=context.memory)] # Used by CP03 (that inherits from this rule) # If it's a qualified function_name (i.e with more than one part to # function_name). Then it is likely an existing user defined function (UDF) # which are case sensitive so ignore for this. if parent.get_type() == "function_name" and len(parent.segments) != 1: return [LintResult(memory=context.memory)] return [self._handle_segment(context.segment, context)] def _handle_segment(self, segment: BaseSegment, context: RuleContext) -> LintResult: # NOTE: this mutates the memory field. memory = context.memory self.logger.info("_handle_segment: %s, %s", segment, segment.get_type()) # Config type hints self.ignore_words_regex: str # Get the capitalisation policy configuration. 
try: cap_policy = self.cap_policy cap_policy_opts = self.cap_policy_opts ignore_words_list = self.ignore_words_list ignore_templated_areas = self.ignore_templated_areas except AttributeError: # First-time only, read the settings from configuration. This is # very slow. ( cap_policy, cap_policy_opts, ignore_words_list, ignore_templated_areas, ) = self._init_capitalisation_policy(context) # Skip if in ignore list if ignore_words_list and segment.raw.lower() in ignore_words_list: return LintResult(memory=memory) # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, segment.raw ): return LintResult(memory=memory) # Skip if templated. If the user wants to ignore templated areas, we don't # even want to look at them to avoid affecting flagging non-template areas # that are inconsistent with the template areas. if segment.is_templated and ignore_templated_areas: return LintResult(memory=memory) # Skip if empty. if not segment.raw: return LintResult(memory=memory) refuted_cases = memory.get("refuted_cases", set()) # Which cases are definitely inconsistent with the segment? for character in segment.raw: if is_capitalizable(character): first_letter_is_lowercase = character != character.upper() break # If none of the characters are letters there will be a parsing # error, so not sure we need this statement first_letter_is_lowercase = False if first_letter_is_lowercase: refuted_cases.update(["upper", "capitalise", "pascal"]) if segment.raw != segment.raw.lower(): refuted_cases.update(["lower"]) else: refuted_cases.update(["lower"]) if segment.raw != segment.raw.upper(): refuted_cases.update(["upper"]) if segment.raw != segment.raw.capitalize(): refuted_cases.update(["capitalise"]) if not segment.raw.isalnum(): refuted_cases.update(["pascal"]) # Update the memory memory["refuted_cases"] = refuted_cases self.logger.debug( f"Refuted cases after segment '{segment.raw}': {refuted_cases}" ) # Skip if no inconsistencies, otherwise compute a concrete policy # to convert to. if cap_policy == "consistent": possible_cases = [c for c in cap_policy_opts if c not in refuted_cases] self.logger.debug( f"Possible cases after segment '{segment.raw}': {possible_cases}" ) if possible_cases: # Save the latest possible case and skip memory["latest_possible_case"] = possible_cases[0] self.logger.debug( f"Consistent capitalization, returning with memory: {memory}" ) return LintResult(memory=memory) else: concrete_policy = memory.get("latest_possible_case", "upper") self.logger.debug( f"Getting concrete policy '{concrete_policy}' from memory" ) else: if cap_policy not in refuted_cases: # Skip self.logger.debug( f"Consistent capitalization {cap_policy}, returning with " f"memory: {memory}" ) return LintResult(memory=memory) else: concrete_policy = cap_policy self.logger.debug( f"Setting concrete policy '{concrete_policy}' from cap_policy" ) # Set the fixed to same as initial in case any of below don't match fixed_raw = segment.raw # We need to change the segment to match the concrete policy if concrete_policy in ["upper", "lower", "capitalise"]: if concrete_policy == "upper": fixed_raw = fixed_raw.upper() elif concrete_policy == "lower": fixed_raw = fixed_raw.lower() elif concrete_policy == "capitalise": fixed_raw = fixed_raw.capitalize() elif concrete_policy == "pascal": # For Pascal we set the first letter in each "word" to uppercase # We do not lowercase other letters to allow for PascalCase style # words. 
This does mean we allow all UPPERCASE and also don't # correct Pascalcase to PascalCase, but there's only so much we can # do. We do correct underscore_words to Underscore_Words. fixed_raw = regex.sub( "([^a-zA-Z0-9]+|^)([a-zA-Z0-9])([a-zA-Z0-9]*)", lambda match: match.group(1) + match.group(2).upper() + match.group(3), segment.raw, ) if fixed_raw == segment.raw: # No need to fix self.logger.debug( f"Capitalisation of segment '{segment.raw}' already OK with " f"policy '{concrete_policy}', returning with memory {memory}" ) return LintResult(memory=memory) else: # build description based on the policy in use consistency = "consistently " if cap_policy == "consistent" else "" if concrete_policy in ["upper", "lower"]: policy = f"{concrete_policy} case." elif concrete_policy == "capitalise": policy = "capitalised." elif concrete_policy == "pascal": policy = "pascal case." # Return the fixed segment self.logger.debug( f"INCONSISTENT Capitalisation of segment '{segment.raw}', " f"fixing to '{fixed_raw}' and returning with memory {memory}" ) return LintResult( anchor=segment, fixes=[self._get_fix(segment, fixed_raw)], memory=memory, description=f"{self._description_elem} must be {consistency}{policy}", ) def _get_fix(self, segment: BaseSegment, fixed_raw: str) -> LintFix: """Given a segment found to have a fix, returns a LintFix for it. May be overridden by subclasses, which is useful when the parse tree structure varies from this simple base case. """ return LintFix.replace(segment, [segment.edit(fixed_raw)]) def _init_capitalisation_policy(self, context: RuleContext): """Called first time rule is evaluated to fetch & cache the policy.""" cap_policy_name = next( k for k in self.config_keywords if k.endswith("capitalisation_policy") ) self.cap_policy = getattr(self, cap_policy_name) self.cap_policy_opts = [ opt for opt in get_config_info()[cap_policy_name]["validation"] if opt != "consistent" ] # Use str() as CP04 uses bools which might otherwise be read as bool ignore_words_config = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] self.ignore_templated_areas = context.config.get("ignore_templated_areas") self.logger.debug( f"Selected '{cap_policy_name}': '{self.cap_policy}' from options " f"{self.cap_policy_opts}" ) cap_policy = self.cap_policy cap_policy_opts = self.cap_policy_opts ignore_words_list = self.ignore_words_list ignore_templated_areas = self.ignore_templated_areas return cap_policy, cap_policy_opts, ignore_words_list, ignore_templated_areas sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/CP02.py000066400000000000000000000041041451700765000231340ustar00rootroot00000000000000"""Implementation of Rule CP02.""" from typing import List, Optional from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_CP02(Rule_CP01): """Inconsistent capitalisation of unquoted identifiers. **Anti-pattern** In this example, unquoted identifier ``a`` is in lower-case but ``B`` is in upper-case. .. code-block:: sql select a, B from foo **Best practice** Ensure all unquoted identifiers are either in upper-case or in lower-case. .. 
code-block:: sql select a, b from foo -- Also good select A, B from foo """ name = "capitalisation.identifiers" aliases = ("L014",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( {"naked_identifier", "properties_naked_identifier"} ) config_keywords = [ "extended_capitalisation_policy", "unquoted_identifiers_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Unquoted identifiers" def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: # Return None if identifier is case-sensitive property to enable Change # Data Feed # https://docs.delta.io/2.0.0/delta-change-data-feed.html#enable-change-data-feed if ( context.dialect.name in ["databricks", "sparksql"] and context.parent_stack and context.parent_stack[-1].type == "property_name_identifier" and context.segment.raw == "enableChangeDataFeed" ): return None if identifiers_policy_applicable( self.unquoted_identifiers_policy, context.parent_stack # type: ignore ): return super()._eval(context=context) else: return [LintResult(memory=context.memory)] sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/CP03.py000066400000000000000000000023671451700765000231460ustar00rootroot00000000000000"""Implementation of Rule CP03.""" from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.rules import LintFix from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP03(Rule_CP01): """Inconsistent capitalisation of function names. **Anti-pattern** In this example, the two ``SUM`` functions don't have the same capitalisation. .. code-block:: sql SELECT sum(a) AS aa, SUM(b) AS bb FROM foo **Best practice** Make the case consistent. .. code-block:: sql SELECT sum(a) AS aa, sum(b) AS bb FROM foo """ name = "capitalisation.functions" aliases = ("L030",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( {"function_name_identifier", "bare_function"} ) _exclude_types = () _exclude_parent_types = () config_keywords = [ "extended_capitalisation_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Function names" def _get_fix(self, segment: BaseSegment, fixed_raw: str) -> LintFix: return super()._get_fix(segment, fixed_raw) sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/CP04.py000066400000000000000000000022041451700765000231350ustar00rootroot00000000000000"""Implementation of Rule CP04.""" from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP04(Rule_CP01): """Inconsistent capitalisation of boolean/null literal. **Anti-pattern** In this example, ``null`` and ``false`` are in lower-case whereas ``TRUE`` is in upper-case. .. code-block:: sql select a, null, TRUE, false from foo **Best practice** Ensure all literal ``null``/``true``/``false`` literals are consistently upper or lower case .. 
code-block:: sql select a, NULL, TRUE, FALSE from foo -- Also good select a, null, true, false from foo """ name = "capitalisation.literals" aliases = ("L040",) is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler({"null_literal", "boolean_literal"}) _exclude_types = () _exclude_parent_types = () _description_elem = "Boolean/null literals" sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/CP05.py000066400000000000000000000061221451700765000231410ustar00rootroot00000000000000"""Implementation of Rule CP05.""" from typing import List from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules.base import LintResult from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 class Rule_CP05(Rule_CP01): """Inconsistent capitalisation of datatypes. **Anti-pattern** In this example, ``int`` and ``unsigned`` are in lower-case whereas ``VARCHAR`` is in upper-case. .. code-block:: sql CREATE TABLE t ( a int unsigned, b VARCHAR(15) ); **Best practice** Ensure all datatypes are consistently upper or lower case .. code-block:: sql CREATE TABLE t ( a INT UNSIGNED, b VARCHAR(15) ); """ name = "capitalisation.types" aliases = ("L063",) groups = ("all", "core", "capitalisation") is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler( { "data_type_identifier", "primitive_type", "datetime_type_identifier", "data_type", } ) # NOTE: CP05 overrides `_eval` and then only calls # `_handle_segment` from CP01. Setting `_exclude_types` # and `_exclude_parent_types` therefore has no effect. # They are set here to empty tuples to avoid confusion. _exclude_types = () _exclude_parent_types = () config_keywords = [ "extended_capitalisation_policy", "ignore_words", "ignore_words_regex", ] _description_elem = "Datatypes" def _eval(self, context: RuleContext) -> List[LintResult]: """Inconsistent capitalisation of datatypes. We use the `memory` feature here to keep track of cases known to be inconsistent with what we've seen so far as well as the top choice for what the possible case is. """ results = [] # For some of these segments we want to run the code on if context.segment.is_type( "primitive_type", "datetime_type_identifier", "data_type" ): for seg in context.segment.segments: # We don't want to edit symbols, quoted things or identifiers # if they appear. if seg.is_type( "symbol", "identifier", "quoted_literal" ) or not seg.is_type("raw"): continue res = self._handle_segment(seg, context) if res: results.append(res) # NOTE: Given the dialect structure we can assume the targets have a parent. parent: BaseSegment = context.parent_stack[-1] # Don't process it if it's likely to have been processed by the parent. if context.segment.is_type("data_type_identifier") and not parent.is_type( "primitive_type", "datetime_type_identifier", "data_type" ): results.append( self._handle_segment(context.segment, context) ) # pragma: no cover return results sqlfluff-2.3.5/src/sqlfluff/rules/capitalisation/__init__.py000066400000000000000000000013051451700765000242270ustar00rootroot00000000000000"""The capitalisation plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
""" from sqlfluff.rules.capitalisation.CP01 import Rule_CP01 from sqlfluff.rules.capitalisation.CP02 import Rule_CP02 from sqlfluff.rules.capitalisation.CP03 import Rule_CP03 from sqlfluff.rules.capitalisation.CP04 import Rule_CP04 from sqlfluff.rules.capitalisation.CP05 import Rule_CP05 return [Rule_CP01, Rule_CP02, Rule_CP03, Rule_CP04, Rule_CP05] sqlfluff-2.3.5/src/sqlfluff/rules/convention/000077500000000000000000000000001451700765000212755ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV01.py000066400000000000000000000040271451700765000223230ustar00rootroot00000000000000"""Implementation of Rule CV01.""" from typing import Optional from sqlfluff.core.parser import SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV01(BaseRule): """Use ``!=`` instead of ``<>`` for "not equal to" comparisons. **Anti-pattern** ``<>`` means ``not equal`` but doesn't sound like this when we say it out loud. .. code-block:: sql SELECT * FROM X WHERE 1 <> 2; **Best practice** Use ``!=`` instead because its sounds more natural and is more common in other programming languages. .. code-block:: sql SELECT * FROM X WHERE 1 != 2; """ name = "convention.not_equal" aliases = ("L061",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Use ``!=`` instead of ``<>`` for "not equal to" comparison.""" # Get the comparison operator children raw_comparison_operators = ( FunctionalContext(context) .segment.children() .select(select_if=sp.is_type("raw_comparison_operator")) ) # Only care about ``<>`` if [r.raw for r in raw_comparison_operators] != ["<", ">"]: return None # Provide a fix and replace ``<>`` with ``!=`` # As each symbol is a separate symbol this is done in two steps: # 1. Replace < with ! # 2. Replace > with = fixes = [ LintFix.replace( raw_comparison_operators[0], [SymbolSegment(raw="!", type="raw_comparison_operator")], ), LintFix.replace( raw_comparison_operators[1], [SymbolSegment(raw="=", type="raw_comparison_operator")], ), ] return LintResult(context.segment, fixes) sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV02.py000066400000000000000000000040221451700765000223170ustar00rootroot00000000000000"""Implementation of Rule CV02.""" from typing import Optional from sqlfluff.core.parser import WordSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV02(BaseRule): """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``. **Anti-pattern** ``IFNULL`` or ``NVL`` are used to fill ``NULL`` values. .. code-block:: sql SELECT ifnull(foo, 0) AS bar, FROM baz; SELECT nvl(foo, 0) AS bar, FROM baz; **Best practice** Use ``COALESCE`` instead. ``COALESCE`` is universally supported, whereas Redshift doesn't support ``IFNULL`` and BigQuery doesn't support ``NVL``. Additionally, ``COALESCE`` is more flexible and accepts an arbitrary number of arguments. .. 
code-block:: sql SELECT coalesce(foo, 0) AS bar, FROM baz; """ name = "convention.coalesce" aliases = ("L060",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"function_name_identifier"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Use ``COALESCE`` instead of ``IFNULL`` or ``NVL``.""" # We only care about function names, and they should be the # only things we get assert context.segment.is_type("function_name_identifier") # Only care if the function is ``IFNULL`` or ``NVL``. if context.segment.raw_upper not in {"IFNULL", "NVL"}: return None # Create fix to replace ``IFNULL`` or ``NVL`` with ``COALESCE``. fix = LintFix.replace( context.segment, [ WordSegment( raw="COALESCE", type="function_name_identifier", ) ], ) return LintResult( anchor=context.segment, fixes=[fix], description=f"Use 'COALESCE' instead of '{context.segment.raw_upper}'.", ) sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV03.py000066400000000000000000000105221451700765000223220ustar00rootroot00000000000000"""Implementation of Rule CV03.""" from typing import Optional from sqlfluff.core.parser import BaseSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV03(BaseRule): """Trailing commas within select clause. .. note:: For many database backends this is allowed. For some users this may be something they wish to enforce (in line with Python best practice). Many database backends regard this as a syntax error, and as such the `SQLFluff` default is to forbid trailing commas in the select clause. **Anti-pattern** .. code-block:: sql SELECT a, b, FROM foo **Best practice** .. code-block:: sql SELECT a, b FROM foo """ name = "convention.select_trailing_comma" aliases = ("L038",) groups = ("all", "core", "convention") config_keywords = ["select_clause_trailing_comma"] crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Trailing commas within select clause.""" # Config type hints self.select_clause_trailing_comma: str segment = FunctionalContext(context).segment children = segment.children() # Iterate content to find last element last_content: BaseSegment = children.last(sp.is_code())[0] # What mode are we in? if self.select_clause_trailing_comma == "forbid": # Is it a comma? if last_content.is_type("comma"): # The last content is a comma. Before we try and remove it, we # should check that it's safe. One edge case is that it's a trailing # comma in a loop, but that if we try and remove it, we also break # the previous examples. We should check that this comma doesn't # share a source position with any other commas in the same select. # If there isn't a source position, then it's safe to remove, it's # a recent addition. if not last_content.pos_marker: # pragma: no cover fixes = [LintFix.delete(last_content)] else: comma_pos = last_content.pos_marker.source_position() for seg in context.segment.segments: if seg.is_type("comma"): if not seg.pos_marker: # pragma: no cover continue elif seg.pos_marker.source_position() == comma_pos: if seg is not last_content: # Not safe to fix self.logger.info( "Preventing deletion of %s, because source " "position is the same as %s. 
Templated " "positions are %s and %s.", last_content, seg, last_content.pos_marker.templated_position(), seg.pos_marker.templated_position(), ) fixes = [] break else: # No matching commas found. It's safe. fixes = [LintFix.delete(last_content)] return LintResult( anchor=last_content, fixes=fixes, description="Trailing comma in select statement forbidden", ) elif self.select_clause_trailing_comma == "require": if not last_content.is_type("comma"): new_comma = SymbolSegment(",", type="comma") return LintResult( anchor=last_content, fixes=[LintFix.replace(last_content, [last_content, new_comma])], description="Trailing comma in select statement required", ) return None sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV04.py000066400000000000000000000122021451700765000223200ustar00rootroot00000000000000"""Implementation of Rule CV04.""" from typing import Optional from sqlfluff.core.parser import RawSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import LiteralSegment from sqlfluff.utils.functional import FunctionalContext, sp class Rule_CV04(BaseRule): """Use consistent syntax to express "count number of rows". Note: If both ``prefer_count_1`` and ``prefer_count_0`` are set to true then ``prefer_count_1`` has precedence. ``COUNT(*)``, ``COUNT(1)``, and even ``COUNT(0)`` are equivalent syntaxes in many SQL engines due to optimizers interpreting these instructions as "count number of rows in result". The ANSI-92_ spec mentions the ``COUNT(*)`` syntax specifically as having a special meaning: If COUNT(*) is specified, then the result is the cardinality of T. So by default, `SQLFluff` enforces the consistent use of ``COUNT(*)``. If the SQL engine you work with, or your team, prefers ``COUNT(1)`` or ``COUNT(0)`` over ``COUNT(*)``, you can configure this rule to consistently enforce your preference. .. _ANSI-92: http://msdn.microsoft.com/en-us/library/ms175997.aspx **Anti-pattern** .. code-block:: sql select count(1) from table_a **Best practice** Use ``count(*)`` unless specified otherwise by config ``prefer_count_1``, or ``prefer_count_0`` as preferred. .. code-block:: sql select count(*) from table_a """ name = "convention.count_rows" aliases = ("L047",) groups = ("all", "core", "convention") config_keywords = ["prefer_count_1", "prefer_count_0"] crawl_behaviour = SegmentSeekerCrawler({"function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes.""" # Config type hints self.prefer_count_0: bool self.prefer_count_1: bool new_segment: RawSegment # We already know we're in a function because of the crawl_behaviour. # This means it's very unlikely that there isn't a function_name here. 
function_name = context.segment.get_child("function_name") if not function_name: # pragma: no cover return None if function_name.raw_upper == "COUNT": # Get bracketed content f_content = ( FunctionalContext(context) .segment.children(sp.is_type("bracketed")) .children( sp.and_( sp.not_(sp.is_meta()), sp.not_( sp.is_type( "start_bracket", "end_bracket", "whitespace", "newline" ) ), ) ) ) if len(f_content) != 1: # pragma: no cover return None preferred = "*" if self.prefer_count_1: preferred = "1" elif self.prefer_count_0: preferred = "0" if f_content[0].is_type("star") and ( self.prefer_count_1 or self.prefer_count_0 ): new_segment = LiteralSegment(raw=preferred, type="numeric_literal") return LintResult( anchor=context.segment, fixes=[ LintFix.replace( f_content[0], [new_segment], ), ], ) if f_content[0].is_type("expression"): expression_content = [ seg for seg in f_content[0].segments if not seg.is_meta ] if ( len(expression_content) == 1 and expression_content[0].is_type("literal") and expression_content[0].raw in ["0", "1"] and expression_content[0].raw != preferred ): if preferred == "*": new_segment = SymbolSegment(raw=preferred, type="star") else: new_segment = LiteralSegment( raw=preferred, type="numeric_literal" ) return LintResult( anchor=context.segment, fixes=[ LintFix.replace( expression_content[0], [ expression_content[0].edit( expression_content[0].raw.replace( expression_content[0].raw, preferred ) ), ], ), ], ) return None sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV05.py000066400000000000000000000076741451700765000223420ustar00rootroot00000000000000"""Implementation of Rule CV05.""" from typing import List, Optional, Union from sqlfluff.core.parser import KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import Segments, sp from sqlfluff.utils.reflow import ReflowSequence CorrectionListType = List[Union[WhitespaceSegment, KeywordSegment]] class Rule_CV05(BaseRule): """Comparisons with NULL should use "IS" or "IS NOT". **Anti-pattern** In this example, the ``=`` operator is used to check for ``NULL`` values. .. code-block:: sql SELECT a FROM foo WHERE a = NULL **Best practice** Use ``IS`` or ``IS NOT`` to check for ``NULL`` values. .. code-block:: sql SELECT a FROM foo WHERE a IS NULL """ name = "convention.is_null" aliases = ("L049",) groups = ("all", "core", "convention") crawl_behaviour = SegmentSeekerCrawler({"comparison_operator"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Relational operators should not be used to check for NULL values.""" # Context/motivation for this rule: # https://news.ycombinator.com/item?id=28772289 # https://stackoverflow.com/questions/9581745/sql-is-null-and-null # Allow assignments in SET clauses if len(context.parent_stack) >= 2 and context.parent_stack[-2].is_type( "set_clause_list", "execute_script_statement", "options_segment" ): return None # Allow assignments in EXEC clauses, or any other explicit assignments if context.parent_stack and context.parent_stack[-1].is_type( "set_clause_list", "execute_script_statement", "assignment_operator" ): return None # If the operator is in an EXCLUDE constraint (PostgreSQL feature), the SQL # could look like: EXCLUDE (field WITH =). In that case, we can exit early # to avoid an assertion failure due to no segment following the operator. 
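        # (An illustrative PostgreSQL case: CREATE TABLE b
        # (r INT, EXCLUDE USING gist (r WITH =)); the "=" here has no
        # right-hand operand to inspect.)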
# Note that if the EXCLUDE is based on an expression, we will still be # checking that expression because it will be under a different child segment. if context.parent_stack and context.parent_stack[-1].is_type( "exclusion_constraint_element" ): return None # We only care about equality operators. if context.segment.raw not in ("=", "!=", "<>"): return None # We only care if it's followed by a NULL literal. siblings = Segments(*context.parent_stack[-1].segments) after_op_list = siblings.select(start_seg=context.segment) next_code = after_op_list.first(sp.is_code()) if not next_code.all(sp.is_type("null_literal")): return None sub_seg = next_code.get() assert sub_seg, "TypeGuard: Segment must exist" self.logger.debug( "Found NULL literal following equals/not equals @%s: %r", sub_seg.pos_marker, sub_seg.raw, ) edit = _create_base_is_null_sequence( is_upper=sub_seg.raw[0] == "N", operator_raw=context.segment.raw, ) return LintResult( anchor=context.segment, fixes=ReflowSequence.from_around_target( context.segment, context.parent_stack[0], config=context.config ) .replace(context.segment, edit) .respace() .get_fixes(), ) def _create_base_is_null_sequence( is_upper: bool, operator_raw: str, ) -> CorrectionListType: is_seg = KeywordSegment("IS" if is_upper else "is") not_seg = KeywordSegment("NOT" if is_upper else "not") if operator_raw == "=": return [is_seg] return [ is_seg, WhitespaceSegment(), not_seg, ] sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV06.py000066400000000000000000000352251451700765000223340ustar00rootroot00000000000000"""Implementation of Rule CV06.""" from typing import List, NamedTuple, Optional, Sequence, cast from sqlfluff.core.parser import BaseSegment, NewlineSegment, RawSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, sp class SegmentMoveContext(NamedTuple): """Context information for moving a segment.""" anchor_segment: RawSegment is_one_line: bool before_segment: Segments whitespace_deletions: Segments class Rule_CV06(BaseRule): """Statements must end with a semi-colon. **Anti-pattern** A statement is not immediately terminated with a semi-colon. The ``•`` represents space. .. code-block:: sql :force: SELECT a FROM foo ; SELECT b FROM bar••; **Best practice** Immediately terminate the statement with a semi-colon. .. code-block:: sql :force: SELECT a FROM foo; """ name = "convention.terminator" aliases = ("L052",) groups = ("all", "convention") config_keywords = ["multiline_newline", "require_final_semicolon"] crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True @staticmethod def _handle_preceding_inline_comments( before_segment: Sequence[BaseSegment], anchor_segment: BaseSegment ): """Adjust segments to not move preceding inline comments. We don't want to move inline comments that are on the same line as the preceding code segment as they could contain noqa instructions. """ # See if we have a preceding inline comment on the same line as the preceding # segment. same_line_comment = next( ( s for s in before_segment if s.is_comment and not s.is_type("block_comment") and s.pos_marker and s.pos_marker.working_line_no # We don't need to handle the case where raw_segments is empty # because it never is. It's either a segment with raw children # or a raw segment which returns [self] as raw_segments. 
== anchor_segment.raw_segments[-1].pos_marker.working_line_no ), None, ) # If so then make that our new anchor segment and adjust # before_segment accordingly. if same_line_comment: anchor_segment = same_line_comment before_segment = before_segment[: before_segment.index(same_line_comment)] return before_segment, anchor_segment @staticmethod def _handle_trailing_inline_comments( parent_segment: BaseSegment, anchor_segment: BaseSegment ) -> BaseSegment: """Adjust anchor_segment to not move trailing inline comment. We don't want to move inline comments that are on the same line as the preceding code segment as they could contain noqa instructions. """ # See if we have a trailing inline comment on the same line as the preceding # segment. for comment_segment in parent_segment.recursive_crawl("comment"): assert comment_segment.pos_marker assert anchor_segment.pos_marker if ( comment_segment.pos_marker.working_line_no == anchor_segment.pos_marker.working_line_no ) and (not comment_segment.is_type("block_comment")): anchor_segment = comment_segment return anchor_segment @staticmethod def _is_one_line_statement( parent_segment: BaseSegment, segment: BaseSegment ) -> bool: """Check if the statement containing the provided segment is one line.""" # Find statement segment containing the current segment. statement_segment = next( ( ps.segment for ps in (parent_segment.path_to(segment) or []) if ps.segment.is_type("statement") ), None, ) if statement_segment is None: # pragma: no cover # If we can't find a parent statement segment then don't try anything # special. return False if not any(statement_segment.recursive_crawl("newline")): # Statement segment has no newlines therefore starts and ends on the same # line. return True return False def _get_segment_move_context( self, target_segment: RawSegment, parent_segment: BaseSegment ) -> SegmentMoveContext: # Locate the segment to be moved (i.e. context.segment) and search back # over the raw stack to find the end of the preceding statement. reversed_raw_stack = Segments(*parent_segment.raw_segments).reversed() before_code = reversed_raw_stack.select( loop_while=sp.not_(sp.is_code()), start_seg=target_segment ) before_segment = before_code.select(sp.not_(sp.is_meta())) # We're selecting from the raw stack, so we know that before_code is # made of RawSegment elements. anchor_segment = ( cast(RawSegment, before_code[-1]) if before_code else target_segment ) first_code = reversed_raw_stack.select( sp.is_code(), start_seg=target_segment ).first() self.logger.debug("Semicolon: first_code: %s", first_code) is_one_line = ( self._is_one_line_statement(parent_segment, first_code[0]) if first_code else False ) # We can tidy up any whitespace between the segment # and the preceding code/comment segment. # Don't mess with comment spacing/placement. whitespace_deletions = before_segment.select(loop_while=sp.is_whitespace()) return SegmentMoveContext( anchor_segment, is_one_line, before_segment, whitespace_deletions ) def _handle_semicolon( self, target_segment: RawSegment, parent_segment: BaseSegment ) -> Optional[LintResult]: info = self._get_segment_move_context(target_segment, parent_segment) semicolon_newline = self.multiline_newline if not info.is_one_line else False self.logger.debug("Semicolon Newline: %s", semicolon_newline) # Semi-colon on same line. if not semicolon_newline: return self._handle_semicolon_same_line( target_segment, parent_segment, info ) # Semi-colon on new line. 
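            # (A sketch with multiline_newline = True: a multi-line
            #   SELECT a FROM foo;
            # is rewritten so the terminator sits on its own line, while
            # one-line statements keep the terminator inline.)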
else: return self._handle_semicolon_newline(target_segment, parent_segment, info) def _handle_semicolon_same_line( self, target_segment: RawSegment, parent_segment: BaseSegment, info: SegmentMoveContext, ) -> Optional[LintResult]: if not info.before_segment: return None # If preceding segments are found then delete the old # semi-colon and its preceding whitespace and then insert # the semi-colon in the correct location. fixes = self._create_semicolon_and_delete_whitespace( target_segment, parent_segment, info.anchor_segment, info.whitespace_deletions, [ SymbolSegment(raw=";", type="statement_terminator"), ], ) return LintResult( anchor=info.anchor_segment, fixes=fixes, ) def _handle_semicolon_newline( self, target_segment: RawSegment, parent_segment: BaseSegment, info: SegmentMoveContext, ) -> Optional[LintResult]: # Adjust before_segment and anchor_segment for preceding inline # comments. Inline comments can contain noqa logic so we need to add the # newline after the inline comment. ( before_segment, anchor_segment, ) = self._handle_preceding_inline_comments( info.before_segment, info.anchor_segment ) if (len(before_segment) == 1) and all( s.is_type("newline") for s in before_segment ): return None # If preceding segment is not a single newline then delete the old # semi-colon/preceding whitespace and then insert the # semi-colon in the correct location. # This handles an edge case in which an inline comment comes after # the semi-colon. anchor_segment = self._handle_trailing_inline_comments( parent_segment, anchor_segment ) fixes = [] if anchor_segment is target_segment: fixes.append( LintFix.replace( anchor_segment, [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ) else: fixes.extend( self._create_semicolon_and_delete_whitespace( target_segment, parent_segment, anchor_segment, info.whitespace_deletions, [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ) return LintResult( anchor=anchor_segment, fixes=fixes, ) def _create_semicolon_and_delete_whitespace( self, target_segment: BaseSegment, parent_segment: BaseSegment, anchor_segment: BaseSegment, whitespace_deletions: Segments, create_segments: List[BaseSegment], ) -> List[LintFix]: anchor_segment = self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True ) lintfix_fn = LintFix.create_after whitespace_deletion_set = set(whitespace_deletions) if anchor_segment in whitespace_deletion_set: # Can't delete() and create_after() the same segment. Use replace() # instead. lintfix_fn = LintFix.replace whitespace_deletions = whitespace_deletions.select( lambda seg: seg is not anchor_segment ) fixes = [ lintfix_fn( anchor_segment, create_segments, ), LintFix.delete( target_segment, ), ] fixes.extend(LintFix.delete(d) for d in whitespace_deletions) return fixes def _ensure_final_semicolon( self, parent_segment: BaseSegment ) -> Optional[LintResult]: # Iterate backwards over complete stack to find # if the final semi-colon is already present. 
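        # (For example, with require_final_semicolon = True a file ending in
        # a bare "SELECT 1" gains a trailing ";"; an illustrative case, and
        # the exact placement then follows the multiline_newline setting.)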
anchor_segment = parent_segment.segments[-1] trigger_segment = parent_segment.segments[-1] semi_colon_exist_flag = False is_one_line = False before_segment = [] for segment in parent_segment.segments[::-1]: anchor_segment = segment if segment.is_type("statement_terminator"): semi_colon_exist_flag = True elif segment.is_code: is_one_line = self._is_one_line_statement(parent_segment, segment) break elif not segment.is_meta: before_segment.append(segment) trigger_segment = segment else: return None # File does not contain any statements self.logger.debug("Trigger on: %s", trigger_segment) self.logger.debug("Anchoring on: %s", anchor_segment) semicolon_newline = self.multiline_newline if not is_one_line else False if not semi_colon_exist_flag: # Create the final semi-colon if it does not yet exist. # Semi-colon on same line. if not semicolon_newline: fixes = [ LintFix.create_after( self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True, ), [ SymbolSegment(raw=";", type="statement_terminator"), ], ) ] # Semi-colon on new line. else: # Adjust before_segment and anchor_segment for inline # comments. ( before_segment, anchor_segment, ) = self._handle_preceding_inline_comments( before_segment, anchor_segment ) self.logger.debug("Revised anchor on: %s", anchor_segment) fixes = [ LintFix.create_after( self._choose_anchor_segment( parent_segment, "create_after", anchor_segment, filter_meta=True, ), [ NewlineSegment(), SymbolSegment(raw=";", type="statement_terminator"), ], ) ] return LintResult( anchor=trigger_segment, fixes=fixes, ) return None def _eval(self, context: RuleContext) -> List[LintResult]: """Statements must end with a semi-colon.""" # Config type hints self.multiline_newline: bool self.require_final_semicolon: bool # We should only be dealing with a root segment assert context.segment.is_type("file") results = [] for idx, seg in enumerate(context.segment.segments): res = None # First we can simply handle the case of existing semi-colon alignment. if seg.is_type("statement_terminator"): # If it's a terminator then we know it's a raw. seg = cast(RawSegment, seg) self.logger.debug("Handling semi-colon: %s", seg) res = self._handle_semicolon(seg, context.segment) # Otherwise handle the end of the file separately. elif ( self.require_final_semicolon and idx == len(context.segment.segments) - 1 ): self.logger.debug("Handling final segment: %s", seg) res = self._ensure_final_semicolon(context.segment) if res: results.append(res) return results sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV07.py000066400000000000000000000077541451700765000223430ustar00rootroot00000000000000"""Implementation of Rule CV07.""" from typing import List from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, sp class Rule_CV07(BaseRule): """Top-level statements should not be wrapped in brackets. **Anti-pattern** A top-level statement is wrapped in brackets. .. code-block:: sql :force: (SELECT foo FROM bar) -- This also applies to statements containing a sub-query. (SELECT foo FROM (SELECT * FROM bar)) **Best practice** Don't wrap top-level statements in brackets. .. code-block:: sql :force: SELECT foo FROM bar -- Likewise for statements containing a sub-query. 
SELECT foo FROM (SELECT * FROM bar) """ name = "convention.statement_brackets" aliases = ("L053",) groups = ("all", "convention") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True @staticmethod def _iter_statements(file_segment): """Designed to be used on files. Yields only direct children, or children of batches. """ for seg in file_segment.segments: if seg.is_type("batch"): for subseg in seg.segments: if subseg.is_type("statement"): yield subseg elif seg.is_type("statement"): yield seg @classmethod def _iter_bracketed_statements(cls, file_segment): for stmt in cls._iter_statements(file_segment): for seg in stmt.segments: if seg.is_type("bracketed"): yield stmt, seg def _eval(self, context: RuleContext) -> List[LintResult]: """Top-level statements should not be wrapped in brackets.""" # Because of the root_only_crawler, this can control its own # crawling behaviour. results = [] for parent, bracketed_segment in self._iter_bracketed_statements( context.segment ): self.logger.debug("Evaluating %s in %s", bracketed_segment, parent) # Replace the bracketed segment with its # children, excluding the bracket symbols. bracket_set = {"start_bracket", "end_bracket"} filtered_children = Segments( *[ segment for segment in bracketed_segment.segments if segment.get_type() not in bracket_set and not segment.is_meta ] ) # Lift leading/trailing whitespace and inline comments to the # segment above. This avoids introducing a parse error (ANSI and other # dialects generally don't allow this at lower levels of the parse # tree). to_lift_predicate = sp.or_(sp.is_whitespace(), sp.is_type("inline_comment")) leading = filtered_children.select(loop_while=to_lift_predicate) self.logger.debug("Leading: %s", leading) trailing = ( filtered_children.reversed() .select(loop_while=to_lift_predicate) .reversed() ) self.logger.debug("Trailing: %s", trailing) lift_nodes = set(leading + trailing) fixes = [] if lift_nodes: fixes.append(LintFix.create_before(parent, list(leading))) fixes.append(LintFix.create_after(parent, list(trailing))) fixes.extend([LintFix.delete(segment) for segment in lift_nodes]) filtered_children = filtered_children[len(leading) : -len(trailing)] fixes.append( LintFix.replace( bracketed_segment, filtered_children, ) ) results.append(LintResult(anchor=bracketed_segment, fixes=fixes)) return results sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV08.py000066400000000000000000000025361451700765000223350ustar00rootroot00000000000000"""Implementation of Rule CV08.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV08(BaseRule): """Use ``LEFT JOIN`` instead of ``RIGHT JOIN``. **Anti-pattern** ``RIGHT JOIN`` is used. .. code-block:: sql :force: SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id; **Best practice** Refactor and use ``LEFT JOIN`` instead. .. code-block:: sql :force: SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id; """ name = "convention.left_join" aliases = ("L055",) groups = ("all", "convention") crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Use LEFT JOIN instead of RIGHT JOIN.""" # We are only interested in JOIN clauses. assert context.segment.is_type("join_clause") # Identify if RIGHT JOIN is present.
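        # This is a simple keyword-set test, e.g. (hypothetical clause):
        #   RIGHT JOIN bar ON foo.bar_id = bar.id
        # yields {"RIGHT", "JOIN", ...} as raw_upper values, so the subset
        # check below fires; RIGHT OUTER JOIN is caught the same way.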
if {"RIGHT", "JOIN"}.issubset( {segment.raw_upper for segment in context.segment.segments} ): return LintResult(context.segment.segments[0]) return None sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV09.py000066400000000000000000000076711451700765000223420ustar00rootroot00000000000000"""Implementation of Rule CV09.""" from typing import List, Optional import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_CV09(BaseRule): """Block a list of configurable words from being used. This generic rule can be useful to prevent certain keywords, functions, or objects from being used. Only whole words can be blocked, not phrases, nor parts of words. This block list is case insensitive. **Example use cases** * We prefer ``BOOL`` over ``BOOLEAN`` and there is no existing rule to enforce this. Until such a rule is written, we can add ``BOOLEAN`` to the deny list to cause a linting error to flag this. * We have deprecated a schema/table/function and want to prevent it being used in future. We can add that to the denylist and then add a ``-- noqa: CV09`` for the few exceptions that still need to be in the code base for now. **Anti-pattern** If the ``blocked_words`` config is set to ``deprecated_table,bool`` then the following will flag: .. code-block:: sql SELECT * FROM deprecated_table WHERE 1 = 1; CREATE TABLE myschema.t1 (a BOOL); **Best practice** Do not use any blocked words: .. code-block:: sql SELECT * FROM another_table WHERE 1 = 1; CREATE TABLE myschema.t1 (a BOOLEAN); """ name = "convention.blocked_words" aliases = ("L062",) groups = ("all", "convention") # It's a broad selector, but only trigger on raw segments. crawl_behaviour = SegmentSeekerCrawler({"raw"}) config_keywords = [ "blocked_words", "blocked_regex", "match_source", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: # Config type hints self.blocked_words: Optional[str] self.blocked_regex: Optional[str] self.match_source: Optional[bool] # Exit early if no block list set if not self.blocked_words and not self.blocked_regex: return None if context.segment.type == "comment": return None # Get the ignore list configuration and cache it try: blocked_words_list = self.blocked_words_list except AttributeError: # First-time only, read the settings from configuration. # So we can cache them for next time for speed.
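            # A minimal sketch of this cache-on-first-use idiom (the
            # `Cached` class is illustrative only, not part of the rule):
            # the attribute doesn't exist on the first call, so the
            # AttributeError branch runs exactly once per rule instance.
            #
            #   >>> class Cached:
            #   ...     def get(self):
            #   ...         try:
            #   ...             return self._value
            #   ...         except AttributeError:
            #   ...             self._value = "computed once"
            #   ...             return self._value
            #   >>> Cached().get()
            #   'computed once'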
blocked_words_list = self._init_blocked_words() if context.segment.raw_upper in blocked_words_list: return LintResult( anchor=context.segment, description=f"Use of blocked word '{context.segment.raw}'.", ) if self.blocked_regex: if regex.search(self.blocked_regex, context.segment.raw): return LintResult( anchor=context.segment, description=f"Use of blocked regex '{context.segment.raw}'.", ) if self.match_source: for segment in context.segment.raw_segments: source_str = segment.pos_marker.source_str() if regex.search(self.blocked_regex, source_str): return LintResult( anchor=context.segment, description=f"Use of blocked regex '{source_str}'.", ) return None def _init_blocked_words(self) -> List[str]: """Called first time rule is evaluated to fetch & cache the blocked_words.""" blocked_words_config = getattr(self, "blocked_words") if blocked_words_config: self.blocked_words_list = self.split_comma_separated_string( blocked_words_config.upper() ) else: # pragma: no cover # Shouldn't get here as we exit early if no block list self.blocked_words_list = [] return self.blocked_words_list sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV10.py000066400000000000000000000272631451700765000223320ustar00rootroot00000000000000"""Implementation of Rule CV10.""" from typing import Optional import regex from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import LiteralSegment from sqlfluff.utils.functional import FunctionalContext, rsp class Rule_CV10(BaseRule): r"""Consistent usage of preferred quotes for quoted literals. Some databases allow quoted literals to use either single or double quotes. Prefer one type of quotes as specified in rule setting, falling back to alternate quotes to reduce the need for escapes. Dollar-quoted raw strings are excluded from this rule, as they are mostly used for literal UDF Body definitions. .. note:: This rule only checks quoted literals and not quoted identifiers as they often cannot interchange single and double quotes This rule is only enabled for dialects that allow single *and* double quotes for quoted literals (currently ``bigquery``, ``databricks``, ``hive``, ``mysql``, ``sparksql``). It can be enabled for other dialects with the ``force_enable = True`` flag. **Anti-pattern** .. code-block:: sql :force: select "abc", 'abc', "\"", "abc" = 'abc' from foo **Best practice** Ensure all quoted literals use preferred quotes, unless escaping can be reduced by using alternate quotes. .. code-block:: sql :force: select "abc", "abc", '"', "abc" = "abc" from foo """ name = "convention.quoted_literals" aliases = ("L064",) groups = ("all", "convention") config_keywords = ["preferred_quoted_literal_style", "force_enable"] crawl_behaviour = SegmentSeekerCrawler({"literal"}) targets_templated = True is_fix_compatible = True _dialects_with_double_quoted_strings = [ "bigquery", "databricks", "hive", "mysql", "sparksql", ] _quotes_mapping = { "single_quotes": { "common_name": "single quotes", "preferred_quote_char": "'", "alternate_quote_char": '"', }, "double_quotes": { "common_name": "double quotes", "preferred_quote_char": '"', "alternate_quote_char": "'", }, } # BigQuery string prefix characters. 
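    # A short illustration of how these prefixes behave (example values
    # assumed, not exhaustive): they mark raw/bytes literals such as
    # r"..." or B'...', and str.lstrip treats its argument as a set of
    # characters to strip from the front:
    #
    #   >>> 'rb"abc"'.lstrip("rbRB")
    #   '"abc"'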
_string_prefix_chars = "rbRB" def _eval(self, context: RuleContext) -> Optional[LintResult]: # Config type hints self.preferred_quoted_literal_style: str self.force_enable: bool # Only care about quoted literal segments. if not context.segment.is_type("quoted_literal"): return None if not ( self.force_enable or context.dialect.name in self._dialects_with_double_quoted_strings ): return LintResult(memory=context.memory) # This rule can also cover quoted literals that are partially templated. # I.e. when the quotes characters are _not_ part of the template we can # meaningfully apply this rule. templated_raw_slices = FunctionalContext(context).segment.raw_slices.select( rsp.is_slice_type("templated") ) for raw_slice in templated_raw_slices: pos_marker = context.segment.pos_marker # This is to make mypy happy. assert isinstance(pos_marker, PositionMarker) # Check whether the quote characters are inside the template. # For the leading quote we need to account for string prefix characters. leading_quote_inside_template = pos_marker.source_str()[:2].lstrip( self._string_prefix_chars )[0] not in ['"', "'"] trailing_quote_inside_template = pos_marker.source_str()[-1] not in [ '"', "'", ] # quotes are not entirely outside of a template, nothing we can do if leading_quote_inside_template or trailing_quote_inside_template: return LintResult(memory=context.memory) # If quoting style is set to consistent we use the quoting style of the first # quoted_literal that we encounter. if self.preferred_quoted_literal_style == "consistent": memory = context.memory preferred_quoted_literal_style = memory.get( "preferred_quoted_literal_style" ) if not preferred_quoted_literal_style: # Getting the quote from LAST character to be able to handle STRING # prefixes preferred_quoted_literal_style = ( "double_quotes" if context.segment.raw[-1] == '"' else "single_quotes" ) memory[ "preferred_quoted_literal_style" ] = preferred_quoted_literal_style self.logger.debug( "Preferred string quotes is set to `consistent`. Derived quoting " "style %s from first quoted literal.", preferred_quoted_literal_style, ) else: preferred_quoted_literal_style = self.preferred_quoted_literal_style fixed_string = self._normalize_preferred_quoted_literal_style( context.segment.raw, preferred_quote_char=self._quotes_mapping[preferred_quoted_literal_style][ "preferred_quote_char" ], alternate_quote_char=self._quotes_mapping[preferred_quoted_literal_style][ "alternate_quote_char" ], ) if fixed_string != context.segment.raw: # We can't just set the primary type, but we have to ensure that the # subtypes are properly set too so that the re-parse checks pass. if fixed_string[0] == "'": _instance_types = ("quoted_literal", "single_quote") else: _instance_types = ("quoted_literal", "double_quote") return LintResult( anchor=context.segment, memory=context.memory, fixes=[ LintFix.replace( context.segment, [ LiteralSegment( raw=fixed_string, instance_types=_instance_types, ) ], ) ], description=( "Inconsistent use of preferred quote style '" f"{self._quotes_mapping[preferred_quoted_literal_style]['common_name']}" # noqa: E501 f"'. Use {fixed_string} instead of {context.segment.raw}." ), ) return None # Code for preferred quoted_literal style was copied from Black string normalization # and adapted to our use-case. def _regex_sub_with_overlap( self, regex: regex.Pattern, replacement: str, original: str ) -> str: """Replace `regex` with `replacement` twice on `original`. This is used by string normalization to perform replaces on overlapping matches. 
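        For example (an added illustration: ``pat`` mirrors the
        ``escaped_orig_quote`` pattern built below with ``orig_quote = "'"``),
        unescaping both quotes in ``a\'\'`` takes two passes, because the
        second escaped quote starts at the character the first match has
        already consumed:

        >>> import regex
        >>> pat = regex.compile(r"([^\\]|^)\\((?:\\\\)*)'")
        >>> pat.sub(r"\1\2'", r"a\'\'")
        "a'\\'"
        >>> pat.sub(r"\1\2'", pat.sub(r"\1\2'", r"a\'\'"))
        "a''"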
Source: https://github.com/psf/black/blob/7f7673d941a947a8d392c8c0866d3d588affc174/src/black/strings.py#L23-L29 """ return regex.sub(replacement, regex.sub(replacement, original)) def _normalize_preferred_quoted_literal_style( self, s: str, preferred_quote_char: str, alternate_quote_char: str ) -> str: """Prefer `preferred_quote_char` but only if it doesn't cause more escaping. Adds or removes backslashes as appropriate. Source: https://github.com/psf/black/blob/7f7673d941a947a8d392c8c0866d3d588affc174/src/black/strings.py#L167 """ value = s.lstrip(self._string_prefix_chars) if value[:3] == preferred_quote_char * 3: # In triple-quoted strings we are not replacing escaped quotes. # So nothing left to do and we can exit. return s elif value[0] == preferred_quote_char: # Quotes are alright already. But maybe we can remove some unnecessary # escapes or reduce the number of escapes using alternate_quote_char ? orig_quote = preferred_quote_char new_quote = alternate_quote_char elif value[:3] == alternate_quote_char * 3: orig_quote = alternate_quote_char * 3 new_quote = preferred_quote_char * 3 elif value[0] == alternate_quote_char: orig_quote = alternate_quote_char new_quote = preferred_quote_char else: self.logger.debug( "Found quoted string %s using neither preferred quote char %s " "nor alternate_quote_char %s. Skipping...", s, preferred_quote_char, alternate_quote_char, ) return s first_quote_pos = s.find(orig_quote) prefix = s[:first_quote_pos] unescaped_new_quote = regex.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") escaped_new_quote = regex.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") escaped_orig_quote = regex.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)] if "r" in prefix.lower(): if unescaped_new_quote.search(body): self.logger.debug( "There's at least one unescaped new_quote in this raw string " "so converting is impossible." ) return s # Do not modify the body of raw strings by introducing or removing # backslashes as this changes the value of the raw string. new_body = body else: # remove unnecessary escapes new_body = self._regex_sub_with_overlap( escaped_new_quote, rf"\1\2{new_quote}", body ) if body != new_body: # Consider the string without unnecessary escapes as the original self.logger.debug("Removing unnecessary escapes in %s.", body) body = new_body s = f"{prefix}{orig_quote}{body}{orig_quote}" new_body = self._regex_sub_with_overlap( escaped_orig_quote, rf"\1\2{orig_quote}", new_body ) new_body = self._regex_sub_with_overlap( unescaped_new_quote, rf"\1\\{new_quote}", new_body ) if ( new_quote == 3 * preferred_quote_char and new_body[-1:] == preferred_quote_char ): # edge case: for example when converting quotes from '''a"''' # to """a\"""" the last " of the string body needs to be escaped. new_body = new_body[:-1] + f"\\{preferred_quote_char}" orig_escape_count = body.count("\\") new_escape_count = new_body.count("\\") if new_escape_count > orig_escape_count: self.logger.debug( "Changing quote style would introduce more escapes in the body. " "Before: %s After: %s . 
Skipping.", body, new_body, ) return s # Do not introduce more escaping if new_escape_count == orig_escape_count and orig_quote == preferred_quote_char: # Already using preferred_quote_char, and no escape benefit to changing return s return f"{prefix}{new_quote}{new_body}{new_quote}" sqlfluff-2.3.5/src/sqlfluff/rules/convention/CV11.py000066400000000000000000000377421451700765000223360ustar00rootroot00000000000000"""Implementation of Rule CV11.""" from typing import Iterable, List, Optional from sqlfluff.core.parser import ( BaseSegment, KeywordSegment, SymbolSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_CV11(BaseRule): """Enforce consistent type casting style. .. note:: This is only compatible with 2-arguments CONVERT as some dialects allow an optional 3rd argument e.g TSQL, which cannot be rewritten into CAST. This rule is disabled by default for Teradata because it supports different type casting apart from CONVERT and :: e.g DATE '2007-01-01', '9999-12-31' (DATE). **Anti-pattern** Using mixture of CONVERT, :: and CAST when ``preferred_type_casting_style`` config is set to ``consistent`` (default). .. code-block:: sql SELECT CONVERT(int, 1) AS bar, 100::int::text, CAST(10 AS text) AS coo FROM foo; **Best practice** Use consistent type casting style. .. code-block:: sql SELECT CAST(1 AS int) AS bar, CAST(CAST(100 AS int) AS text), CAST(10 AS text) AS coo FROM foo; """ name = "convention.casting_style" aliases = ("L067",) groups = ("all", "convention") config_keywords = ["preferred_type_casting_style"] crawl_behaviour = SegmentSeekerCrawler({"function", "cast_expression"}) is_fix_compatible = True @staticmethod def _get_children(segments: Segments) -> Segments: return segments.children( sp.and_( sp.not_(sp.is_meta()), sp.not_( sp.is_type( "start_bracket", "end_bracket", "whitespace", "newline", "casting_operator", "comma", "keyword", ) ), ) ) @staticmethod def _cast_fix_list( context: RuleContext, cast_arg_1: Iterable[BaseSegment], cast_arg_2: BaseSegment, later_types: Optional[Segments] = None, ) -> List[LintFix]: """Generate list of fixes to convert CONVERT and ShorthandCast to CAST.""" # Add cast and opening parenthesis. edits = ( [ WordSegment("cast", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] + list(cast_arg_1) + [ WhitespaceSegment(), KeywordSegment("as"), WhitespaceSegment(), cast_arg_2, SymbolSegment(")", type="end_bracket"), ] ) if later_types: pre_edits: List[BaseSegment] = [ WordSegment("cast", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] in_edits: List[BaseSegment] = [ WhitespaceSegment(), KeywordSegment("as"), WhitespaceSegment(), ] post_edits: List[BaseSegment] = [ SymbolSegment(")", type="end_bracket"), ] for _type in later_types: edits = pre_edits + edits + in_edits + [_type] + post_edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _convert_fix_list( context: RuleContext, convert_arg_1: BaseSegment, convert_arg_2: BaseSegment, later_types=None, ) -> List[LintFix]: """Generate list of fixes to convert CAST and ShorthandCast to CONVERT.""" # Add convert and opening parenthesis. 
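        # As a rough sketch (tokens shown informally): for CAST(1 AS int)
        # the edit list below rebuilds CONVERT(int, 1). The call sites in
        # _eval pass the data type as convert_arg_1 and the expression as
        # convert_arg_2, which is what produces that argument order.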
edits = [ WordSegment("convert", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), convert_arg_1, SymbolSegment(",", type="comma"), WhitespaceSegment(), convert_arg_2, SymbolSegment(")", type="end_bracket"), ] if later_types: pre_edits: List[BaseSegment] = [ WordSegment("convert", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), ] in_edits: List[BaseSegment] = [ SymbolSegment(",", type="comma"), WhitespaceSegment(), ] post_edits: List[BaseSegment] = [ SymbolSegment(")", type="end_bracket"), ] for _type in later_types: edits = pre_edits + [_type] + in_edits + edits + post_edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _shorthand_fix_list( context: RuleContext, shorthand_arg_1: BaseSegment, shorthand_arg_2: BaseSegment ) -> List[LintFix]: """Generate list of fixes to convert CAST and CONVERT to ShorthandCast.""" if len(shorthand_arg_1.raw_segments) > 1: edits = [ SymbolSegment("(", type="start_bracket"), shorthand_arg_1, SymbolSegment(")", type="end_bracket"), ] else: edits = [shorthand_arg_1] edits.extend( [ SymbolSegment("::", type="casting_operator"), shorthand_arg_2, ] ) fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes def _eval(self, context: RuleContext) -> Optional[LintResult]: """Enforce consistent type casting style.""" # Config type hints self.preferred_type_casting_style: str # Rule disabled for teradata. if context.dialect.name == "teradata": return None # Construct segment type casting if context.segment.is_type("function"): function_name = context.segment.get_child("function_name") # Functions should always have a name, that means this clause should # be unnecessary. if not function_name: # pragma: no cover return None elif function_name.raw_upper == "CAST": current_type_casting_style = "cast" elif function_name.raw_upper == "CONVERT": current_type_casting_style = "convert" else: current_type_casting_style = None elif context.segment.is_type("cast_expression"): current_type_casting_style = "shorthand" else: # pragma: no cover current_type_casting_style = None functional_context = FunctionalContext(context) # If casting style is set to consistent, # we use the casting style of the first segment we encounter. 
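        # A rough walk-through of the "consistent" hand-off via memory,
        # assuming three casts appear in source order:
        #
        #   CAST(a AS int)   -> memory empty; records style "cast"
        #   b::int           -> "shorthand" != "cast"; flagged and rewritten
        #   CONVERT(int, c)  -> "convert" != "cast"; flagged and rewritten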
if self.preferred_type_casting_style == "consistent": memory = context.memory prior_type_casting_style = context.memory.get("prior_type_casting_style") previous_skipped = context.memory.get("previous_skipped") # if previous_skipped then we can skip the whole fix # Construct fixes if prior_type_casting_style == "cast": if current_type_casting_style == "convert": # Get the content of CONVERT convert_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) # We only care about two-argument CONVERT # some dialects allow an optional 3rd argument e.g. TSQL # which cannot be rewritten into CAST if len(convert_content) > 2: # Record that we skipped an unfixable CONVERT. if previous_skipped is None: # Only set previous_skipped if it is not # already set, so we remember the first skip. memory["previous_skipped"] = True return None fixes = self._cast_fix_list( context, [convert_content[1]], convert_content[0], ) elif current_type_casting_style == "shorthand": # Get the expression and the datatype segment expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._cast_fix_list( context, [expression_datatype_segment[0]], expression_datatype_segment[1], # We can have multiple shorthand casts e.g. 1::int::text # in that case, we need to introduce nested CAST() expression_datatype_segment[2:], ) elif prior_type_casting_style == "convert": if current_type_casting_style == "cast": cast_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) if len(cast_content) > 2: return None fixes = self._convert_fix_list( context, cast_content[1], cast_content[0], ) elif current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._convert_fix_list( context, expression_datatype_segment[1], expression_datatype_segment[0], expression_datatype_segment[2:], ) elif prior_type_casting_style == "shorthand": if current_type_casting_style == "cast": # Get the content of CAST cast_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) if len(cast_content) > 2: return None fixes = self._shorthand_fix_list( context, cast_content[0], cast_content[1], ) elif current_type_casting_style == "convert": convert_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) if len(convert_content) > 2: return None fixes = self._shorthand_fix_list( context, convert_content[1], convert_content[0], ) if ( prior_type_casting_style and current_type_casting_style and (prior_type_casting_style != current_type_casting_style) ): return LintResult( anchor=context.segment, memory=context.memory, fixes=fixes, description=("Inconsistent type casting styles found."), ) if prior_type_casting_style is None: # Only update prior_type_casting_style if it is none, this ultimately # makes sure we maintain the first casting style we encounter memory["prior_type_casting_style"] = current_type_casting_style elif ( current_type_casting_style and current_type_casting_style != self.preferred_type_casting_style ): convert_content = None cast_content = None if self.preferred_type_casting_style == "cast": if current_type_casting_style == "convert": convert_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) fixes = self._cast_fix_list( context, [convert_content[1]], convert_content[0], ) elif
current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) for data_type_idx, seg in enumerate(expression_datatype_segment): if seg.is_type("data_type"): break fixes = self._cast_fix_list( context, expression_datatype_segment[:data_type_idx], expression_datatype_segment[data_type_idx], expression_datatype_segment[data_type_idx + 1 :], ) elif self.preferred_type_casting_style == "convert": if current_type_casting_style == "cast": cast_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) fixes = self._convert_fix_list( context, cast_content[1], cast_content[0], ) elif current_type_casting_style == "shorthand": expression_datatype_segment = self._get_children( functional_context.segment ) fixes = self._convert_fix_list( context, expression_datatype_segment[1], expression_datatype_segment[0], expression_datatype_segment[2:], ) elif self.preferred_type_casting_style == "shorthand": if current_type_casting_style == "cast": cast_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) fixes = self._shorthand_fix_list( context, cast_content[0], cast_content[1], ) elif current_type_casting_style == "convert": convert_content = self._get_children( functional_context.segment.children(sp.is_type("bracketed")) ) fixes = self._shorthand_fix_list( context, convert_content[1], convert_content[0], ) # Don't fix if there's too much content. if (convert_content and len(convert_content) > 2) or ( cast_content and len(cast_content) > 2 ): fixes = [] return LintResult( anchor=context.segment, memory=context.memory, fixes=fixes, description=( "Used type casting style is different from" " the preferred type casting style." ), ) return None sqlfluff-2.3.5/src/sqlfluff/rules/convention/__init__.py000066400000000000000000000022441451700765000234100ustar00rootroot00000000000000"""The convention plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. 
""" from sqlfluff.rules.convention.CV01 import Rule_CV01 from sqlfluff.rules.convention.CV02 import Rule_CV02 from sqlfluff.rules.convention.CV03 import Rule_CV03 from sqlfluff.rules.convention.CV04 import Rule_CV04 from sqlfluff.rules.convention.CV05 import Rule_CV05 from sqlfluff.rules.convention.CV06 import Rule_CV06 from sqlfluff.rules.convention.CV07 import Rule_CV07 from sqlfluff.rules.convention.CV08 import Rule_CV08 from sqlfluff.rules.convention.CV09 import Rule_CV09 from sqlfluff.rules.convention.CV10 import Rule_CV10 from sqlfluff.rules.convention.CV11 import Rule_CV11 return [ Rule_CV01, Rule_CV02, Rule_CV03, Rule_CV04, Rule_CV05, Rule_CV06, Rule_CV07, Rule_CV08, Rule_CV09, Rule_CV10, Rule_CV11, ] sqlfluff-2.3.5/src/sqlfluff/rules/jinja/000077500000000000000000000000001451700765000202065ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/jinja/JJ01.py000066400000000000000000000173151451700765000212330ustar00rootroot00000000000000"""Implementation of Rule JJ01.""" from typing import List, Tuple from sqlfluff.core.parser.segments import BaseSegment, SourceFix from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.core.templaters import JinjaTemplater class Rule_JJ01(BaseRule): """Jinja tags should have a single whitespace on either side. This rule is only active if the ``jinja`` templater (or one of it's subclasses, like the ``dbt`` templater) are used for the current file. **Anti-pattern** Jinja tags with either no whitespace or very long whitespace are hard to read. .. code-block:: jinja :force: SELECT {{ a }} from {{ref('foo')}} **Best practice** A single whitespace surrounding Jinja tags, alternatively longer gaps containing newlines are acceptable. .. code-block:: jinja :force: SELECT {{ a }} from {{ ref('foo') }}; SELECT {{ a }} from {{ ref('foo') }}; """ name = "jinja.padding" aliases = ("L046",) groups = ("all", "core", "jinja") crawl_behaviour = RootOnlyCrawler() targets_templated = True is_fix_compatible = True @staticmethod def _get_whitespace_ends(s: str) -> Tuple[str, str, str, str, str]: """Remove tag ends and partition off any whitespace ends. This function assumes that we've already trimmed the string to just the tag, and will raise an AssertionError if not. >>> Rule_JJ01._get_whitespace_ends(' {{not_trimmed}} ') Traceback (most recent call last): ... AssertionError In essence it divides up a tag into the end tokens, any leading or trailing whitespace and the inner content >>> Rule_JJ01._get_whitespace_ends('{{ my_content }}') ('{{', ' ', 'my_content', ' ', '}}') It also works with block tags and more complicated content and end markers. >>> Rule_JJ01._get_whitespace_ends('{%+if a + b is True -%}') ('{%+', '', 'if a + b is True', ' ', '-%}') """ assert s[0] == "{" and s[-1] == "}" # Jinja tags all have a length of two. We can use slicing # to remove them easily. main = s[2:-2] pre = s[:2] post = s[-2:] # Optionally Jinja tags may also have plus of minus notation # https://jinja2docs.readthedocs.io/en/stable/templates.html#whitespace-control modifier_chars = ["+", "-"] if main and main[0] in modifier_chars: main = main[1:] pre = s[:3] if main and main[-1] in modifier_chars: main = main[:-1] post = s[-3:] inner = main.strip() pos = main.find(inner) return pre, main[:pos], inner, main[pos + len(inner) :], post @classmethod def _find_raw_at_src_idx(cls, segment: BaseSegment, src_idx: int): """Recursively search to find a raw segment for a position in the source. 
NOTE: This assumes it's not being called on a `raw`. In the case that there are multiple potential targets, we will find the first. """ assert segment.segments for seg in segment.segments: if not seg.pos_marker: # pragma: no cover continue src_slice = seg.pos_marker.source_slice # If it's before, skip onward. if src_slice.stop <= src_idx: continue # Is the current segment raw? if seg.is_raw(): return seg # Otherwise recurse return cls._find_raw_at_src_idx(seg, src_idx) def _eval(self, context: RuleContext) -> List[LintResult]: """Look for non-literal segments. NOTE: The existing crawlers don't filter very well for only templated code, and so we process the whole file from the root here. """ # If the position maker for the root segment is literal then there's # no templated code. So we can return early. assert context.segment.pos_marker if context.segment.pos_marker.is_literal(): return [] # We'll need the templated file. If for whatever reason it's # not present, abort. if not context.templated_file: # pragma: no cover return [] # We also only work with setups which use the jinja templater # or a derivative of that. Otherwise return empty. _templater = context.config.get("templater_obj") if not isinstance(_templater, JinjaTemplater): self.logger.debug(f"Detected non-jinja templater: {_templater}") return [] results = [] # Work through the templated slices for raw_slice in context.templated_file.raw_sliced: # We only want templated slices. if raw_slice.slice_type not in ("templated", "block_start", "block_end"): continue stripped = raw_slice.raw.strip() if not stripped or stripped[0] != "{" or stripped[-1] != "}": continue # pragma: no cover self.logger.debug( "Tag found @ source index %s: %r ", raw_slice.source_idx, stripped ) # Partition and Position src_idx = raw_slice.source_idx tag_pre, ws_pre, inner, ws_post, tag_post = self._get_whitespace_ends( stripped ) position = raw_slice.raw.find(stripped[0]) self.logger.debug( "Tag string segments: %r | %r | %r | %r | %r @ %s + %s", tag_pre, ws_pre, inner, ws_post, tag_post, src_idx, position, ) # For the following section, whitespace should be a single # whitespace OR it should contain a newline. pre_fix = None post_fix = None # Check the initial whitespace. if not ws_pre or (ws_pre != " " and "\n" not in ws_pre): pre_fix = " " # Check latter whitespace. if not ws_post or (ws_post != " " and "\n" not in ws_post): post_fix = " " # If no fixes, continue if pre_fix is None and post_fix is None: continue fixed = ( tag_pre + (pre_fix or ws_pre) + inner + (post_fix or ws_post) + tag_post ) # We need to identify a raw segment to attach to fix to. raw_seg = self._find_raw_at_src_idx(context.segment, src_idx) # If that raw segment already has fixes, don't apply it again. # We're likely on a second pass. if raw_seg.source_fixes: continue source_fixes = [ SourceFix( fixed, slice( src_idx + position, src_idx + position + len(stripped), ), # This position in the templated file is rough, but # close enough for sequencing. 
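                    # (Illustrative numbers, assuming the docstring's
                    # {{ref('foo')}} example starts at source index 20 with
                    # no leading whitespace: the source slice above is then
                    # slice(20, 34) and `fixed` is "{{ ref('foo') }}".)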
raw_seg.pos_marker.templated_slice, ) ] results.append( LintResult( anchor=raw_seg, description=f"Jinja tags should have a single " f"whitespace on either side: {stripped}", fixes=[ LintFix.replace( raw_seg, [raw_seg.edit(source_fixes=source_fixes)], ) ], ) ) return results sqlfluff-2.3.5/src/sqlfluff/rules/jinja/__init__.py000066400000000000000000000006311451700765000223170ustar00rootroot00000000000000"""The jinja rules plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.jinja.JJ01 import Rule_JJ01 return [Rule_JJ01] sqlfluff-2.3.5/src/sqlfluff/rules/layout/000077500000000000000000000000001451700765000204305ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT01.py000066400000000000000000000044531451700765000214700ustar00rootroot00000000000000"""Implementation of Rule LT01.""" from typing import List, Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT01(BaseRule): """Inappropriate Spacing. This rule checks for and enforces the spacing as configured in :ref:`layoutconfig`. This includes excessive whitespace, trailing whitespace at the end of a line and also the wrong spacing between elements on the line. Because of this wide reach you may find that you wish to add specific configuration in your project to tweak how specific elements are treated. Rather than configuration on this specific rule, use the `sqlfluff.layout` section of your configuration file to customise how this rule operates. The ``•`` character represents a space in the examples below. **Anti-pattern** .. code-block:: sql :force: SELECT a, b(c) as d•• FROM foo•••• JOIN bar USING(a) **Best practice** * Unless an indent or preceding a comment, whitespace should be a single space. * There should also be no trailing whitespace at the ends of lines. * There should be a space after :code:`USING` so that it's not confused for a function. .. code-block:: sql SELECT a, b(c) as d FROM foo JOIN bar USING (a) """ name = "layout.spacing" # NOTE: This rule combines the following legacy rules: # - L001: Trailing Whitespace # - L005 & L008: Space around commas # - L006: Space around operators # - L023: Space after AS in WITH clause # - L024: Space immediately after USING # - L039: Unnecessary Whitespace # - L048: Spacing around quoted literals # - L071: Spacing around brackets aliases = ("L001", "L005", "L006", "L008", "L023", "L024", "L039", "L048", "L071") groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Unnecessary whitespace.""" sequence = ReflowSequence.from_root(context.segment, config=context.config) return sequence.respace().get_results() sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT02.py000066400000000000000000000034611451700765000214670ustar00rootroot00000000000000"""Implementation of Rule LT02.""" from typing import List from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT02(BaseRule): """Incorrect Indentation.
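    The expected indentation is configurable; for example (the values shown
    are the assumed defaults):

    .. code-block:: cfg

        [sqlfluff:indentation]
        indent_unit = space
        tab_space_size = 4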
**Anti-pattern** The ``•`` character represents a space and the ``→`` character represents a tab. In this example, the third line contains five spaces instead of four and the second line contains two spaces and one tab. .. code-block:: sql :force: SELECT ••→a, •••••b FROM foo **Best practice** Change the indentation to use a multiple of four spaces. This example also assumes that the ``indent_unit`` config value is set to ``space``. If it had instead been set to ``tab``, then the indents would be tabs instead. .. code-block:: sql :force: SELECT ••••a, ••••b FROM foo """ name = "layout.indent" # NOTE: We're combining three legacy rules here into one. aliases = ("L002", "L003", "L004") groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() is_fix_compatible = True targets_templated = True template_safe_fixes = True _adjust_anchors = True def _eval(self, context: RuleContext) -> List[LintResult]: """Indentation not consistent with previous lines. To set the default tab size, set the `tab_space_size` value in the appropriate configuration. To correct indents to tabs use the `indent_unit` value set to `tab`. """ return ( ReflowSequence.from_root(context.segment, context.config) .reindent() .get_results() ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT03.py000066400000000000000000000127071451700765000214730ustar00rootroot00000000000000"""Implementation of Rule LT03.""" from typing import List, Sequence from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.reflow import ReflowSequence class Rule_LT03(BaseRule): """Operators should follow a standard for being before/after newlines. The configuration for whether operators should be ``trailing`` or ``leading`` is part of :ref:`layoutconfig`. The default configuration is: .. code-block:: cfg [sqlfluff:layout:type:binary_operator] line_position = leading [sqlfluff:layout:type:comparison_operator] line_position = leading **Anti-pattern** In this example, if ``line_position = leading`` (or unspecified, as is the default), then the operator ``+`` should not be at the end of the second line. .. code-block:: sql SELECT a + b FROM foo **Best practice** If ``line_position = leading`` (or unspecified, as this is the default), place the operator after the newline. .. code-block:: sql SELECT a + b FROM foo If ``line_position = trailing``, place the operator before the newline. .. code-block:: sql SELECT a + b FROM foo """ name = "layout.operators" aliases = ("L007",) groups = ("all", "layout") crawl_behaviour = SegmentSeekerCrawler({"binary_operator", "comparison_operator"}) is_fix_compatible = True def _seek_newline( self, segments: Sequence[BaseSegment], idx: int, dir: int ) -> bool: """Seek in a direction, looking for newlines. Args: segments: A sequence of segments to seek within. idx: The index of the "current" segment. dir: The direction to seek in (+1 for forward, -1 for backward) """ assert dir in (1, -1) for segment in segments[idx + dir :: dir]: if segment.is_type("newline"): # It's definitely leading. No problems. self.logger.debug( "Shortcut (dir = %s) OK. Found newline: %s", dir, segment ) return True elif not segment.is_type("whitespace", "indent", "comment"): # We found something before it which suggests it's not leading. # We should run the full reflow routine to check. 
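                # A sketch of the slice in the loop header, with
                # illustrative values: for segments [op, ws, nl] at idx=0
                # and dir=1, segments[idx + dir :: dir] walks [ws, nl] and
                # the newline returns True; with dir=-1 the same expression
                # walks backwards from idx - 1.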
break return False def _check_trail_lead_shortcut( self, segment: BaseSegment, parent: BaseSegment, line_position: str ) -> bool: """Check to see whether we should pass the rule and shortcut. Args: segment: The target segment. parent: The parent segment (must contain `segment`). line_position: The `line_position` config for the segment. """ idx = parent.segments.index(segment) # Shortcut #1: Leading. if line_position == "leading": if self._seek_newline(parent.segments, idx, dir=-1): return True # If we didn't find a newline before, if there's _also_ not a newline # after, then we can also shortcut. i.e. the segment is "mid line". if not self._seek_newline(parent.segments, idx, dir=1): return True # Shortcut #2: Trailing. elif line_position == "trailing": if self._seek_newline(parent.segments, idx, dir=1): return True # If we didn't find a newline after, if there's _also_ not a newline # before, then we can also shortcut. i.e. the segment is "mid line". if not self._seek_newline(parent.segments, idx, dir=-1): return True return False def _eval(self, context: RuleContext) -> List[LintResult]: """Operators should follow a standard for being before/after newlines. For the fixing routines we delegate to the reflow utils. However for performance reasons we have some initial shortcuts to quickly identify situations which are _ok_ to avoid the overhead of the full reflow path. """ # NOTE: These shortcuts assume that any newlines will be direct # siblings of the operator in question. This isn't _always_ the case # but is true often enough to have meaningful upside from early # detection. if context.segment.is_type("comparison_operator"): comparison_positioning = context.config.get( "line_position", ["layout", "type", "comparison_operator"] ) if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], comparison_positioning ): return [LintResult()] elif context.segment.is_type("binary_operator"): binary_positioning = context.config.get( "line_position", ["layout", "type", "binary_operator"] ) if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], binary_positioning ): return [LintResult()] return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT04.py000066400000000000000000000050661451700765000214730ustar00rootroot00000000000000"""Implementation of Rule LT04.""" from typing import List from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.rules.layout.LT03 import Rule_LT03 from sqlfluff.utils.reflow import ReflowSequence class Rule_LT04(Rule_LT03): """Leading/Trailing comma enforcement. The configuration for whether commas should be ``trailing`` or ``leading`` is part of :ref:`layoutconfig`. The default configuration is: .. code-block:: cfg [sqlfluff:layout:type:comma] line_position = trailing **Anti-pattern** There is a mixture of leading and trailing commas. .. code-block:: sql SELECT a , b, c FROM foo **Best practice** By default, `SQLFluff` prefers trailing commas. However it is configurable for leading commas. The chosen style must be used consistently throughout your SQL. ..
code-block:: sql SELECT a, b, c FROM foo -- Alternatively, set the configuration file to 'leading' -- and then the following would be acceptable: SELECT a , b , c FROM foo """ name = "layout.commas" aliases = ("L019",) groups = ("all", "layout") crawl_behaviour = SegmentSeekerCrawler({"comma"}) _adjust_anchors = True is_fix_compatible = True def _eval(self, context: RuleContext) -> List[LintResult]: """Enforce comma placement. For the fixing routines we delegate to the reflow utils. However for performance reasons we have some initial shortcuts to quickly identify situations which are _ok_ to avoid the overhead of the full reflow path. """ comma_positioning = context.config.get( "line_position", ["layout", "type", "comma"] ) # NOTE: These shortcuts assume that any newlines will be direct # siblings of the comma in question. This isn't _always_ the case # but is true often enough to have meaningful upside from early # detection. if self._check_trail_lead_shortcut( context.segment, context.parent_stack[-1], comma_positioning ): return [LintResult()] return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT05.py000066400000000000000000000137141451700765000214740ustar00rootroot00000000000000"""Implementation of Rule LT05.""" from typing import List, cast from sqlfluff.core.parser.segments import TemplateSegment from sqlfluff.core.rules import LintResult, RuleContext from sqlfluff.core.rules.base import BaseRule from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT05(BaseRule): """Line is too long.""" name = "layout.long_lines" aliases = ("L016",) groups = ("all", "core", "layout") crawl_behaviour = RootOnlyCrawler() targets_templated = True template_safe_fixes = True _adjust_anchors = True _check_docstring = False is_fix_compatible = True config_keywords = [ "ignore_comment_lines", "ignore_comment_clauses", ] def _eval(self, context: RuleContext) -> List[LintResult]: """Line is too long.""" self.ignore_comment_lines: bool self.ignore_comment_clauses: bool # Reflow and generate fixes. results = ( ReflowSequence.from_root(context.segment, context.config) .break_long_lines() .get_results() ) # Ignore any comment line if appropriate. if self.ignore_comment_lines: raw_segments = context.segment.raw_segments for res in results[:]: # First handle the easy case that the anchor (i.e. the start # of the line is a comment). assert res.anchor assert res.anchor.pos_marker if res.anchor.is_type("comment"): self.logger.debug( "Purging result on long line starting with comment: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) continue # Then look for comments on the rest of the line: assert res.anchor.pos_marker raw_idx = raw_segments.index(res.anchor) for seg in raw_segments[raw_idx:]: if ( seg.pos_marker.working_line_no != res.anchor.pos_marker.working_line_no ): # We've gone past the end of the line. Stop looking. break # pragma: no cover # Is it a comment? if seg.is_type("comment"): self.logger.debug( "Purging result on long line containing comment: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break # Is it a template comment? 
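                    # (For example, a Jinja {# ... #} comment survives the
                    # templater as a "placeholder" segment whose block_type is
                    # "comment"; the branch below purges results on such lines
                    # just like ordinary SQL comment lines.)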
elif ( seg.is_type("placeholder") and cast(TemplateSegment, seg).block_type == "comment" ): self.logger.debug( "Purging result with template comment line: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break # Ignore any comment clauses if present. if self.ignore_comment_clauses: raw_segments = context.segment.raw_segments for res in results[:]: # The anchor should be the first raw on the line. Work forward # until we're not on the line. Check if any have a parent which # is a comment_clause. assert res.anchor assert res.anchor.pos_marker raw_idx = raw_segments.index(res.anchor) for seg in raw_segments[raw_idx:]: if ( seg.pos_marker.working_line_no != res.anchor.pos_marker.working_line_no ): # We've gone past the end of the line. Stop looking. break # Look to see if any are in comment clauses for ps in context.segment.path_to(seg): if ps.segment.is_type( "comment_clause", "comment_equals_clause" ): # It IS! Ok, purge this result from results, unless # the line is already too long without the comment. # We'll know that based on the line position of # the comment. # We can fairly confidently assert that the segment # will have a position marker at this stage. assert ps.segment.pos_marker line_pos = ps.segment.pos_marker.working_line_pos if line_pos < context.config.get("max_line_length"): # OK purge it. self.logger.debug( "Purging result on long line with comment " "clause: %s", res.anchor.pos_marker.working_line_no, ) results.remove(res) break self.logger.debug( "Keeping result on long line with comment clause. " "Still too long without comment: %s", res.anchor.pos_marker.working_line_no, ) # If we finish the loop without breaking, we didn't find a # comment. Keep looking. else: continue # If we did finish with a break, we should break the outer # loop too. break return results sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT06.py000066400000000000000000000045531451700765000214760ustar00rootroot00000000000000"""Implementation of Rule LT06.""" from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT06(BaseRule): """Function name not immediately followed by parenthesis. **Anti-pattern** In this example, there is a space between the function and the parenthesis. .. code-block:: sql SELECT sum (a) FROM foo **Best practice** Remove the space between the function and the parenthesis. .. code-block:: sql SELECT sum(a) FROM foo """ name = "layout.functions" aliases = ("L017",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Function name not immediately followed by bracket. Look for Function Segment with anything other than the function name before brackets NOTE: This hasn't been combined with LT01 because it has some special treatment for comments. That might be something we revisit at a later point if duplicate errors become problematic. 
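        For example, ``sum (a)`` is auto-fixable because only whitespace
        sits between the name and the bracket, while something like
        ``sum /* total */ (a)`` would be flagged but left for manual fixing.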
""" segment = FunctionalContext(context).segment # We only trigger on start_bracket (open parenthesis) assert segment.all(sp.is_type("function")) children = segment.children() function_name = children.first(sp.is_type("function_name"))[0] start_bracket = children.first(sp.is_type("bracketed"))[0] intermediate_segments = children.select( start_seg=function_name, stop_seg=start_bracket ) if intermediate_segments: # It's only safe to fix if there is only whitespace # or newlines in the intervening section. if intermediate_segments.all(sp.is_type("whitespace", "newline")): return LintResult( anchor=intermediate_segments[0], fixes=[LintFix.delete(seg) for seg in intermediate_segments], ) else: # It's not all whitespace, just report the error. return LintResult( anchor=intermediate_segments[0], ) return LintResult() sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT07.py000066400000000000000000000103041451700765000214660ustar00rootroot00000000000000"""Implementation of Rule LT07.""" from typing import Optional, Set, cast from sqlfluff.core.parser import NewlineSegment, RawSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT07(BaseRule): """``WITH`` clause closing bracket should be on a new line. **Anti-pattern** In this example, the closing bracket is on the same line as CTE. .. code-block:: sql :force: WITH zoo AS ( SELECT a FROM foo) SELECT * FROM zoo **Best practice** Move the closing bracket on a new line. .. code-block:: sql WITH zoo AS ( SELECT a FROM foo ) SELECT * FROM zoo """ name = "layout.cte_bracket" aliases = ("L018",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler( {"with_compound_statement"}, provide_raw_stack=True ) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """WITH clause closing bracket should be aligned with WITH keyword. Look for a with clause and evaluate the position of closing brackets. """ # We only trigger on start_bracket (open parenthesis) assert context.segment.is_type("with_compound_statement") # Find the end brackets for the CTE *query* (i.e. ignore optional # list of CTE columns). cte_end_brackets: Set[RawSegment] = set() for cte in ( FunctionalContext(context) .segment.children(sp.is_type("common_table_expression")) .iterate_segments() ): cte_start_bracket = ( cte.children() .last(sp.is_type("bracketed")) .children() .first(sp.is_type("start_bracket")) ) cte_end_bracket = ( cte.children() .last(sp.is_type("bracketed")) .children() .last(sp.is_type("end_bracket")) ) if cte_start_bracket and cte_end_bracket: self.logger.debug( "Found CTE with brackets: %s & %s", cte_start_bracket, cte_end_bracket, ) # Are they on the same line? # NOTE: This assertion should be fairly safe because # there aren't many reasons for an bracket to not yet # be positioned. assert cte_start_bracket[0].pos_marker assert cte_end_bracket[0].pos_marker if ( cte_start_bracket[0].pos_marker.line_no == cte_end_bracket[0].pos_marker.line_no ): # Same line self.logger.debug("Skipping because on same line.") continue # Otherwise add to the ones to check. cte_end_brackets.add(cast(RawSegment, cte_end_bracket[0])) for seg in cte_end_brackets: contains_non_whitespace = False idx = context.segment.raw_segments.index(seg) self.logger.debug("End bracket %s has idx %s", seg, idx) # Search backward through the raw segments from just before # the location of the bracket. 
for elem in context.segment.raw_segments[idx - 1 :: -1]: if elem.is_type("newline"): break elif not elem.is_type("indent", "whitespace"): self.logger.debug("Found non-whitespace: %s", elem) contains_non_whitespace = True break if contains_non_whitespace: # We have to move it to a newline return LintResult( anchor=seg, fixes=[ LintFix.create_before( seg, [ NewlineSegment(), ], ) ], ) return None sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT08.py000066400000000000000000000201271451700765000214730ustar00rootroot00000000000000"""Implementation of Rule LT08.""" from typing import List, Optional from sqlfluff.core.parser import NewlineSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_LT08(BaseRule): """Blank line expected but not found after CTE closing bracket. **Anti-pattern** There is no blank line after the CTE closing bracket. In queries with many CTEs, this hinders readability. .. code-block:: sql WITH plop AS ( SELECT * FROM foo ) SELECT a FROM plop **Best practice** Add a blank line. .. code-block:: sql WITH plop AS ( SELECT * FROM foo ) SELECT a FROM plop """ name = "layout.cte_newline" aliases = ("L022",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"with_compound_statement"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[List[LintResult]]: """Blank line expected but not found after CTE definition.""" error_buffer = [] global_comma_style = context.config.get( "line_position", ["layout", "type", "comma"] ) assert context.segment.is_type("with_compound_statement") # First we need to find all the commas, the end brackets, the # things that come after that and the blank lines in between. # Find all the closing brackets. They are our anchor points. bracket_indices = [] expanded_segments = list( context.segment.iter_segments(expanding=["common_table_expression"]) ) for idx, seg in enumerate(expanded_segments): if seg.is_type("bracketed"): bracket_indices.append(idx) # Work through each point and deal with it individually for bracket_idx in bracket_indices: forward_slice = expanded_segments[bracket_idx:] seg_idx = 1 line_idx = 0 comma_seg_idx = 0 blank_lines = 0 comma_line_idx = None line_blank = False comma_style: str line_starts = {} comment_lines = [] self.logger.info( "## CTE closing bracket found at %s, idx: %s. Forward slice: %.20r", forward_slice[0].pos_marker, bracket_idx, "".join(elem.raw for elem in forward_slice), ) # Work forward to map out the following segments. while ( forward_slice[seg_idx].is_type("comma") or not forward_slice[seg_idx].is_code ): if forward_slice[seg_idx].is_type("newline"): if line_blank: # It's a blank line! blank_lines += 1 line_blank = True line_idx += 1 line_starts[line_idx] = seg_idx + 1 elif forward_slice[seg_idx].is_type("comment"): # Lines with comments aren't blank line_blank = False comment_lines.append(line_idx) elif forward_slice[seg_idx].is_type("comma"): # Keep track of where the comma is. # We'll evaluate it later. comma_line_idx = line_idx comma_seg_idx = seg_idx seg_idx += 1 # Infer the comma style (NB this could be different for each case!) if comma_line_idx is None: comma_style = "final" elif line_idx == 0: comma_style = "oneline" elif comma_line_idx == 0: comma_style = "trailing" elif comma_line_idx == line_idx: comma_style = "leading" else: comma_style = "floating" # Readout of findings self.logger.info( "blank_lines: %s, comma_line_idx: %s. 
final_line_idx: %s, " "final_seg_idx: %s", blank_lines, comma_line_idx, line_idx, seg_idx, ) self.logger.info( "comma_style: %r, line_starts: %r, comment_lines: %r", comma_style, line_starts, comment_lines, ) # If we've got blank lines. We're good. if blank_lines >= 1: continue # We've got an issue self.logger.info("!! Found CTE without enough blank lines.") # Based on the current location of the comma we insert newlines # to correct the issue. # First handle the potential simple case of a current one line fix_type = "create_before" # In most cases we just insert newlines. if comma_style == "oneline": # Here we respect the target comma style to insert at the # relevant point. if global_comma_style == "trailing": # Add a blank line after the comma fix_point = forward_slice[comma_seg_idx + 1] # Optionally here, if the segment we've landed on is # whitespace then we REPLACE it rather than inserting. if forward_slice[comma_seg_idx + 1].is_type("whitespace"): fix_type = "replace" elif global_comma_style == "leading": # Add a blank line before the comma fix_point = forward_slice[comma_seg_idx] else: # pragma: no cover raise NotImplementedError( f"Unexpected global comma style {global_comma_style!r}" ) # In both cases it's a double newline. num_newlines = 2 else: # In the following cases we only care which one we're in # when comments don't get in the way. If they *do*, then # we just work around them. if not comment_lines or line_idx - 1 not in comment_lines: self.logger.info("Comment routines not applicable") if comma_style in ("trailing", "final", "floating"): # Detected an existing trailing comma or it's a final # CTE, OR the comma isn't leading or trailing. # If the preceding segment is whitespace, replace it if forward_slice[seg_idx - 1].is_type("whitespace"): fix_point = forward_slice[seg_idx - 1] fix_type = "replace" else: # Otherwise add a single newline before the end # content. fix_point = forward_slice[seg_idx] elif comma_style == "leading": # Detected an existing leading comma. fix_point = forward_slice[comma_seg_idx] else: self.logger.info("Handling preceding comments") offset = 1 while line_idx - offset in comment_lines: offset += 1 # If the offset - 1 equals the line_idx then there aren't # really any comment-only lines (ref #2945). # Reset to line_idx fix_point = forward_slice[ line_starts[line_idx - (offset - 1) or line_idx] ] num_newlines = 1 fixes = [ LintFix( fix_type, fix_point, [NewlineSegment()] * num_newlines, ) ] # Create a result, anchored on the start of the next content. error_buffer.append(LintResult(anchor=forward_slice[seg_idx], fixes=fixes)) # Return the buffer if we have one. 
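        # A quick illustration of the `or None` idiom on the next line:
        # an empty list is falsy, so "no findings" becomes None rather
        # than an empty list.
        #
        #   >>> [] or None
        #   >>> ["finding"] or None
        #   ['finding']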
return error_buffer or None sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT09.py000066400000000000000000000410451451700765000214760ustar00rootroot00000000000000"""Implementation of Rule LT09.""" from typing import List, NamedTuple, Optional, Sequence from sqlfluff.core.parser import BaseSegment, NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class SelectTargetsInfo(NamedTuple): """Info about select targets and nearby whitespace.""" select_idx: int first_new_line_idx: int first_select_target_idx: int first_whitespace_idx: int comment_after_select_idx: int select_targets: Sequence[BaseSegment] from_segment: Optional[BaseSegment] pre_from_whitespace: List[BaseSegment] class Rule_LT09(BaseRule): """Select targets should be on a new line unless there is only one select target. .. note:: By default, a wildcard (e.g. ``SELECT *``) is considered a single select target. If you want it to be treated as multiple select targets, configure ``wildcard_policy = multiple``. **Anti-pattern** Multiple select targets on the same line. .. code-block:: sql select a, b from foo; -- Single select target on its own line. SELECT a FROM foo; **Best practice** Multiple select targets each on their own line. .. code-block:: sql select a, b from foo; -- Single select target on the same line as the ``SELECT`` -- keyword. SELECT a FROM foo; -- When select targets span multiple lines, however they -- can still be on a new line. SELECT SUM( 1 + SUM( 2 + 3 ) ) AS col FROM test_table; """ name = "layout.select_targets" aliases = ("L036",) groups = ("all", "layout") config_keywords = ["wildcard_policy"] crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: self.wildcard_policy: str assert context.segment.is_type("select_clause") select_targets_info = self._get_indexes(context) select_clause = FunctionalContext(context).segment wildcards = select_clause.children( sp.is_type("select_clause_element") ).children(sp.is_type("wildcard_expression")) has_wildcard = bool(wildcards) if len(select_targets_info.select_targets) == 1 and ( not has_wildcard or self.wildcard_policy == "single" ): return self._eval_single_select_target_element( select_targets_info, context, ) elif len(select_targets_info.select_targets): return self._eval_multiple_select_target_elements( select_targets_info, context.segment ) return None @staticmethod def _get_indexes(context: RuleContext) -> SelectTargetsInfo: children = FunctionalContext(context).segment.children() select_targets = children.select(sp.is_type("select_clause_element")) first_select_target_idx = children.find(select_targets.get()) selects = children.select(sp.is_keyword("select")) select_idx = children.find(selects.get()) if selects else -1 newlines = children.select(sp.is_type("newline")) first_new_line_idx = children.find(newlines.get()) if newlines else -1 comment_after_select_idx = -1 if newlines: comment_after_select = children.select( sp.is_type("comment"), start_seg=selects.get(), stop_seg=newlines.get(), loop_while=sp.or_( sp.is_type("comment"), sp.is_type("whitespace"), sp.is_meta() ), ) if comment_after_select: comment_after_select_idx = ( children.find(comment_after_select.get()) if comment_after_select else -1 ) first_whitespace_idx = -1 if first_new_line_idx != -1: # TRICKY: Ignore whitespace prior to 
the first newline, e.g. if # the line with "SELECT" (before any select targets) has trailing # whitespace. segments_after_first_line = children.select( sp.is_type("whitespace"), start_seg=children[first_new_line_idx] ) first_whitespace_idx = children.find(segments_after_first_line.get()) siblings_post = FunctionalContext(context).siblings_post from_segment = siblings_post.first(sp.is_type("from_clause")).first().get() pre_from_whitespace = siblings_post.select( sp.is_type("whitespace"), stop_seg=from_segment ) return SelectTargetsInfo( select_idx, first_new_line_idx, first_select_target_idx, first_whitespace_idx, comment_after_select_idx, select_targets, from_segment, list(pre_from_whitespace), ) def _eval_multiple_select_target_elements( self, select_targets_info, segment ) -> Optional[LintResult]: """Multiple select targets. Ensure each is on a separate line.""" fixes = [] previous_code = None select_clause_raws = Segments(segment).raw_segments for i, select_target in enumerate(select_targets_info.select_targets): assert select_target.pos_marker target_start_line = select_target.pos_marker.working_line_no target_initial_code = ( Segments(select_target).raw_segments.first(sp.is_code()).get() ) assert target_initial_code previous_code = ( select_clause_raws.select( # Get the first code that isn't a comma. select_if=sp.and_(sp.is_code(), sp.not_(sp.raw_is(","))), start_seg=previous_code, stop_seg=target_initial_code, ) .last() .get() ) assert previous_code assert previous_code.pos_marker previous_end_line = previous_code.pos_marker.working_line_no self.logger.debug( "- Evaluating %s [%s, %s]: Prev ends with: %s", select_target, previous_end_line, target_start_line, previous_code, ) # Check whether this target *starts* on the same line that the # previous one *ends* on. If they are on the same line, insert a newline. if target_start_line == previous_end_line: # Find and delete any whitespace before the select target. start_seg = select_targets_info.select_idx # If any select modifier (e.g. distinct ) is present, start # there rather than at the beginning. modifier = segment.get_child("select_clause_modifier") if modifier: start_seg = segment.segments.index(modifier) ws_to_delete = segment.select_children( start_seg=segment.segments[start_seg] if not i else select_targets_info.select_targets[i - 1], select_if=lambda s: s.is_type("whitespace"), loop_while=lambda s: s.is_type("whitespace", "comma") or s.is_meta, ) fixes += [LintFix.delete(ws) for ws in ws_to_delete] fixes.append(LintFix.create_before(select_target, [NewlineSegment()])) # If we are at the last select target check if the FROM clause # is on the same line, and if so move it to its own line. if select_targets_info.from_segment: if (i + 1 == len(select_targets_info.select_targets)) and ( select_target.pos_marker.working_line_no == select_targets_info.from_segment.pos_marker.working_line_no ): fixes.extend( [ LintFix.delete(ws) for ws in select_targets_info.pre_from_whitespace ] ) fixes.append( LintFix.create_before( select_targets_info.from_segment, [NewlineSegment()], ) ) if fixes: return LintResult(anchor=segment, fixes=fixes) return None def _eval_single_select_target_element( self, select_targets_info, context: RuleContext ): select_clause = FunctionalContext(context).segment parent_stack = context.parent_stack target_idx = select_targets_info.first_select_target_idx select_children = select_clause.children() target_seg = select_children[target_idx] # If it's all on one line, then there's no issue. 
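# Hedged example of the check below: for "SELECT a FROM foo" there is
# no newline between SELECT and the target, so the
# select_idx < first_new_line_idx < target_idx chain fails and we
# return early; only a target pushed onto a later line is rewritten.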
if not ( select_targets_info.select_idx < select_targets_info.first_new_line_idx < target_idx ): self.logger.info( "Target at index %s is already on a single line.", target_idx, ) return None # Does the target contain a newline? # i.e. even if it's a single element, does it already span more than # one line? if "newline" in target_seg.descendant_type_set: self.logger.info( "Target at index %s spans multiple lines so ignoring.", target_idx, ) return None if select_targets_info.comment_after_select_idx != -1: # The SELECT is followed by a comment on the same line. In order # to autofix this, we'd need to move the select target between # SELECT and the comment and potentially delete the entire line # where the select target was (if it is now empty). This is # *fairly tricky and complex*, in part because the newline on # the select target's line is several levels higher in the # parser tree. Hence, we currently don't autofix this. Could be # autofixed in the future if/when we have the time. return LintResult(anchor=select_clause.get()) # Prepare the select clause which will be inserted insert_buff = [WhitespaceSegment(), target_seg] # Delete the first select target from its original location. # We'll add it to the right section at the end, once we know # what to add. initial_deletes = [target_seg] # If there's whitespace before it, delete that too. if select_children[target_idx - 1].is_type("whitespace"): initial_deletes.append(select_children[target_idx - 1]) # Do we have a modifier? modifier: Optional[Segments] modifier = select_children.first(sp.is_type("select_clause_modifier")) if ( # Check if the modifier is one we care about modifier # We only care if it's not already on the first line. and select_children.index(modifier.get()) >= select_targets_info.first_new_line_idx ): # Prepend it to the insert buffer insert_buff = [WhitespaceSegment(), modifier[0]] + insert_buff modifier_idx = select_children.index(modifier.get()) # Delete the whitespace after it (which is two after, thanks to indent) if ( len(select_children) > modifier_idx + 1 and select_children[modifier_idx + 2].is_whitespace ): initial_deletes.append(select_children[modifier_idx + 2]) # Delete the modifier itself initial_deletes.append(modifier[0]) # Set the position marker for removing the preceding # whitespace and newline, which we'll use below. start_idx = modifier_idx start_seg = modifier[0] else: # Set the position marker for removing the preceding # whitespace and newline, which we'll use below. start_idx = target_idx start_seg = select_children[select_targets_info.first_new_line_idx] fixes = [ # Insert the select_clause in place of the first newline in the # Select statement LintFix.replace( select_children[select_targets_info.first_new_line_idx], insert_buff, ), # Materialise any deletes so far... *(LintFix.delete(seg) for seg in initial_deletes), ] if parent_stack and parent_stack[-1].is_type("select_statement"): select_stmt = parent_stack[-1] select_clause_idx = select_stmt.segments.index(select_clause.get()) after_select_clause_idx = select_clause_idx + 1 if len(select_stmt.segments) > after_select_clause_idx: add_newline = True to_delete: Sequence[BaseSegment] = [target_seg] next_segment = select_stmt.segments[after_select_clause_idx] if next_segment.is_type("newline"): # Since we're deleting the newline, we should also delete all # whitespace before it or it will add random whitespace to # following statements. 
So walk back through the segment # deleting whitespace until you get the previous newline, or # something else. to_delete = select_children.reversed().select( loop_while=sp.is_type("whitespace"), start_seg=select_children[start_idx], ) if to_delete: # The select_clause is immediately followed by a # newline. Delete the newline in order to avoid leaving # behind an empty line after fix, *unless* we stopped # due to something other than a newline. delete_last_newline = select_children[ start_idx - len(to_delete) - 1 ].is_type("newline") # Delete the newline if we decided to. if delete_last_newline: fixes.append(LintFix.delete(next_segment)) elif next_segment.is_type("whitespace"): # The select_clause has stuff after (most likely a comment) # Delete the whitespace immediately after the select clause # so the other stuff aligns nicely based on where the select # clause started. fixes.append(LintFix.delete(next_segment)) if to_delete: # Clean up by moving leftover select_clause segments. # Context: Some of the other fixes we make in # _eval_single_select_target_element() leave leftover # child segments that need to be moved to become # *siblings* of the select_clause. move_after_select_clause = select_children.select( start_seg=start_seg, stop_seg=to_delete[-1], ) # :TRICKY: Below, we have a couple places where we # filter to guard against deleting the same segment # multiple times -- this is illegal. all_deletes = set( fix.anchor for fix in fixes if fix.edit_type == "delete" ) for seg in (*to_delete, *move_after_select_clause): if seg not in all_deletes: fixes.append(LintFix.delete(seg)) all_deletes.add(seg) if move_after_select_clause or add_newline: fixes.append( LintFix.create_after( select_clause[0], ([NewlineSegment()] if add_newline else []) + list(move_after_select_clause), ) ) return LintResult( anchor=select_clause.get(), fixes=fixes, ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT10.py000066400000000000000000000115661451700765000214730ustar00rootroot00000000000000"""Implementation of Rule LT10.""" from typing import Optional from sqlfluff.core.parser import NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_LT10(BaseRule): """``SELECT`` modifiers (e.g. ``DISTINCT``) must be on the same line as ``SELECT``. **Anti-pattern** .. code-block:: sql select distinct a, b from x **Best practice** .. code-block:: sql select distinct a, b from x """ name = "layout.select_modifiers" aliases = ("L041",) groups = ("all", "core", "layout") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Select clause modifiers must appear on same line as SELECT.""" # We only care about select_clause. assert context.segment.is_type("select_clause") # Get children of select_clause and the corresponding select keyword. child_segments = FunctionalContext(context).segment.children() select_keyword = child_segments[0] # See if we have a select_clause_modifier. select_clause_modifier_seg = child_segments.first( sp.is_type("select_clause_modifier") ) # Rule doesn't apply if there's no select clause modifier. if not select_clause_modifier_seg: return None select_clause_modifier = select_clause_modifier_seg[0] # Are there any newlines between the select keyword # and the select clause modifier. 
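# Illustrative case (assumed input): in
#     select
#         distinct a
# a newline sits between SELECT and DISTINCT, so the selection below
# is non-empty and the rule proceeds to move the modifier up onto the
# SELECT line.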
leading_newline_segments = child_segments.select( select_if=sp.is_type("newline"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_keyword, ) # Rule doesn't apply if select clause modifier # is already on the same line as the select keyword. if not leading_newline_segments: return None # We should check if there is whitespace before the select clause modifier # and remove this during the lint fix. leading_whitespace_segments = child_segments.select( select_if=sp.is_type("whitespace"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_keyword, ) # We should also check if the following select clause element # is on the same line as the select clause modifier. trailing_newline_segments = child_segments.select( select_if=sp.is_type("newline"), loop_while=sp.or_(sp.is_whitespace(), sp.is_meta()), start_seg=select_clause_modifier, ) # We will insert these segments directly after the select keyword. edit_segments = [ WhitespaceSegment(), select_clause_modifier, ] if not trailing_newline_segments: # if the first select clause element is on the same line # as the select clause modifier then also insert a newline. edit_segments.append(NewlineSegment()) fixes = [] # Move select clause modifier after select keyword. fixes.append( LintFix.create_after( anchor_segment=select_keyword, edit_segments=edit_segments, ) ) # Delete original newlines and whitespace between select keyword # and select clause modifier. # If there is not a newline after the select clause modifier then delete # newlines between the select keyword and select clause modifier. if not trailing_newline_segments: fixes.extend(LintFix.delete(s) for s in leading_newline_segments) # If there is a newline after the select clause modifier then delete both the # newlines and whitespace between the select keyword and select clause modifier. else: fixes.extend( LintFix.delete(s) for s in leading_newline_segments + leading_whitespace_segments ) # Delete the original select clause modifier. fixes.append(LintFix.delete(select_clause_modifier)) # If there is whitespace (on the same line) after the select clause modifier # then also delete this. trailing_whitespace_segments = child_segments.select( select_if=sp.is_whitespace(), loop_while=sp.or_(sp.is_type("whitespace"), sp.is_meta()), start_seg=select_clause_modifier, ) if trailing_whitespace_segments: fixes.extend((LintFix.delete(s) for s in trailing_whitespace_segments)) return LintResult( anchor=context.segment, fixes=fixes, ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT11.py000066400000000000000000000026201451700765000214630ustar00rootroot00000000000000"""Implementation of Rule LT11.""" from typing import List from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_LT11(BaseRule): """Set operators should be surrounded by newlines. **Anti-pattern** In this example, `UNION ALL` is not on a line itself. .. code-block:: sql SELECT 'a' AS col UNION ALL SELECT 'b' AS col **Best practice** .. code-block:: sql SELECT 'a' AS col UNION ALL SELECT 'b' AS col """ name = "layout.set_operators" aliases = ("L065",) groups = ("all", "core", "layout") is_fix_compatible = True crawl_behaviour = SegmentSeekerCrawler({"set_operator"}) def _eval(self, context: RuleContext) -> List[LintResult]: """Set operators should be surrounded by newlines. 
For any set operator we check if there is any NewLineSegment in the non-code segments preceding or following it. In particular, as part of this rule we allow multiple NewLineSegments. """ return ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_results() ) sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT12.py000066400000000000000000000121531451700765000214660ustar00rootroot00000000000000"""Implementation of Rule LT12.""" from typing import List, Optional, Tuple from sqlfluff.core.parser import BaseSegment, NewlineSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp, tsp def get_trailing_newlines(segment: BaseSegment) -> List[BaseSegment]: """Returns list of trailing newlines in the tree.""" result = [] for seg in segment.recursive_crawl_all(reverse=True): if seg.is_type("newline"): result.append(seg) if not seg.is_whitespace and not seg.is_type("dedent", "end_of_file"): break return result def get_last_segment(segment: Segments) -> Tuple[List[BaseSegment], Segments]: """Returns rightmost & lowest descendant and its "parent stack".""" parent_stack: List[BaseSegment] = [] while True: children = segment.children() if children: parent_stack.append(segment[0]) segment = children.last(predicate=sp.not_(sp.is_type("end_of_file"))) else: return parent_stack, segment class Rule_LT12(BaseRule): """Files must end with a single trailing newline. **Anti-pattern** The content in file does not end with a single trailing newline. The ``$`` represents end of file. .. code-block:: sql :force: SELECT a FROM foo$ -- Ending on an indented line means there is no newline -- at the end of the file, the • represents space. SELECT ••••a FROM ••••foo ••••$ -- Ending on a semi-colon means the last line is not a -- newline. SELECT a FROM foo ;$ -- Ending with multiple newlines. SELECT a FROM foo $ **Best practice** Add trailing newline to the end. The ``$`` character represents end of file. .. code-block:: sql :force: SELECT a FROM foo $ -- Ensuring the last line is not indented so is just a -- newline. SELECT ••••a FROM ••••foo $ -- Even when ending on a semi-colon, ensure there is a -- newline after. SELECT a FROM foo ; $ """ name = "layout.end_of_file" # Between 2.0.0 and 2.0.4 we supported had a kebab-case name for this rule # so the old name remains here as an alias to enable backward compatibility. aliases = ("L009", "layout.end-of-file") groups = ("all", "core", "layout") targets_templated = True # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment. crawl_behaviour = RootOnlyCrawler() lint_phase = "post" is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Files must end with a single trailing newline. We only care about the segment and the siblings which come after it for this rule, we discard the others into the kwargs argument. """ # We only care about the final segment of the parse tree. parent_stack, segment = get_last_segment(FunctionalContext(context).segment) self.logger.debug("Found last segment as: %s", segment) if not segment: # NOTE: Edge case. If the file is totally empty, we won't find a final # segment. In this case return without error. 
return None trailing_newlines = Segments(*get_trailing_newlines(context.segment)) trailing_literal_newlines = trailing_newlines self.logger.debug( "Untemplated trailing newlines: %s", trailing_literal_newlines ) if context.templated_file: trailing_literal_newlines = trailing_newlines.select( loop_while=lambda seg: sp.templated_slices( seg, context.templated_file ).all(tsp.is_slice_type("literal")) ) self.logger.debug("Templated trailing newlines: %s", trailing_literal_newlines) if not trailing_literal_newlines: # We make an edit to create this segment after the child of the FileSegment. if len(parent_stack) == 1: fix_anchor_segment = segment[0] else: fix_anchor_segment = parent_stack[1] self.logger.debug("Anchor on: %s", fix_anchor_segment) return LintResult( anchor=segment[0], fixes=[ LintFix.create_after( fix_anchor_segment, [NewlineSegment()], ) ], ) elif len(trailing_literal_newlines) > 1: # Delete extra newlines. return LintResult( anchor=segment[0], fixes=[LintFix.delete(d) for d in trailing_literal_newlines[1:]], ) else: # Single newline, no need for fix. return None sqlfluff-2.3.5/src/sqlfluff/rules/layout/LT13.py000066400000000000000000000061361451700765000214730ustar00rootroot00000000000000"""Implementation of Rule LT13.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import RootOnlyCrawler from sqlfluff.utils.functional import Segments, rsp, sp class Rule_LT13(BaseRule): """Files must not begin with newlines or whitespace. **Anti-pattern** The file begins with newlines or whitespace. The ``^`` represents the beginning of the file. .. code-block:: sql :force: ^ SELECT a FROM foo -- Beginning on an indented line is also forbidden, -- (the • represents space). ••••SELECT ••••a FROM ••••foo **Best practice** Start file on either code or comment. (The ``^`` represents the beginning of the file.) .. code-block:: sql :force: ^SELECT a FROM foo -- Including an initial block comment. ^/* This is a description of my SQL code. */ SELECT a FROM foo -- Including an initial inline comment. ^--This is a description of my SQL code. SELECT a FROM foo """ name = "layout.start_of_file" aliases = ("L050",) groups = ("all", "layout") targets_templated = True # Use the RootOnlyCrawler to only call _eval() ONCE, with the root segment. crawl_behaviour = RootOnlyCrawler() lint_phase = "post" is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Files must not begin with newlines or whitespace.""" # Only check raw segments. This ensures we don't try and delete the same # whitespace multiple times (i.e. for non-raw segments higher in the # tree). raw_segments = [] whitespace_types = {"newline", "whitespace", "indent", "dedent"} for seg in context.segment.recursive_crawl_all(): if not seg.is_raw(): continue if seg.is_type(*whitespace_types): raw_segments.append(seg) continue raw_stack = Segments(*raw_segments, templated_file=context.templated_file) # Non-whitespace segment. if ( not raw_stack.all(sp.is_meta()) # It is possible that a template segment (e.g. # {{ config(materialized='view') }}) renders to an empty string # and as such is omitted from the parsed tree. We therefore # should flag if a templated raw slice intersects with the # source slices in the raw stack and skip this rule to avoid # risking collisions with template objects. 
and not raw_stack.raw_slices.any(rsp.is_slice_type("templated")) ): return LintResult( anchor=context.segment, fixes=[LintFix.delete(d) for d in raw_stack], ) else: break return None sqlfluff-2.3.5/src/sqlfluff/rules/layout/__init__.py000066400000000000000000000024061451700765000225430ustar00rootroot00000000000000"""The aliasing plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.layout.LT01 import Rule_LT01 from sqlfluff.rules.layout.LT02 import Rule_LT02 from sqlfluff.rules.layout.LT03 import Rule_LT03 from sqlfluff.rules.layout.LT04 import Rule_LT04 from sqlfluff.rules.layout.LT05 import Rule_LT05 from sqlfluff.rules.layout.LT06 import Rule_LT06 from sqlfluff.rules.layout.LT07 import Rule_LT07 from sqlfluff.rules.layout.LT08 import Rule_LT08 from sqlfluff.rules.layout.LT09 import Rule_LT09 from sqlfluff.rules.layout.LT10 import Rule_LT10 from sqlfluff.rules.layout.LT11 import Rule_LT11 from sqlfluff.rules.layout.LT12 import Rule_LT12 from sqlfluff.rules.layout.LT13 import Rule_LT13 return [ Rule_LT01, Rule_LT02, Rule_LT03, Rule_LT04, Rule_LT05, Rule_LT06, Rule_LT07, Rule_LT08, Rule_LT09, Rule_LT10, Rule_LT11, Rule_LT12, Rule_LT13, ] sqlfluff-2.3.5/src/sqlfluff/rules/references/000077500000000000000000000000001451700765000212345ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/references/RF01.py000066400000000000000000000213231451700765000222570ustar00rootroot00000000000000"""Implementation of Rule RF01.""" from dataclasses import dataclass, field from typing import List, Optional, Tuple, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.rules import ( BaseRule, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.core.rules.reference import object_ref_matches_table from sqlfluff.utils.analysis.query import Query _START_TYPES = [ "delete_statement", "merge_statement", "select_statement", "update_statement", ] @dataclass class RF01Query(Query): """Query with custom RF01 info.""" aliases: List[AliasInfo] = field(default_factory=list) standalone_aliases: List[str] = field(default_factory=list) class Rule_RF01(BaseRule): """References cannot reference objects not present in ``FROM`` clause. .. note:: This rule is disabled by default for BigQuery, Databricks, Hive, Redshift, SOQL and SparkSQL due to the support of things like structs and lateral views which trigger false positives. It can be enabled with the ``force_enable = True`` flag. **Anti-pattern** In this example, the reference ``vee`` has not been declared. .. code-block:: sql SELECT vee.a FROM foo **Best practice** Remove the reference. .. code-block:: sql SELECT a FROM foo """ name = "references.from" aliases = ("L026",) groups = ("all", "core", "references") config_keywords = ["force_enable"] # If any of the parents would have also triggered the rule, don't fire # because they will more accurately process any internal references. 
crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) _dialects_disabled_by_default = [ "bigquery", "databricks", "hive", "redshift", "soql", "sparksql", ] def _eval(self, context: RuleContext) -> List[LintResult]: # Config type hints self.force_enable: bool if ( context.dialect.name in self._dialects_disabled_by_default and not self.force_enable ): return [] violations: List[LintResult] = [] dml_target_table: Optional[Tuple[str, ...]] = None self.logger.debug("Trigger on: %s", context.segment) if not context.segment.is_type("select_statement"): # Extract first table reference. This will be the target # table in a DML statement. table_reference = next( context.segment.recursive_crawl("table_reference"), None ) if table_reference: dml_target_table = self._table_ref_as_tuple(table_reference) self.logger.debug("DML Reference Table: %s", dml_target_table) # Verify table references in any SELECT statements found in or # below context.segment in the parser tree. query = RF01Query.from_segment(context.segment, context.dialect) self._analyze_table_references( query, dml_target_table, context.dialect, violations ) return violations @classmethod def _alias_info_as_tuples(cls, alias_info: AliasInfo) -> List[Tuple[str, ...]]: result: List[Tuple[str, ...]] = [] if alias_info.aliased: result.append((alias_info.ref_str,)) if alias_info.object_reference: result.append(cls._table_ref_as_tuple(alias_info.object_reference)) return result @staticmethod def _table_ref_as_tuple(table_reference) -> Tuple[str, ...]: return tuple(ref.part for ref in table_reference.iter_raw_references()) def _analyze_table_references( self, query: RF01Query, dml_target_table: Optional[Tuple[str, ...]], dialect: Dialect, violations: List[LintResult], ) -> None: # For each query... for selectable in query.selectables: select_info = selectable.select_info self.logger.debug( "Selectable: %s", selectable, ) if select_info: # Record the available tables. query.aliases += select_info.table_aliases query.standalone_aliases += select_info.standalone_aliases self.logger.debug( "Aliases: %s %s", [alias.ref_str for alias in select_info.table_aliases], select_info.standalone_aliases, ) # Try and resolve each reference to a value in query.aliases (or # in an ancestor query). for r in select_info.reference_buffer: if not self._should_ignore_reference(r, selectable): # This function walks up the query's parent stack if necessary. violation = self._resolve_reference( r, self._get_table_refs(r, dialect), dml_target_table, query ) if violation: violations.append(violation) # Visit children. for child in query.children: self._analyze_table_references( cast(RF01Query, child), dml_target_table, dialect, violations ) @staticmethod def _should_ignore_reference(reference, selectable) -> bool: ref_path = selectable.selectable.path_to(reference) # Ignore references occurring in an "INTO" clause: # - They are table references, not column references. # - They are the target table, similar to an INSERT or UPDATE # statement, thus not expected to match a table in the FROM # clause. if ref_path: return any(ps.segment.is_type("into_table_clause") for ps in ref_path) else: return False # pragma: no cover @staticmethod def _get_table_refs(ref, dialect): """Given ObjectReferenceSegment, determine possible table references.""" tbl_refs = [] # First, handle any schema.table references. 
for sr, tr in ref.extract_possible_multipart_references( levels=[ ref.ObjectReferenceLevel.SCHEMA, ref.ObjectReferenceLevel.TABLE, ] ): tbl_refs.append((tr, (sr.part, tr.part))) # Maybe check for simple table references. Two cases: # - For most dialects, skip this if it's a schema+table reference -- the # reference was specific, so we shouldn't ignore that by looking # elsewhere.) # - Always do this in BigQuery. BigQuery table references are frequently # ambiguous because BigQuery SQL supports structures, making some # multi-level "." references impossible to interpret with certainty. # We may need to genericize this code someday to support other # dialects. If so, this check should probably align somehow with # whether the dialect overrides # ObjectReferenceSegment.extract_possible_references(). if not tbl_refs or dialect.name in ["bigquery"]: for tr in ref.extract_possible_references( level=ref.ObjectReferenceLevel.TABLE ): tbl_refs.append((tr, (tr.part,))) return tbl_refs def _resolve_reference( self, r, tbl_refs, dml_target_table: Optional[Tuple[str, ...]], query: RF01Query ): # Does this query define the referenced table? possible_references = [tbl_ref[1] for tbl_ref in tbl_refs] targets = [] for alias in query.aliases: targets += self._alias_info_as_tuples(alias) for standalone_alias in query.standalone_aliases: targets.append((standalone_alias,)) if not object_ref_matches_table(possible_references, targets): # No. Check the parent query, if there is one. if query.parent: return self._resolve_reference( r, tbl_refs, dml_target_table, cast(RF01Query, query.parent) ) # No parent query. If there's a DML statement at the root, check its # target table or alias. elif not dml_target_table or not object_ref_matches_table( possible_references, [dml_target_table] ): return LintResult( # Return the first segment rather than the string anchor=tbl_refs[0][0].segments[0], description=f"Reference {r.raw!r} refers to table/view " "not found in the FROM clause or found in ancestor " "statement.", ) sqlfluff-2.3.5/src/sqlfluff/rules/references/RF02.py000066400000000000000000000077001451700765000222630ustar00rootroot00000000000000"""Implementation of Rule RF02.""" from typing import List, Optional import regex from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import LintResult from sqlfluff.rules.aliasing.AL04 import Rule_AL04 class Rule_RF02(Rule_AL04): """References should be qualified if select has more than one referenced table/view. .. note:: Except if they're present in a ``USING`` clause. **Anti-pattern** In this example, the reference ``vee`` has not been declared, and the variables ``a`` and ``b`` are potentially ambiguous. .. code-block:: sql SELECT a, b FROM foo LEFT JOIN vee ON vee.a = foo.a **Best practice** Add the references. .. code-block:: sql SELECT foo.a, vee.b FROM foo LEFT JOIN vee ON vee.a = foo.a """ name = "references.qualification" aliases = ("L027",) groups = ("all", "references") # Crawl behaviour is defined in AL04 def _lint_references_and_aliases( self, table_aliases: List[AliasInfo], standalone_aliases: List[str], references, col_aliases: List[ColumnAliasInfo], using_cols: List[str], parent_select: Optional[BaseSegment], ) -> Optional[List[LintResult]]: # Config type hints self.ignore_words_regex: str # Do we have more than one? If so, all references should be qualified. if len(table_aliases) <= 1: return None # Get the ignore_words_list configuration. 
try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. This is # very slow. ignore_words_list = self._init_ignore_words_list() # A buffer to keep any violations. violation_buff = [] # Check all the references that we have. for r in references: # Skip if in ignore list if ignore_words_list and r.raw.lower() in ignore_words_list: continue # Skip if matches ignore regex if self.ignore_words_regex and regex.search(self.ignore_words_regex, r.raw): continue this_ref_type = r.qualification() # Discard column aliases that # refer to the current column reference. col_alias_names = [ c.alias_identifier_name for c in col_aliases if r not in c.column_reference_segments ] if ( this_ref_type == "unqualified" # Allow unqualified columns that # are actually aliases defined # in a different select clause element. and r.raw not in col_alias_names # Allow columns defined in a USING expression. and r.raw not in using_cols # Allow columns defined as standalone aliases # (e.g. value table functions from bigquery) and r.raw not in standalone_aliases ): violation_buff.append( LintResult( anchor=r, description=f"Unqualified reference {r.raw!r} found in " "select with more than one referenced table/view.", ) ) return violation_buff or None def _init_ignore_words_list(self) -> List[str]: """Called first time rule is evaluated to fetch & cache the policy.""" ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] return self.ignore_words_list sqlfluff-2.3.5/src/sqlfluff/rules/references/RF03.py000066400000000000000000000260041451700765000222620ustar00rootroot00000000000000"""Implementation of Rule RF03.""" from typing import Iterator, List, Optional, Set from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo from sqlfluff.core.parser.segments import BaseSegment, SymbolSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import IdentifierSegment, ObjectReferenceSegment from sqlfluff.utils.analysis.query import Query from sqlfluff.utils.analysis.select import SelectStatementColumnsAndTables _START_TYPES = ["select_statement", "set_expression", "with_compound_statement"] class Rule_RF03(BaseRule): """References should be consistent in statements with a single table. .. note:: For BigQuery, Hive and Redshift this rule is disabled by default. This is due to historical false positives associated with STRUCT data types. This default behaviour may be changed in the future. The rule can be enabled with the ``force_enable = True`` flag. "consistent" will be fixed to "qualified" if inconsistency is found. **Anti-pattern** In this example, only the field ``b`` is referenced. .. code-block:: sql SELECT a, foo.b FROM foo **Best practice** Add or remove references to all fields. .. code-block:: sql SELECT a, b FROM foo -- Also good SELECT foo.a, foo.b FROM foo """ name = "references.consistent" aliases = ("L028",) groups = ("all", "references") config_keywords = [ "single_table_references", "force_enable", ] # If any of the parents would have also triggered the rule, don't fire # because they will more accurately process any internal references. 
crawl_behaviour = SegmentSeekerCrawler(set(_START_TYPES), allow_recurse=False) _is_struct_dialect = False _dialects_with_structs = ["bigquery", "hive", "redshift"] # This could be turned into an option _fix_inconsistent_to = "qualified" is_fix_compatible = True def _eval(self, context: RuleContext) -> EvalResultType: """Override base class for dialects that use structs, or SELECT aliases.""" # Config type hints self.force_enable: bool # Some dialects use structs (e.g. column.field) which look like # table references and so incorrectly trigger this rule. if ( context.dialect.name in self._dialects_with_structs and not self.force_enable ): return LintResult() if context.dialect.name in self._dialects_with_structs: self._is_struct_dialect = True query: Query = Query.from_segment(context.segment, dialect=context.dialect) visited: Set = set() # Recursively visit and check each query in the tree. return list(self._visit_queries(query, visited)) def _iter_available_targets(self, query) -> Iterator[str]: """Iterate along a list of valid alias targets.""" for selectable in query.selectables: select_info = selectable.select_info for alias in select_info.table_aliases: if alias.ref_str: yield alias.ref_str def _visit_queries(self, query: Query, visited: set) -> Iterator[LintResult]: select_info: Optional[SelectStatementColumnsAndTables] = None if query.selectables: select_info = query.selectables[0].select_info # How many table names are visible from here? If more than one then do # nothing. if select_info and len(select_info.table_aliases) == 1: fixable = True # :TRICKY: Subqueries in the column list of a SELECT can see tables # in the FROM list of the containing query. Thus, count tables at # the *parent* query level. possible_ref_tables = list(self._iter_available_targets(query)) if query.parent: possible_ref_tables += list( self._iter_available_targets(query.parent) ) if len(possible_ref_tables) > 1: # If more than one table name is visible, check for and report # potential lint warnings, but don't generate fixes, because # fixes are unsafe if there's more than one table visible. fixable = False yield from _check_references( select_info.table_aliases, select_info.standalone_aliases, select_info.reference_buffer, select_info.col_aliases, self.single_table_references, # type: ignore self._is_struct_dialect, self._fix_inconsistent_to, fixable, ) children = list(query.children) # 'query.children' includes CTEs and "main" queries, but not queries in # the "FROM" list. We want to visit those as well. if select_info: for a in select_info.table_aliases: for q in query.crawl_sources(a.from_expression_element, True): if not isinstance(q, Query): continue # Check for previously visited selectables to avoid possible # infinite recursion, e.g.: # WITH test1 AS (SELECT i + 1, j + 1 FROM test1) # SELECT * FROM test1; if any(s.selectable in visited for s in q.selectables): continue visited.update(s.selectable for s in q.selectables) children.append(q) for child in children: yield from self._visit_queries(child, visited) def _check_references( table_aliases: List[AliasInfo], standalone_aliases: List[str], references: List[ObjectReferenceSegment], col_aliases: List[ColumnAliasInfo], single_table_references: str, is_struct_dialect: bool, fix_inconsistent_to: Optional[str], fixable: bool, ) -> Iterator[LintResult]: """Iterate through references and check consistency.""" # A buffer to keep any violations. 
col_alias_names: List[str] = [c.alias_identifier_name for c in col_aliases] table_ref_str: str = table_aliases[0].ref_str table_ref_str_source = table_aliases[0].segment # Check all the references that we have. seen_ref_types: Set[str] = set() for ref in references: this_ref_type: str = ref.qualification() if this_ref_type == "qualified" and is_struct_dialect: # If this col appears "qualified" check if it is more logically a struct. if next(ref.iter_raw_references()).part != table_ref_str: this_ref_type = "unqualified" lint_res = _validate_one_reference( single_table_references, ref, this_ref_type, standalone_aliases, table_ref_str, table_ref_str_source, col_alias_names, seen_ref_types, fixable, ) seen_ref_types.add(this_ref_type) if not lint_res: continue if fix_inconsistent_to and single_table_references == "consistent": # If we found a "consistent" error but we have a fix directive, # recurse with a different single_table_references value yield from _check_references( table_aliases, standalone_aliases, references, col_aliases, # NB vars are passed in a different order here single_table_references=fix_inconsistent_to, is_struct_dialect=is_struct_dialect, fix_inconsistent_to=None, fixable=fixable, ) yield lint_res def _validate_one_reference( single_table_references: str, ref: BaseSegment, this_ref_type: str, standalone_aliases: List[str], table_ref_str: str, table_ref_str_source: Optional[BaseSegment], col_alias_names: List[str], seen_ref_types: Set[str], fixable: bool, ) -> Optional[LintResult]: # We skip any unqualified wildcard references (i.e. *). They shouldn't # count. if not ref.is_qualified() and ref.is_type("wildcard_identifier"): # type: ignore return None # Oddball case: Column aliases provided via function calls in by # FROM or JOIN. References to these don't need to be qualified. # Note there could be a table with a column by the same name as # this alias, so avoid bogus warnings by just skipping them # entirely rather than trying to enforce anything. if ref.raw in standalone_aliases: return None # Oddball case: tsql table variables can't be used to qualify references. # This appears here as an empty string for table_ref_str. if not table_ref_str: return None # Certain dialects allow use of SELECT alias in WHERE clauses if ref.raw in col_alias_names: return None # Check first for consistency if single_table_references == "consistent": if seen_ref_types and this_ref_type not in seen_ref_types: return LintResult( anchor=ref, description=f"{this_ref_type.capitalize()} reference " f"{ref.raw!r} found in single table select which is " "inconsistent with previous references.", ) # Config is consistent, and this reference matches types so far. return None # Otherwise check for a specified type of referencing. # If it's the right kind already, just return. if single_table_references == this_ref_type: return None # If not, it's the wrong type and we should handle it. 
if single_table_references == "unqualified": # If this is qualified we must have a "table", "."" at least fixes = [LintFix.delete(el) for el in ref.segments[:2]] if fixable else None return LintResult( anchor=ref, fixes=fixes, description="{} reference {!r} found in single table " "select.".format(this_ref_type.capitalize(), ref.raw), ) fixes = None if fixable: fixes = [ LintFix.create_before( ref.segments[0] if len(ref.segments) else ref, source=[table_ref_str_source] if table_ref_str_source else None, edit_segments=[ IdentifierSegment( raw=table_ref_str, type="naked_identifier", ), SymbolSegment(raw=".", type="symbol"), ], ) ] return LintResult( anchor=ref, fixes=fixes, description="{} reference {!r} found in single table " "select.".format(this_ref_type.capitalize(), ref.raw), ) sqlfluff-2.3.5/src/sqlfluff/rules/references/RF04.py000066400000000000000000000103251451700765000222620ustar00rootroot00000000000000"""Implementation of Rule RF04.""" from typing import List, Optional import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_RF04(BaseRule): """Keywords should not be used as identifiers. Although `unreserved` keywords `can` be used as identifiers, and `reserved words` can be used as quoted identifiers, best practice is to avoid where possible, to avoid any misunderstandings as to what the alias represents. .. note:: Note that `reserved` keywords cannot be used as unquoted identifiers and will cause parsing errors and so are not covered by this rule. **Anti-pattern** In this example, ``SUM`` (built-in function) is used as an alias. .. code-block:: sql SELECT sum.a FROM foo AS sum **Best practice** Avoid keywords as the name of an alias. .. code-block:: sql SELECT vee.a FROM foo AS vee """ name = "references.keywords" aliases = ("L029",) groups = ("all", "references") crawl_behaviour = SegmentSeekerCrawler({"naked_identifier", "quoted_identifier"}) config_keywords = [ "unquoted_identifiers_policy", "quoted_identifiers_policy", "ignore_words", "ignore_words_regex", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Keywords should not be used as identifiers.""" # Config type hints self.ignore_words_regex: str # Skip 1 letter identifiers. These can be datepart keywords # (e.g. "d" for Snowflake) but most people expect to be able to use them. if len(context.segment.raw) == 1: return LintResult(memory=context.memory) # Get the ignore list configuration and cache it try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. # So we can cache them for next time for speed. 
ignore_words_list = self._init_ignore_string() # Skip if in ignore list if ignore_words_list and context.segment.raw.lower() in ignore_words_list: return LintResult(memory=context.memory) # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, context.segment.raw ): return LintResult(memory=context.memory) if ( ( context.segment.is_type("naked_identifier") and identifiers_policy_applicable( self.unquoted_identifiers_policy, # type: ignore context.parent_stack, ) and ( context.segment.raw.upper() in context.dialect.sets("unreserved_keywords") ) ) ) or ( ( context.segment.is_type("quoted_identifier") and identifiers_policy_applicable( self.quoted_identifiers_policy, context.parent_stack # type: ignore ) and ( context.segment.raw.upper()[1:-1] in context.dialect.sets("unreserved_keywords") or context.segment.raw.upper()[1:-1] in context.dialect.sets("reserved_keywords") ) ) ): return LintResult(anchor=context.segment) else: return None def _init_ignore_string(self) -> List[str]: """Called first time rule is evaluated to fetch & cache the ignore_words.""" # Use str() in case bools are passed which might otherwise be read as bool ignore_words_config = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] ignore_words_list = self.ignore_words_list return ignore_words_list sqlfluff-2.3.5/src/sqlfluff/rules/references/RF05.py000066400000000000000000000203321451700765000222620ustar00rootroot00000000000000"""Implementation of Rule RF05.""" from typing import List, Optional, Set import regex from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.identifers import identifiers_policy_applicable class Rule_RF05(BaseRule): """Do not use special characters in identifiers. **Anti-pattern** Using special characters within identifiers when creating or aliasing objects. .. code-block:: sql CREATE TABLE DBO.ColumnNames ( [Internal Space] INT, [Greater>Than] INT, [Less str: """Returns additional allowed characters, with adjustments for dialect.""" result: Set[str] = set() if self.additional_allowed_characters: result.update(self.additional_allowed_characters) if dialect_name == "bigquery": # In BigQuery, also allow hyphens. result.update("-") return "".join(result) def _eval(self, context: RuleContext) -> Optional[LintResult]: """Do not use special characters in object names.""" # Config type hints self.quoted_identifiers_policy: str self.unquoted_identifiers_policy: str self.allow_space_in_identifier: bool self.additional_allowed_characters: str self.ignore_words: str self.ignore_words_regex: str # Confirm it's a single identifier. assert context.segment.is_type("naked_identifier", "quoted_identifier") # Get the ignore_words_list configuration. try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. This is # very slow. 
ignore_words_list = self._init_ignore_words_list() # Assume unquoted (we'll update if quoted) policy = self.unquoted_identifiers_policy identifier = context.segment.raw # Skip if in ignore list if ignore_words_list and identifier.lower() in ignore_words_list: return None # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier ): return LintResult(memory=context.memory) # Do some extra processing for quoted identifiers. if context.segment.is_type("quoted_identifier"): # Update the default policy to quoted policy = self.quoted_identifiers_policy # Strip the quotes first identifier = context.segment.raw[1:-1] # Skip if in ignore list - repeat check now we've strip the quotes if ignore_words_list and identifier.lower() in ignore_words_list: return None # Skip if matches ignore regex - repeat check now we've strip the quotes if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier ): return LintResult(memory=context.memory) # BigQuery table references are quoted in back ticks so allow dots # # It also allows a star at the end of table_references for wildcards # (https://cloud.google.com/bigquery/docs/querying-wildcard-tables) # # Strip both out before testing the identifier if ( context.dialect.name in ["bigquery"] and context.parent_stack and context.parent_stack[-1].is_type("table_reference") ): if identifier and identifier[-1] == "*": identifier = identifier[:-1] identifier = identifier.replace(".", "") # Databricks & SparkSQL file references for direct file query # are quoted in back ticks to allow for identifiers common # in file paths and regex patterns for path globbing # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # # Path Glob Filters (done inline for SQL direct file query) # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter # if ( context.dialect.name in ["databricks", "sparksql"] and context.parent_stack ): # Databricks & SparkSQL file references for direct file query # are quoted in back ticks to allow for identifiers common # in file paths and regex patterns for path globbing # https://spark.apache.org/docs/latest/sql-ref-syntax-qry-select-file.html # # Path Glob Filters (done inline for SQL direct file query) # https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter # if context.parent_stack[-1].is_type("file_reference"): return None # Databricks & SparkSQL properties keys # used for setting table and runtime # configurations denote namespace using dots, so these are # removed before testing L057 to not trigger false positives # Runtime configurations: # https://spark.apache.org/docs/latest/configuration.html#application-properties # Example configurations for table: # https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#configuration # if context.parent_stack[-1].is_type("property_name_identifier"): identifier = identifier.replace(".", "") # Strip spaces if allowed (note a separate config as only valid for quoted # identifiers) if self.allow_space_in_identifier: identifier = identifier.replace(" ", "") # We always allow underscores so strip them out identifier = identifier.replace("_", "") # redshift allows a # at the beginning of temporary table names if ( context.dialect.name == "redshift" and identifier[0] == "#" and context.parent_stack and context.parent_stack[-1].is_type("table_reference") ): identifier = identifier[1:] # Set the identified minus the allowed characters 
additional_allowed_characters = self._get_additional_allowed_characters( context.dialect.name ) if additional_allowed_characters: identifier = identifier.translate( str.maketrans("", "", additional_allowed_characters) ) # Finally test if the remaining identifier is only made up of alphanumerics if identifiers_policy_applicable(policy, context.parent_stack) and not ( identifier.isalnum() ): return LintResult(anchor=context.segment) return None def _init_ignore_words_list(self) -> List[str]: """Called first time rule is evaluated to fetch & cache the policy.""" ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] return self.ignore_words_list sqlfluff-2.3.5/src/sqlfluff/rules/references/RF06.py000066400000000000000000000204721451700765000222700ustar00rootroot00000000000000"""Implementation of Rule RF06.""" from typing import TYPE_CHECKING, List, Optional, Type, cast import regex from sqlfluff.core.parser import CodeSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp if TYPE_CHECKING: # pragma: no cover from sqlfluff.core.parser.parsers import RegexParser class Rule_RF06(BaseRule): """Unnecessary quoted identifier. This rule will fail if the quotes used to quote an identifier are (un)necessary depending on the ``force_quote_identifier`` configuration. When ``prefer_quoted_identifiers = False`` (default behaviour), the quotes are unnecessary, except for reserved keywords and special characters in identifiers. .. note:: This rule is disabled by default for Postgres and Snowflake because they allow quotes as part of the column name. In other words, ``date`` and ``"date"`` are two different columns. It can be enabled with the ``force_enable = True`` flag. **Anti-pattern** In this example, a valid unquoted identifier, that is also not a reserved keyword, is needlessly quoted. .. code-block:: sql SELECT 123 as "foo" **Best practice** Use unquoted identifiers where possible. .. code-block:: sql SELECT 123 as foo When ``prefer_quoted_identifiers = True``, the quotes are always necessary, no matter if the identifier is valid, a reserved keyword, or contains special characters. .. note:: Note due to different quotes being used by different dialects supported by `SQLFluff`, and those quotes meaning different things in different contexts, this mode is not ``sqlfluff fix`` compatible. **Anti-pattern** In this example, a valid unquoted identifier, that is also not a reserved keyword, is required to be quoted. .. code-block:: sql SELECT 123 as foo **Best practice** Use quoted identifiers. .. code-block:: sql SELECT 123 as "foo" -- For ANSI, ... -- or SELECT 123 as `foo` -- For BigQuery, MySql, ... """ name = "references.quoting" aliases = ("L059",) groups = ("all", "references") config_keywords = [ "prefer_quoted_identifiers", "prefer_quoted_keywords", "ignore_words", "ignore_words_regex", "force_enable", ] crawl_behaviour = SegmentSeekerCrawler({"quoted_identifier", "naked_identifier"}) _dialects_allowing_quotes_in_column_names = ["postgres", "snowflake"] is_fix_compatible = True # Ignore "password_auth" type to allow quotes around passwords within # `CREATE USER` statements in Exasol dialect. # `EXECUTE AS` clauses in TSQL also require quotes. 
_ignore_types: List[str] = ["password_auth", "execute_as_clause"] def _eval(self, context: RuleContext) -> Optional[LintResult]: """Unnecessary quoted identifier.""" # Config type hints self.prefer_quoted_identifiers: bool self.prefer_quoted_keywords: bool self.ignore_words: str self.ignore_words_regex: str self.force_enable: bool # Some dialects allow quotes as PART OF the column name. In other words, # these are two different columns: # - date # - "date" # For safety, disable this rule by default in those dialects. if ( context.dialect.name in self._dialects_allowing_quotes_in_column_names and not self.force_enable ): return LintResult() # Ignore some segment types if FunctionalContext(context).parent_stack.any(sp.is_type(*self._ignore_types)): return None identifier_is_quoted = not regex.search( r'^[^"\'[].+[^"\'\]]$', context.segment.raw ) identifier_contents = context.segment.raw if identifier_is_quoted: identifier_contents = identifier_contents[1:-1] identifier_is_keyword = identifier_contents.upper() in context.dialect.sets( "reserved_keywords" ) or identifier_contents.upper() in context.dialect.sets("unreserved_keywords") if self.prefer_quoted_identifiers: context_policy = "naked_identifier" else: context_policy = "quoted_identifier" # Get the ignore_words_list configuration. try: ignore_words_list = self.ignore_words_list except AttributeError: # First-time only, read the settings from configuration. This is # very slow. ignore_words_list = self._init_ignore_words_list() # Skip if in ignore list if ignore_words_list and identifier_contents.lower() in ignore_words_list: return None # Skip if matches ignore regex if self.ignore_words_regex and regex.search( self.ignore_words_regex, identifier_contents ): return LintResult(memory=context.memory) if self.prefer_quoted_keywords and identifier_is_keyword: if not identifier_is_quoted: return LintResult( context.segment, description=( f"Missing quoted keyword identifier {identifier_contents}." ), ) return None # Ignore the segments that are not of the same type as the defined policy above. # Also TSQL has a keyword called QUOTED_IDENTIFIER which maps to the name so # need to explicitly check for that. if not context.segment.is_type( context_policy ) or context.segment.raw.lower() in ( "quoted_identifier", "naked_identifier", ): return None # Manage cases of identifiers must be quoted first. # Naked identifiers are _de facto_ making this rule fail as configuration forces # them to be quoted. # In this case, it cannot be fixed as which quote to use is dialect dependent if self.prefer_quoted_identifiers: return LintResult( context.segment, description=f"Missing quoted identifier {identifier_contents}.", ) # Now we only deal with NOT forced quoted identifiers configuration # (meaning prefer_quoted_identifiers=False). # Retrieve NakedIdentifierSegment RegexParser for the dialect. naked_identifier_parser = cast( "RegexParser", context.dialect._library["NakedIdentifierSegment"] ) anti_template = cast(str, naked_identifier_parser.anti_template) NakedIdentifierSegment = cast( Type[CodeSegment], context.dialect.get_segment("IdentifierSegment") ) # Check if quoted_identifier_contents could be a valid naked identifier # and that it is not a reserved keyword. 
if ( regex.fullmatch( naked_identifier_parser.template, identifier_contents, regex.IGNORECASE, ) is not None ) and ( regex.fullmatch( anti_template, identifier_contents, regex.IGNORECASE, ) is None ): return LintResult( context.segment, fixes=[ LintFix.replace( context.segment, [ NakedIdentifierSegment( raw=identifier_contents, type="naked_identifier", ) ], ) ], description=f"Unnecessary quoted identifier {context.segment.raw}.", ) return None def _init_ignore_words_list(self) -> List[str]: """Called first time rule is evaluated to fetch & cache the policy.""" ignore_words_config: str = str(getattr(self, "ignore_words")) if ignore_words_config and ignore_words_config != "None": self.ignore_words_list = self.split_comma_separated_string( ignore_words_config.lower() ) else: self.ignore_words_list = [] return self.ignore_words_list sqlfluff-2.3.5/src/sqlfluff/rules/references/__init__.py000066400000000000000000000013611451700765000233460ustar00rootroot00000000000000"""The references plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.references.RF01 import Rule_RF01 from sqlfluff.rules.references.RF02 import Rule_RF02 from sqlfluff.rules.references.RF03 import Rule_RF03 from sqlfluff.rules.references.RF04 import Rule_RF04 from sqlfluff.rules.references.RF05 import Rule_RF05 from sqlfluff.rules.references.RF06 import Rule_RF06 return [Rule_RF01, Rule_RF02, Rule_RF03, Rule_RF04, Rule_RF05, Rule_RF06] sqlfluff-2.3.5/src/sqlfluff/rules/structure/000077500000000000000000000000001451700765000211535ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST01.py000066400000000000000000000052161451700765000222200ustar00rootroot00000000000000"""Implementation of Rule ST01.""" from typing import Optional, Tuple from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp class Rule_ST01(BaseRule): """Do not specify ``else null`` in a case when statement (redundant). **Anti-pattern** .. code-block:: sql select case when name like '%cat%' then 'meow' when name like '%dog%' then 'woof' else null end from x **Best practice** Omit ``else null`` .. code-block:: sql select case when name like '%cat%' then 'meow' when name like '%dog%' then 'woof' end from x """ name = "structure.else_null" aliases = ("L035",) groups: Tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes. 0. Look for a case expression 1. Look for "ELSE" 2. Mark "ELSE" for deletion (populate "fixes") 3. Backtrack and mark all newlines/whitespaces for deletion 4. Look for a raw "NULL" segment 5.a. The raw "NULL" segment is found, we mark it for deletion and return 5.b. We reach the end of case when without matching "NULL": the rule passes """ assert context.segment.is_type("case_expression") children = FunctionalContext(context).segment.children() else_clause = children.first(sp.is_type("else_clause")) # Does the "ELSE" have a "NULL"? NOTE: Here, it's safe to look for # "NULL", as an expression would *contain* NULL but not be == NULL. 
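# A hedged walk-through of the fix below, using the example from the class
# docstring: for
#     CASE WHEN name LIKE '%cat%' THEN 'meow' ELSE NULL END
# the else_clause ("ELSE NULL") is deleted, together with the whitespace,
# newline and indent (meta) segments immediately before it, leaving
#     CASE WHEN name LIKE '%cat%' THEN 'meow' END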
if else_clause and else_clause.children( lambda child: child.raw_upper == "NULL" ): # Found ELSE with NULL. Delete the whole else clause as well as # indents/whitespaces/meta preceding the ELSE. :TRICKY: Note # the use of reversed() to make select() effectively search in # reverse. before_else = children.reversed().select( start_seg=else_clause[0], loop_while=sp.or_(sp.is_type("whitespace", "newline"), sp.is_meta()), ) return LintResult( anchor=context.segment, fixes=[LintFix.delete(else_clause[0])] + [LintFix.delete(seg) for seg in before_else], ) return None sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST02.py000066400000000000000000000223441451700765000222220ustar00rootroot00000000000000"""Implementation of Rule ST02.""" from typing import List, Optional, Tuple from sqlfluff.core.parser import ( KeywordSegment, SymbolSegment, WhitespaceSegment, WordSegment, ) from sqlfluff.core.parser.segments.base import BaseSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_ST02(BaseRule): """Unnecessary ``CASE`` statement. **Anti-pattern** ``CASE`` statement returns booleans. .. code-block:: sql :force: select case when fab > 0 then true else false end as is_fab from fancy_table -- This rule can also simplify CASE statements -- that aim to fill NULL values. select case when fab is null then 0 else fab end as fab_clean from fancy_table -- This also covers where the case statement -- replaces NULL values with NULL values. select case when fab is null then null else fab end as fab_clean from fancy_table **Best practice** Reduce to ``WHEN`` condition within ``COALESCE`` function. .. code-block:: sql :force: select coalesce(fab > 0, false) as is_fab from fancy_table -- To fill NULL values. select coalesce(fab, 0) as fab_clean from fancy_table -- NULL filling NULL. select fab as fab_clean from fancy_table """ name = "structure.simple_case" aliases = ("L043",) groups: Tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True @staticmethod def _coalesce_fix_list( context: RuleContext, coalesce_arg_1: BaseSegment, coalesce_arg_2: BaseSegment, preceding_not: bool = False, ) -> List[LintFix]: """Generate list of fixes to convert CASE statement to COALESCE function.""" # Add coalesce and opening parenthesis. edits = [ WordSegment("coalesce", type="function_name_identifier"), SymbolSegment("(", type="start_bracket"), coalesce_arg_1, SymbolSegment(",", type="comma"), WhitespaceSegment(), coalesce_arg_2, SymbolSegment(")", type="end_bracket"), ] if preceding_not: not_edits: List[BaseSegment] = [ KeywordSegment("not"), WhitespaceSegment(), ] edits = not_edits + edits fixes = [ LintFix.replace( context.segment, edits, ) ] return fixes @staticmethod def _column_only_fix_list( context: RuleContext, column_reference_segment: BaseSegment, ) -> List[LintFix]: """Generate list of fixes to reduce CASE statement to a single column.""" fixes = [ LintFix.replace( context.segment, [column_reference_segment], ) ] return fixes def _eval(self, context: RuleContext) -> Optional[LintResult]: """Unnecessary CASE statement.""" # Look for CASE expression. if context.segment.segments[0].raw_upper == "CASE": # Find all 'WHEN' clauses and the optional 'ELSE' clause. 
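# A hedged sketch (raw SQL only; the helpers above actually build parser
# segments) of the two fix shapes produced by this method:
#     case when fab > 0 then true else false end   -->  coalesce(fab > 0, false)
#     case when fab > 0 then false else true end   -->  not coalesce(fab > 0, false)
# The second form is why _coalesce_fix_list takes a preceding_not flag.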
children = FunctionalContext(context).segment.children() when_clauses = children.select(sp.is_type("when_clause")) else_clauses = children.select(sp.is_type("else_clause")) # Can't fix if multiple WHEN clauses. if len(when_clauses) > 1: return None # Find condition and then expressions. condition_expression = when_clauses.children(sp.is_type("expression"))[0] then_expression = when_clauses.children(sp.is_type("expression"))[1] # Method 1: Check if THEN/ELSE expressions are both Boolean and can # therefore be reduced. if else_clauses: else_expression = else_clauses.children(sp.is_type("expression"))[0] upper_bools = ["TRUE", "FALSE"] if ( (then_expression.raw_upper in upper_bools) and (else_expression.raw_upper in upper_bools) and (then_expression.raw_upper != else_expression.raw_upper) ): coalesce_arg_1: BaseSegment = condition_expression coalesce_arg_2: BaseSegment = KeywordSegment("false") preceding_not = then_expression.raw_upper == "FALSE" fixes = self._coalesce_fix_list( context, coalesce_arg_1, coalesce_arg_2, preceding_not, ) return LintResult( anchor=condition_expression, fixes=fixes, description="Unnecessary CASE statement. " "Use COALESCE function instead.", ) # Method 2: Check if the condition expression is comparing a column # reference to NULL and whether that column reference is also in either the # THEN/ELSE expression. We can only apply this method when there is only # one condition in the condition expression. condition_expression_segments_raw = { segment.raw_upper for segment in condition_expression.segments } if {"IS", "NULL"}.issubset(condition_expression_segments_raw) and ( not condition_expression_segments_raw.intersection({"AND", "OR"}) ): # Check if the comparison is to NULL or NOT NULL. is_not_prefix = "NOT" in condition_expression_segments_raw # Locate column reference in condition expression. column_reference_segment = ( Segments(condition_expression) .children(sp.is_type("column_reference")) .get() ) # Return None if none found (this condition does not apply to functions) if not column_reference_segment: return None if else_clauses: else_expression = else_clauses.children(sp.is_type("expression"))[0] # Check if we can reduce the CASE expression to a single coalesce # function. if ( not is_not_prefix and column_reference_segment.raw_upper == else_expression.raw_upper ): coalesce_arg_1 = else_expression coalesce_arg_2 = then_expression elif ( is_not_prefix and column_reference_segment.raw_upper == then_expression.raw_upper ): coalesce_arg_1 = then_expression coalesce_arg_2 = else_expression else: return None if coalesce_arg_2.raw_upper == "NULL": # Can just specify the column on its own # rather than using a COALESCE function. return LintResult( anchor=condition_expression, fixes=self._column_only_fix_list( context, column_reference_segment, ), description="Unnecessary CASE statement. " f"Just use column '{column_reference_segment.raw}'.", ) return LintResult( anchor=condition_expression, fixes=self._coalesce_fix_list( context, coalesce_arg_1, coalesce_arg_2, ), description="Unnecessary CASE statement. " "Use COALESCE function instead.", ) elif column_reference_segment.raw_upper == then_expression.raw_upper: # Can just specify the column on its own # rather than using a COALESCE function. # In this case no ELSE statement is equivalent to ELSE NULL. return LintResult( anchor=condition_expression, fixes=self._column_only_fix_list( context, column_reference_segment, ), description="Unnecessary CASE statement. 
" f"Just use column '{column_reference_segment.raw}'.", ) return None sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST03.py000066400000000000000000000041731451700765000222230ustar00rootroot00000000000000"""Implementation of Rule ST03.""" from sqlfluff.core.rules import BaseRule, EvalResultType, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.analysis.query import Query class Rule_ST03(BaseRule): """Query defines a CTE (common-table expression) but does not use it. **Anti-pattern** Defining a CTE that is not used by the query is harmless, but it means the code is unnecessary and could be removed. .. code-block:: sql WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 **Best practice** Remove unused CTEs. .. code-block:: sql WITH cte1 AS ( SELECT a FROM t ) SELECT * FROM cte1 """ name = "structure.unused_cte" aliases = ("L045",) groups = ("all", "core", "structure") crawl_behaviour = SegmentSeekerCrawler({"with_compound_statement"}) def _eval(self, context: RuleContext) -> EvalResultType: result = [] query: Query = Query.from_root(context.segment, dialect=context.dialect) # Build up a dict of remaining CTEs (uppercased as not case sensitive). remaining_ctes = {k.upper(): k for k in query.ctes.keys()} # Work through all the references in the file, checking off CTES as the # are referenced. We don't recurse inside inner WITH statements. for reference in context.segment.recursive_crawl( "table_reference", no_recursive_seg_type="with_compound_statement" ): remaining_ctes.pop(reference.raw.upper(), None) # For any left un-referenced at the end. Raise an issue about them. for name in remaining_ctes.values(): cte = query.ctes[name] result += [ LintResult( anchor=cte.cte_name_segment, description=f"Query defines CTE " f'"{cte.cte_name_segment.raw}" ' f"but does not use it.", ) ] return result sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST04.py000066400000000000000000000104511451700765000222200ustar00rootroot00000000000000"""Implementation of Rule ST04.""" from sqlfluff.core.parser import NewlineSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, sp from sqlfluff.utils.reflow.reindent import construct_single_indent class Rule_ST04(BaseRule): """Nested ``CASE`` statement in ``ELSE`` clause could be flattened. **Anti-pattern** In this example, the outer ``CASE``'s ``ELSE`` is an unnecessary, nested ``CASE``. .. code-block:: sql SELECT CASE WHEN species = 'Cat' THEN 'Meow' ELSE CASE WHEN species = 'Dog' THEN 'Woof' END END as sound FROM mytable **Best practice** Move the body of the inner ``CASE`` to the end of the outer one. .. 
code-block:: sql SELECT CASE WHEN species = 'Cat' THEN 'Meow' WHEN species = 'Dog' THEN 'Woof' END AS sound FROM mytable """ name = "structure.nested_case" aliases = ("L058",) groups = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"case_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> LintResult: """Nested CASE statement in ELSE clause could be flattened.""" segment = FunctionalContext(context).segment assert segment.select(sp.is_type("case_expression")) case1_children = segment.children() case1_last_when = case1_children.last(sp.is_type("when_clause")).get() case1_else_clause = case1_children.select(sp.is_type("else_clause")) case1_else_expressions = case1_else_clause.children(sp.is_type("expression")) expression_children = case1_else_expressions.children() case2 = expression_children.select(sp.is_type("case_expression")) # The len() checks below are for safety, to ensure the CASE inside # the ELSE is not part of a larger expression. In that case, it's # not safe to simplify in this way -- we'd be deleting other code. if ( not case1_last_when or len(case1_else_expressions) > 1 or len(expression_children) > 1 or not case2 ): return LintResult() # We can assert that this exists because of the previous check. assert case1_last_when # We can also assert that we'll also have an else clause because # otherwise the case2 check above would fail. case1_else_clause_seg = case1_else_clause.get() assert case1_else_clause_seg # Delete stuff between the last "WHEN" clause and the "ELSE" clause. case1_to_delete = case1_children.select( start_seg=case1_last_when, stop_seg=case1_else_clause_seg ) # Delete the nested "CASE" expression. fixes = case1_to_delete.apply(lambda seg: LintFix.delete(seg)) tab_space_size: int = context.config.get("tab_space_size", ["indentation"]) indent_unit: str = context.config.get("indent_unit", ["indentation"]) # Determine the indentation to use when we move the nested "WHEN" # and "ELSE" clauses, based on the indentation of case1_last_when. # If no whitespace segments found, use default indent. indent = ( case1_children.select(stop_seg=case1_last_when) .reversed() .select(sp.is_type("whitespace")) ) indent_str = ( "".join(seg.raw for seg in indent) if indent else construct_single_indent(indent_unit, tab_space_size) ) # Move the nested "when" and "else" clauses after the last outer # "when". nested_clauses = case2.children(sp.is_type("when_clause", "else_clause")) create_after_last_when = nested_clauses.apply( lambda seg: [NewlineSegment(), WhitespaceSegment(indent_str), seg] ) segments = [item for sublist in create_after_last_when for item in sublist] fixes.append(LintFix.create_after(case1_last_when, segments, source=segments)) # Delete the outer "else" clause. 
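# (Hedged note on the indent fallback above: with the assumed default
# configuration of indent_unit="space" and tab_space_size=4,
# construct_single_indent is expected to return four spaces, so the
# relocated WHEN/ELSE clauses still line up even when no whitespace
# segment precedes the last outer WHEN.)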
fixes.append(LintFix.delete(case1_else_clause_seg)) return LintResult(case2[0], fixes=fixes) sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST05.py000066400000000000000000000502241451700765000222230ustar00rootroot00000000000000"""Implementation of Rule ST05.""" from functools import partial from typing import Iterator, List, NamedTuple, Optional, Set, Tuple, Type, TypeVar, cast from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.dialects.common import AliasInfo from sqlfluff.core.parser import ( BaseSegment, CodeSegment, KeywordSegment, NewlineSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ( CTEDefinitionSegment, TableExpressionSegment, TableReferenceSegment, WithCompoundStatementSegment, ) from sqlfluff.utils.analysis.query import Query, Selectable from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import FunctionalContext, Segments from sqlfluff.utils.functional.segment_predicates import ( is_keyword, is_type, is_whitespace, ) _SELECT_TYPES = [ "with_compound_statement", "set_expression", "select_statement", ] class _NestedSubQuerySummary(NamedTuple): query: Query selectable: Selectable table_alias: AliasInfo select_source_names: Set[str] class Rule_ST05(BaseRule): """Join/From clauses should not contain subqueries. Use CTEs instead. By default this rule is configured to allow subqueries within ``FROM`` clauses but not within ``JOIN`` clauses. If you prefer a stricter lint then this is configurable. .. note:: Some dialects don't allow CTEs, and for those dialects this rule makes no sense and should be disabled. **Anti-pattern** .. code-block:: sql select a.x, a.y, b.z from a join ( select x, z from b ) using(x) **Best practice** .. code-block:: sql with c as ( select x, z from b ) select a.x, a.y, c.z from a join c using(x) """ name = "structure.subquery" aliases = ("L042",) groups = ("all", "structure") config_keywords = ["forbid_subquery_in"] crawl_behaviour = SegmentSeekerCrawler(set(_SELECT_TYPES)) _config_mapping = { "join": ["join_clause"], "from": ["from_expression_element"], "both": ["join_clause", "from_expression_element"], } is_fix_compatible = True def _eval(self, context: RuleContext) -> EvalResultType: """Join/From clauses should not contain subqueries. Use CTEs instead.""" self.forbid_subquery_in: str functional_context = FunctionalContext(context) segment = functional_context.segment parent_stack = functional_context.parent_stack is_select = segment.all(is_type(*_SELECT_TYPES)) is_select_child = parent_stack.any(is_type(*_SELECT_TYPES)) if not is_select or is_select_child: # Nothing to do. 
return None query: Query = Query.from_segment(context.segment, context.dialect) # generate an instance which will track and shape our output CTE ctes = _CTEBuilder() # Init the output/final select & # populate existing CTEs for cte in query.ctes.values(): ctes.insert_cte(cte.cte_definition_segment) is_with = segment.all(is_type("with_compound_statement")) # TODO: consider if we can fix recursive CTEs is_recursive = is_with and len(segment.children(is_keyword("recursive"))) > 0 case_preference = _get_case_preference(segment) output_select = segment if is_with: output_select = segment.children( is_type( "set_expression", "select_statement", ) ) # If there are offending elements calculate fixes clone_map = SegmentCloneMap(segment[0]) result = self._lint_query( dialect=context.dialect, query=query, ctes=ctes, case_preference=case_preference, clone_map=clone_map, ) if result: lint_result, from_expression, alias_name, subquery_parent = result assert any( from_expression is seg for seg in subquery_parent.recursive_crawl_all() ) this_seg_clone = clone_map[from_expression] new_table_ref = _create_table_ref(alias_name, context.dialect) this_seg_clone.segments = (new_table_ref,) ctes.replace_with_clone(subquery_parent, clone_map) # Issue 3617: In T-SQL (and possibly other dialects) the automated fix # leaves parentheses in a location that causes a syntax error. This is an # unusual corner case. For simplicity, we still generate the lint warning # but don't try to generate a fix. Someone could look at this later (a # correct fix would involve removing the parentheses.) bracketed_ctas = [seg.type for seg in parent_stack[-2:]] == [ "create_table_statement", "bracketed", ] if bracketed_ctas or ctes.has_duplicate_aliases() or is_recursive: # If we have duplicate CTE names just don't fix anything # Return the lint warnings anyway return lint_result # Compute fix. output_select_clone = clone_map[output_select[0]] fixes = ctes.ensure_space_after_from( output_select[0], output_select_clone, subquery_parent ) new_select = ctes.compose_select( output_select_clone, case_preference=case_preference ) lint_result.fixes = [ LintFix.replace( segment[0], edit_segments=[new_select], ) ] lint_result.fixes.extend(fixes) return lint_result return None def _nested_subqueries( self, query: Query, dialect: Dialect ) -> Iterator[_NestedSubQuerySummary]: parent_types = self._config_mapping[self.forbid_subquery_in] for q in [query] + list(query.ctes.values()): for selectable in q.selectables: if not selectable.select_info: continue # pragma: no cover select_source_names = set() for a in selectable.select_info.table_aliases: # For each table in FROM, return table name and any alias. if a.ref_str: select_source_names.add(a.ref_str) if a.object_reference: select_source_names.add(a.object_reference.raw) for table_alias in selectable.select_info.table_aliases: try: query = Query.from_root( table_alias.from_expression_element, dialect ) except AssertionError: # Couldn't find a selectable, carry on. 
continue path_to = selectable.selectable.path_to( table_alias.from_expression_element ) if not ( # The from_expression_element table_alias.from_expression_element.is_type(*parent_types) # Or any of it's parents up to the selectable or any(ps.segment.is_type(*parent_types) for ps in path_to) ): continue if _is_correlated_subquery( Segments(query.selectables[0].selectable), select_source_names, dialect, ): continue yield _NestedSubQuerySummary( q, selectable, table_alias, select_source_names ) def _lint_query( self, dialect: Dialect, query: Query, ctes: "_CTEBuilder", case_preference: str, clone_map, ) -> Optional[Tuple[LintResult, BaseSegment, str, BaseSegment]]: """Given the root query, compute lint warnings.""" nsq: _NestedSubQuerySummary for nsq in self._nested_subqueries(query, dialect): alias_name, _ = ctes.create_cte_alias(nsq.table_alias) # 'anchor' is the TableExpressionSegment we fix/replace w/CTE name. anchor = nsq.table_alias.from_expression_element.segments[0] new_cte = _create_cte_seg( # 'prep_1 as (select ...)' alias_name=alias_name, subquery=clone_map[anchor], case_preference=case_preference, dialect=dialect, ) ctes.insert_cte(new_cte) # Grab the first keyword or symbol in the subquery to # use as the anchor. This makes the lint warning less # likely to be filtered out if a bit of the subquery # happens to be templated. anchor = next(anchor.recursive_crawl("keyword", "symbol")) res = LintResult( anchor=anchor, description=f"{nsq.query.selectables[0].selectable.type} clauses " "should not contain subqueries. Use CTEs instead", fixes=[], ) if len(nsq.query.selectables) == 1: return ( res, # FromExpressionElementSegment, parent of original "anchor" segment nsq.table_alias.from_expression_element, alias_name, # Name of CTE we're creating from the nested query # Query with the subquery: 'select a from (select x from b)' nsq.query.selectables[0].selectable, ) return None def _get_first_select_statement_descendant( segment: BaseSegment, ) -> Optional[BaseSegment]: """Find first SELECT statement segment (if any) in descendants of 'segment'.""" for select_statement in segment.recursive_crawl( "select_statement", recurse_into=False ): # We only want the first one. return select_statement return None # pragma: no cover def _is_correlated_subquery( nested_select: Segments, select_source_names: Set[str], dialect: Dialect ) -> bool: """Given nested select and the sources of its parent, determine if correlated. https://en.wikipedia.org/wiki/Correlated_subquery """ select_statement = _get_first_select_statement_descendant(nested_select[0]) if not select_statement: return False # pragma: no cover nested_select_info = get_select_statement_info(select_statement, dialect) if nested_select_info: for r in nested_select_info.reference_buffer: for tr in r.extract_possible_references(level=r.ObjectReferenceLevel.TABLE): # Check for correlated subquery, as indicated by use of a # parent reference. 
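# A hedged example of a correlated subquery (adapted from the class
# docstring, not a test fixture): in
#     select a.x from a join (select x from b where b.x = a.x) s using (x)
# the inner query references the outer table "a", so "a" appears in
# select_source_names, tr.part matches below, and the check returns True;
# correlated subqueries are never rewritten into CTEs by this rule.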
if tr.part in select_source_names: return True return False class _CTEBuilder: """Gather CTE parts, maintain order and track naming/aliasing.""" def __init__(self) -> None: self.ctes: List[CTEDefinitionSegment] = [] self.name_idx = 0 def list_used_names(self) -> List[str]: """Check CTEs and return used aliases.""" used_names: List[str] = [] for cte in self.ctes: id_seg = cte.get_identifier() cte_name = id_seg.raw if id_seg.is_type("quoted_identifier"): cte_name = cte_name[1:-1] used_names.append(cte_name) return used_names def has_duplicate_aliases(self) -> bool: used_names = self.list_used_names() return len(set(used_names)) != len(used_names) def insert_cte(self, cte: CTEDefinitionSegment) -> None: """Add a new CTE to the list as late as possible but before all its parents.""" # This should still have the position markers of its true position inbound_subquery = ( Segments(cte).children().last(lambda seg: bool(seg.pos_marker)) ) insert_position = next( ( i for i, el in enumerate(self.ctes) if _is_child(Segments(el).children().last(), inbound_subquery) ), len(self.ctes), ) self.ctes.insert(insert_position, cte) def create_cte_alias(self, alias: Optional[AliasInfo]) -> Tuple[str, bool]: """Find or create the name for the next CTE.""" if alias and alias.aliased and alias.ref_str: # If we know the name use it return alias.ref_str, False self.name_idx = self.name_idx + 1 name = f"prep_{self.name_idx}" if name in self.list_used_names(): # corner case where prep_x exists in origin query return self.create_cte_alias(None) return name, True def get_cte_segments(self) -> List[BaseSegment]: """Return a valid list of CTES with required padding segments.""" cte_segments: List[BaseSegment] = [] for cte in self.ctes: cte_segments = cte_segments + [ cte, SymbolSegment(",", type="comma"), NewlineSegment(), ] return cte_segments[:-2] def compose_select( self, output_select_clone: BaseSegment, case_preference: str ) -> BaseSegment: """Compose our final new CTE.""" # Compose the CTE. new_select = WithCompoundStatementSegment( segments=tuple( [ _segmentify("WITH", case_preference), WhitespaceSegment(), *self.get_cte_segments(), NewlineSegment(), output_select_clone, ] ) ) return new_select def ensure_space_after_from( self, output_select: BaseSegment, output_select_clone: BaseSegment, subquery_parent: BaseSegment, ) -> List[LintFix]: """Ensure there's whitespace between "FROM" and the CTE table name.""" fixes = [] if subquery_parent is output_select: ( missing_space_after_from, from_clause, from_clause_children, from_segment, ) = self._missing_space_after_from(output_select_clone) if missing_space_after_from: # Case 1: from_clause is a child of cloned "output_select_clone" # that will be inserted by a fix. We can directly manipulate the # "segments" list. to insert whitespace between "FROM" and the # CTE table name. idx_from = from_clause_children.index(from_segment[0]) from_clause.segments = list( from_clause_children[: idx_from + 1] + (WhitespaceSegment(),) + from_clause_children[idx_from + 1 :] ) else: ( missing_space_after_from, from_clause, from_clause_children, from_segment, ) = self._missing_space_after_from(subquery_parent) if missing_space_after_from: # Case 2. from_segment is in the current parse tree, so we can't # modify it directly. Create a LintFix to do it. 
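# A hedged illustration of Case 2: for source SQL written as
#     select a from(select 1 as a) s
# there is no whitespace segment after "from", so once the subquery is
# replaced by a CTE name the output would read "fromprep_1". The LintFix
# below inserts the missing whitespace to produce "from prep_1".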
fixes.append( LintFix.create_after(from_segment[0], [WhitespaceSegment()]) ) return fixes @staticmethod def _missing_space_after_from(segment: BaseSegment): missing_space_after_from = False from_clause_children = None from_segment = None from_clause = segment.get_child("from_clause") if from_clause is not None: from_clause_children = Segments(*from_clause.segments) from_segment = from_clause_children.first(is_keyword("from")) if from_segment and not from_clause_children.select( start_seg=from_segment[0], loop_while=is_whitespace() ): missing_space_after_from = True return missing_space_after_from, from_clause, from_clause_children, from_segment def replace_with_clone(self, segment, clone_map) -> None: for idx, cte in enumerate(self.ctes): if any(segment is seg for seg in cte.recursive_crawl_all()): self.ctes[idx] = clone_map[self.ctes[idx]] return None def _is_child(maybe_parent: Segments, maybe_child: Segments) -> bool: """Is the child actually between the start and end markers of the parent.""" assert ( len(maybe_child) == 1 ), "Cannot assess child relationship of multiple segments" assert ( len(maybe_parent) == 1 ), "Cannot assess child relationship of multiple parents" child_markers = maybe_child[0].pos_marker parent_pos = maybe_parent[0].pos_marker assert parent_pos and child_markers if child_markers < parent_pos.start_point_marker(): return False # pragma: no cover if child_markers > parent_pos.end_point_marker(): return False return True S = TypeVar("S", bound=Type[BaseSegment]) def _get_seg(class_def: S, dialect: Dialect) -> S: return cast(S, dialect.get_segment(class_def.__name__)) def _create_cte_seg( alias_name: str, subquery: BaseSegment, case_preference: str, dialect: Dialect ) -> CTEDefinitionSegment: CTESegment = _get_seg(CTEDefinitionSegment, dialect) IdentifierSegment = cast( Type[CodeSegment], dialect.get_segment("IdentifierSegment") ) element: CTEDefinitionSegment = CTESegment( segments=( IdentifierSegment( raw=alias_name, type="naked_identifier", ), WhitespaceSegment(), _segmentify("AS", casing=case_preference), WhitespaceSegment(), subquery, ) ) return element def _create_table_ref(table_name: str, dialect: Dialect) -> TableExpressionSegment: Seg = partial(_get_seg, dialect=dialect) TableExpressionSeg = Seg(TableExpressionSegment) TableReferenceSeg = Seg(TableReferenceSegment) IdentifierSegment = cast( Type[CodeSegment], dialect.get_segment("IdentifierSegment") ) table_seg = TableExpressionSeg( segments=( TableReferenceSeg( segments=( IdentifierSegment( raw=table_name, type="naked_identifier", ), ), ), ), ) return table_seg # type: ignore def _get_case_preference(root_select: Segments): # First get the segment itself so we have access to the generator root_segment = root_select.get() assert root_segment, "Root SELECT not found." # Get the first item of the recursive crawl. first_keyword = next( root_segment.recursive_crawl( "keyword", recurse_into=False, ), None, ) assert first_keyword, "Keyword not found." # Get case preference based on the case of that keyword. 
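# Hedged examples of the preference logic below: "select 1 from tbl"
# starts with a lowercase keyword, so the rewrite emits "with"/"as" in
# lower case via _segmentify; "SELECT 1 FROM TBL" yields "WITH"/"AS".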
if first_keyword.raw.islower(): return "LOWER" return "UPPER" def _segmentify(input_el: str, casing: str) -> BaseSegment: """Apply casing and convert strings to Keywords.""" input_el = input_el.lower() if casing == "UPPER": input_el = input_el.upper() return KeywordSegment(raw=input_el) class SegmentCloneMap: """Clones a segment tree, maps from original segments to their clones.""" def __init__(self, segment: BaseSegment): segment_copy = segment.copy() self.segment_map = {} for old_segment, new_segment in zip( segment.recursive_crawl_all(), segment_copy.recursive_crawl_all(), ): new_segment.pos_marker = old_segment.pos_marker self.segment_map[id(old_segment)] = new_segment def __getitem__(self, old_segment: BaseSegment) -> BaseSegment: return self.segment_map[id(old_segment)] sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST06.py000066400000000000000000000226261451700765000222310ustar00rootroot00000000000000"""Implementation of Rule ST06.""" from typing import Iterator, List, Optional, Tuple, Union from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import ( BaseRule, EvalResultType, LintFix, LintResult, RuleContext, ) from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_ST06(BaseRule): """Select wildcards then simple targets before calculations and aggregates. **Anti-pattern** .. code-block:: sql select a, *, row_number() over (partition by id order by date) as y, b from x **Best practice** Order ``select`` targets in ascending complexity .. code-block:: sql select *, a, b, row_number() over (partition by id order by date) as y from x """ name = "structure.column_order" aliases = ("L034",) groups = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"select_clause"}) is_fix_compatible = True def _validate(self, i: int, segment: BaseSegment) -> None: # Check if we've seen a more complex select target element already if self.seen_band_elements[i + 1 : :] != [[]] * len( self.seen_band_elements[i + 1 : :] ): # Found a violation (i.e. a simpler element that *follows* a more # complex element. self.violation_exists = True self.current_element_band: Optional[int] = i self.seen_band_elements[i].append(segment) def _eval(self, context: RuleContext) -> EvalResultType: self.violation_exists = False # Bands of select targets in order to be enforced select_element_order_preference: Tuple[ Tuple[Union[str, Tuple[str, ...]], ...], ... ] = ( ("wildcard_expression",), ( "object_reference", "literal", "cast_expression", ("function", "cast"), ("expression", "cast_expression"), ), ) # Track which bands have been seen, with additional empty list for the # non-matching elements. If we find a matching target element, we append the # element to the corresponding index. self.seen_band_elements: List[List[BaseSegment]] = [ [] for _ in select_element_order_preference ] + [ [] ] # type: ignore assert context.segment.is_type("select_clause") # Ignore select clauses which belong to: # - set expression, which is most commonly a union # - insert_statement # - create table statement # # In each of these contexts, the order of columns in a select should # be preserved. 
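# A hedged walk-through of the banding above, using the docstring query
# "select a, *, row_number() over (...) as y, b from x":
#     "*"                       -> band 0 (wildcard_expression)
#     "a", "b"                  -> band 1 (simple object_reference targets)
#     "row_number() over (...)" -> final catch-all band (complex target)
# "a" following "*" is fine; "b" following the window function is the
# out-of-order case this rule reorders.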
if len(context.parent_stack) >= 2 and context.parent_stack[-2].is_type( "insert_statement", "set_expression" ): return None if ( len(context.parent_stack) >= 3 and context.parent_stack[-3].is_type("insert_statement", "set_expression") and context.parent_stack[-2].is_type("with_compound_statement") ): return None if len(context.parent_stack) >= 3 and context.parent_stack[-3].is_type( "create_table_statement", "merge_statement" ): return None if ( len(context.parent_stack) >= 4 and context.parent_stack[-4].is_type( "create_table_statement", "merge_statement" ) and context.parent_stack[-2].is_type("with_compound_statement") ): return None select_clause_segment = context.segment select_target_elements = context.segment.get_children("select_clause_element") if not select_target_elements: return None # Iterate through all the select targets to find any order violations for segment in select_target_elements: # The band index of the current segment in # select_element_order_preference self.current_element_band = None # Compare the segment to the bands in select_element_order_preference for i, band in enumerate(select_element_order_preference): for e in band: # Identify simple select target if isinstance(e, str) and segment.get_child(e): self._validate(i, segment) # Identify function elif isinstance(e, tuple) and e[0] == "function": try: _function = segment.get_child("function") assert _function _function_name = _function.get_child("function_name") assert _function_name if _function_name.raw == e[1]: self._validate(i, segment) except (AttributeError, AssertionError): # If the segment doesn't match pass # Identify simple expression elif isinstance(e, tuple) and e[0] == "expression": try: _expression = segment.get_child("expression") assert _expression if ( _expression.get_child(e[1]) and _expression.segments[0].type in ( "column_reference", "object_reference", "literal", "cast_expression", ) # len == 2 to ensure the expression is 'simple' and ( len(_expression.segments) == 2 # cast_expression is one length or len(_expression.segments) == 1 ) ): self._validate(i, segment) except (AttributeError, AssertionError): # If the segment doesn't match pass # If the target doesn't exist in select_element_order_preference then it # is 'complex' and must go last if self.current_element_band is None: self.seen_band_elements[-1].append(segment) if self.violation_exists: if len(context.parent_stack) and any( self._implicit_column_references(context.parent_stack[-1]) ): # If there are implicit column references (i.e. column # numbers), warn but don't fix, because it's much more # complicated to autofix. return LintResult(anchor=select_clause_segment) # Create a list of all the edit fixes # We have to do this at the end of iterating through all the # select_target_elements to get the order correct. This means we can't # add a lint fix to each individual LintResult as we go ordered_select_target_elements = [ segment for band in self.seen_band_elements for segment in band ] # TODO: The "if" in the loop below compares corresponding items # to avoid creating "do-nothing" edits. A potentially better # approach would leverage difflib.SequenceMatcher.get_opcodes(), # which generates a list of edit actions (similar to the # command-line "diff" tool in Linux). This is more complex to # implement, but minimizing the number of LintFixes makes the # final application of patches (in "sqlfluff fix") more robust. 
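# A hedged sketch of the pairing below: with original order [a, *, y, b]
# and banded order [*, a, b, y], zip() pairs (a, *), (*, a), (y, b) and
# (b, y); every pair differs, so four replacement fixes are emitted.
# Pairs whose elements are the same segment object are skipped to avoid
# "do-nothing" edits, as per the difflib note above.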
fixes = [ LintFix.replace( initial_select_target_element, [replace_select_target_element], ) for initial_select_target_element, replace_select_target_element in zip( # noqa: E501 select_target_elements, ordered_select_target_elements ) if initial_select_target_element is not replace_select_target_element ] # Anchoring on the select statement segment ensures that # select statements which include macro targets are ignored # when ignore_templated_areas is set return LintResult(anchor=select_clause_segment, fixes=fixes) return None @classmethod def _implicit_column_references(cls, segment: BaseSegment) -> Iterator[BaseSegment]: """Yield any implicit ORDER BY or GROUP BY column references. This function was adapted from similar code in AM06. """ _ignore_types: List[str] = ["withingroup_clause", "window_specification"] if not segment.is_type(*_ignore_types): # Ignore Windowing clauses if segment.is_type("groupby_clause", "orderby_clause"): for seg in segment.segments: if seg.is_type("numeric_literal"): yield segment else: for seg in segment.segments: yield from cls._implicit_column_references(seg) sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST07.py000066400000000000000000000155251451700765000222320ustar00rootroot00000000000000"""Implementation of Rule ST07.""" from typing import List, Optional, Tuple from sqlfluff.core.parser import ( BaseSegment, KeywordSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ColumnReferenceSegment, IdentifierSegment from sqlfluff.utils.analysis.select import get_select_statement_info from sqlfluff.utils.functional import FunctionalContext, Segments, sp class Rule_ST07(BaseRule): """Prefer specifying join keys instead of using ``USING``. .. note:: This rule was originally taken from the `dbt Style Guide `_ which notes that: Certain warehouses have inconsistencies in ``USING`` results (specifically Snowflake). In fact `dbt removed it from their style guide in February 2022 `_. However, some like the rule, so for now we will keep it in SQLFluff, but encourage those that do not find value in the rule, to turn it off. .. note:: This rule is disabled for ClickHouse as it supports ``USING`` without brackets which this rule does not support. **Anti-pattern** .. code-block:: sql SELECT table_a.field_1, table_b.field_2 FROM table_a INNER JOIN table_b USING (id) **Best practice** Specify the keys directly .. code-block:: sql SELECT table_a.field_1, table_b.field_2 FROM table_a INNER JOIN table_b ON table_a.id = table_b.id """ name = "structure.using" aliases = ("L032",) groups: Tuple[str, ...] = ("all", "structure") crawl_behaviour = SegmentSeekerCrawler({"join_clause"}) is_fix_compatible = True _dialects_disabled_by_default = [ "clickhouse", ] def _eval(self, context: RuleContext) -> Optional[LintResult]: if context.dialect.name in self._dialects_disabled_by_default: return LintResult() """Look for USING in a join clause.""" segment = FunctionalContext(context).segment parent_stack = FunctionalContext(context).parent_stack # We are not concerned with non join clauses assert context.segment.is_type("join_clause") using_anchor = segment.children(sp.is_keyword("using")).first() # If there is no evidence of a USING then we exit if len(using_anchor) == 0: return None anchor = using_anchor.get() description = "Found USING statement. Expected only ON statements." 
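# For reference, a hedged before/after mirroring the class docstring:
#     FROM table_a INNER JOIN table_b USING (id)
#     -- becomes -->
#     FROM table_a INNER JOIN table_b ON table_a.id = table_b.id
# with one "<alias>.<col> = <alias>.<col>" condition per USING column,
# joined by AND (see _generate_join_conditions below).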
# All returns from here out will be some form of linting error. # we prepare the variable here unfixable_result = LintResult( anchor=anchor, description=description, ) tables_in_join = parent_stack.last().children( sp.is_type("join_clause", "from_expression_element") ) # We can only safely fix the first join clause if segment.get(0) != tables_in_join.get(1): return unfixable_result parent_select = parent_stack.last(sp.is_type("select_statement")).get() if not parent_select: # pragma: no cover return unfixable_result select_info = get_select_statement_info(parent_select, context.dialect) table_aliases = [ ta for ta in (select_info.table_aliases if select_info else []) if ta.ref_str ] if len(table_aliases) < 2: return unfixable_result to_delete, insert_after_anchor = _extract_deletion_sequence_and_anchor(segment) table_a, table_b = table_aliases[:2] edit_segments = [ KeywordSegment(raw="ON"), WhitespaceSegment(raw=" "), ] + _generate_join_conditions( table_a.ref_str, table_b.ref_str, _extract_cols_from_using(segment, using_anchor), ) assert table_a.segment assert table_b.segment fixes = [ LintFix.create_before( anchor_segment=insert_after_anchor, source=[table_a.segment, table_b.segment], edit_segments=edit_segments, ), *[LintFix.delete(seg) for seg in to_delete], ] return LintResult( anchor=anchor, description=description, fixes=fixes, ) def _extract_cols_from_using(join_clause: Segments, using_segs: Segments) -> List[str]: # First bracket after the USING keyword, then find ids using_cols: List[str] = ( join_clause.children() .select(start_seg=using_segs[0], select_if=sp.is_type("bracketed")) .first() .children(sp.is_type("identifier")) .apply(lambda el: el.raw) ) return using_cols def _generate_join_conditions( table_a_ref: str, table_b_ref: str, columns: List[str] ) -> List[BaseSegment]: edit_segments: List[BaseSegment] = [] for col in columns: edit_segments = edit_segments + [ _create_col_reference( table_a_ref, col, ), WhitespaceSegment(raw=" "), SymbolSegment(raw="="), WhitespaceSegment(raw=" "), _create_col_reference( table_b_ref, col, ), WhitespaceSegment(raw=" "), KeywordSegment(raw="AND"), WhitespaceSegment(raw=" "), ] # Trim the " " "AND" " " at the end return edit_segments[:-3] SequenceAndAnchorRes = Tuple[List[BaseSegment], BaseSegment] def _extract_deletion_sequence_and_anchor( join_clause: Segments, ) -> SequenceAndAnchorRes: insert_anchor: Optional[BaseSegment] = None to_delete: List[BaseSegment] = [] for seg in join_clause.children(): if seg.raw_upper == "USING": # Start collecting once we hit USING to_delete.append(seg) continue if len(to_delete) == 0: # Skip if we haven't started collecting continue if to_delete[-1].is_type("bracketed"): # terminate when we hit the brackets insert_anchor = seg break to_delete.append(seg) assert insert_anchor, "Insert Anchor must be present at this point" return to_delete, insert_anchor def _create_col_reference(table_ref: str, column_name: str) -> ColumnReferenceSegment: segments = ( IdentifierSegment(raw=table_ref, type="naked_identifier"), SymbolSegment(raw=".", type="symbol"), IdentifierSegment(raw=column_name, type="naked_identifier"), ) return ColumnReferenceSegment(segments=segments, pos_marker=None) sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST08.py000066400000000000000000000120621451700765000222240ustar00rootroot00000000000000"""Implementation of Rule ST08.""" from typing import Optional, Tuple from sqlfluff.core.parser import BaseSegment, KeywordSegment, WhitespaceSegment from sqlfluff.core.rules import BaseRule, LintFix, 
LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.utils.functional import FunctionalContext, Segments, sp from sqlfluff.utils.reflow.sequence import ReflowSequence class Rule_ST08(BaseRule): """``DISTINCT`` used with parentheses. **Anti-pattern** In this example, parentheses are not needed and confuse ``DISTINCT`` with a function. The parentheses can also be misleading about which columns are affected by the ``DISTINCT`` (all the columns!). .. code-block:: sql SELECT DISTINCT(a), b FROM foo **Best practice** Remove parentheses to be clear that the ``DISTINCT`` applies to both columns. .. code-block:: sql SELECT DISTINCT a, b FROM foo """ name = "structure.distinct" aliases = ("L015",) groups = ("all", "structure", "core") crawl_behaviour = SegmentSeekerCrawler({"select_clause", "function"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Looking for DISTINCT before a bracket. Look for DISTINCT keyword immediately followed by open parenthesis. """ seq = None anchor = None children = FunctionalContext(context).segment.children() if context.segment.is_type("select_clause"): # Look for `select_clause_modifier` modifier = children.select(sp.is_type("select_clause_modifier")) first_element = children.select(sp.is_type("select_clause_element")).first() expression = ( first_element.children(sp.is_type("expression")).first() or first_element ) bracketed = expression.children(sp.is_type("bracketed")).first() # is the first element only an expression with only brackets? if modifier and bracketed: # If there's nothing else in the expression, remove the brackets. if len(expression[0].segments) == 1: anchor, seq = self._remove_unneeded_brackets(context, bracketed) # Otherwise, still make sure there's a space after the DISTINCT. else: anchor = modifier[0] seq = ReflowSequence.from_around_target( modifier[0], context.parent_stack[0], config=context.config, sides="after", ) elif context.segment.is_type("function"): # Look for a function call DISTINCT() whose parent is an expression # with a single child. anchor = context.parent_stack[-1] if not anchor.is_type("expression") or len(anchor.segments) != 1: return None function_name = children.select(sp.is_type("function_name")).first() bracketed = children.first(sp.is_type("bracketed")) if ( not function_name or function_name[0].raw_upper != "DISTINCT" or not bracketed ): return None # Using ReflowSequence here creates an unneeded space between CONCAT # and "(" in the test case test_fail_distinct_concat_inside_count: # SELECT COUNT(DISTINCT(CONCAT(col1, '-', col2, '-', col3))) # # seq = ReflowSequence.from_around_target( # anchor, # context.parent_stack[0], # config=context.config, # ).replace( # anchor, # (KeywordSegment("DISTINCT"), WhitespaceSegment()) # + self.filter_meta(bracketed[0].segments)[1:-1], # ) # Do this until we have a fix for the above. return LintResult( anchor=anchor, fixes=[ LintFix.replace( anchor, (KeywordSegment("DISTINCT"), WhitespaceSegment()) + self.filter_meta(bracketed[0].segments)[1:-1], ) ], ) if seq and anchor: # Get modifications. fixes = seq.respace().get_fixes() if fixes: return LintResult( anchor=anchor, fixes=fixes, ) return None def _remove_unneeded_brackets( self, context: RuleContext, bracketed: Segments ) -> Tuple[BaseSegment, ReflowSequence]: # Remove the brackets and strip any meta segments. 
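# A hedged before/after for this helper, taken from the class docstring:
#     SELECT DISTINCT(a), b FROM foo   -->   SELECT DISTINCT a, b FROM foo
# The bracketed segment is replaced by its contents (minus the bracket
# symbols and filtered meta segments) and the ReflowSequence re-spaces
# around it so "DISTINCT" keeps a single trailing space.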
anchor = bracketed.get() assert anchor seq = ReflowSequence.from_around_target( anchor, context.parent_stack[0], config=context.config, sides="before", ).replace(anchor, self.filter_meta(anchor.segments)[1:-1]) return anchor, seq sqlfluff-2.3.5/src/sqlfluff/rules/structure/ST09.py000066400000000000000000000254411451700765000222320ustar00rootroot00000000000000"""Implementation of Rule ST09.""" from typing import List, Optional, Tuple, cast from sqlfluff.core.parser import BaseSegment, SymbolSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler from sqlfluff.dialects.dialect_ansi import ( FromExpressionElementSegment, JoinClauseSegment, ) from sqlfluff.utils.functional import FunctionalContext, Segments class Rule_ST09(BaseRule): """Joins should list the table referenced earlier/later first. This rule will break conditions from join clauses down into subconditions using the "and" and "or" binary operators. Subconditions that are made up of a qualified column reference, a comparison operator and another qualified column reference are then evaluated to check whether they list the table that was referenced earlier - or later, depending on the ``preferred_first_table_in_join_clause`` configuration. Subconditions that do not follow that pattern are ignored by this rule. .. note:: Joins in ``WHERE`` clauses are currently not supported by this rule. **Anti-pattern** In this example, the tables that were referenced later are listed first and the ``preferred_first_table_in_join_clause`` configuration is set to ``earlier``. .. code-block:: sql select foo.a, foo.b, bar.c from foo left join bar -- This subcondition does not list -- the table referenced earlier first: on bar.a = foo.a -- Neither does this subcondition: and bar.b = foo.b **Best practice** List the tables that were referenced earlier first. .. code-block:: sql select foo.a, foo.b, bar.c from foo left join bar on foo.a = bar.a and foo.b = bar.b """ name = "structure.join_condition_order" aliases = () groups: Tuple[str, ...] = ("all", "structure") config_keywords = ["preferred_first_table_in_join_clause"] crawl_behaviour = SegmentSeekerCrawler({"from_expression"}) is_fix_compatible = True def _eval(self, context: RuleContext) -> Optional[LintResult]: """Find rule violations and provide fixes. 0. Grab all table aliases into a table_aliases list. 1. Grab all conditions from the different join_on_condition segments. 2. Break conditions down into subconditions using the "and" and "or" binary operators. 3. Keep subconditions that are made up of a qualified column_reference, a comparison_operator and another qualified column_reference segments. 4. Check whether the table associated with the first column_reference segment has a greater index in table_aliases than the second column_reference segment. If so, populate the fixes list (lower index instead of greater index if preferred_first_table_in_join_clause == "later"). 5.a. If fixes is empty the rule passes. 5.b. If fixes isn't empty we return a LintResult object with fixable violations. """ self.preferred_first_table_in_join_clause: str assert context.segment.is_type("from_expression") # STEP 0. 
table_aliases: List[str] = [] children = FunctionalContext(context).segment.children() # we use recursive_crawl to deal with brackets join_clauses = children.recursive_crawl("join_clause") join_on_conditions = join_clauses.children().recursive_crawl( "join_on_condition" ) # we only care about join_on_condition segments if len(join_on_conditions) == 0: return None # the first alias comes from the from clause from_expression_alias: str = ( cast( FromExpressionElementSegment, children.recursive_crawl("from_expression_element")[0], ) .get_eventual_alias() .ref_str ) table_aliases.append(from_expression_alias) # the rest of the aliases come from the different join clauses join_clause_aliases: List[str] = [ cast(JoinClauseSegment, join_clause).get_eventual_aliases()[0][1].ref_str for join_clause in [clause for clause in join_clauses] ] table_aliases = table_aliases + join_clause_aliases table_aliases = [alias.upper() for alias in table_aliases] # STEP 1. conditions: List[List[BaseSegment]] = [] join_on_condition__expressions = join_on_conditions.children().recursive_crawl( "expression" ) for expression in join_on_condition__expressions: expression_group = [] for element in Segments(expression).children(): if element.type not in ("whitespace", "newline"): expression_group.append(element) conditions.append(expression_group) # STEP 2. subconditions: List[List[List[BaseSegment]]] = [] for expression_group in conditions: subconditions.append( self._split_list_by_segment_type( segment_list=expression_group, delimiter_type="binary_operator", delimiters=["and", "or"], ) ) subconditions_flattened: List[List[BaseSegment]] = [ item for sublist in subconditions for item in sublist ] # STEP 3. column_operator_column_subconditions: List[List[BaseSegment]] = [ subcondition for subcondition in subconditions_flattened if self._is_qualified_column_operator_qualified_column_sequence( subcondition ) ] # STEP 4. 
fixes: List[LintFix] = [] for subcondition in column_operator_column_subconditions: comparison_operator = subcondition[1] first_column_reference = subcondition[0] second_column_reference = subcondition[2] raw_comparison_operators = comparison_operator.get_children( "raw_comparison_operator" ) first_table_seg = first_column_reference.get_child( "naked_identifier", "quoted_identifier" ) second_table_seg = second_column_reference.get_child( "naked_identifier", "quoted_identifier" ) assert first_table_seg and second_table_seg first_table = first_table_seg.raw_upper second_table = second_table_seg.raw_upper # if we swap the two column references around the comparison operator # we might have to replace the comparison operator with a different one raw_comparison_operator_opposites = {"<": ">", ">": "<"} # there seem to be edge cases where either the first table or the second # table is not in table_aliases, in which case we cannot provide any fix if first_table not in table_aliases or second_table not in table_aliases: continue if ( table_aliases.index(first_table) > table_aliases.index(second_table) and self.preferred_first_table_in_join_clause == "earlier" ) or ( table_aliases.index(first_table) < table_aliases.index(second_table) and self.preferred_first_table_in_join_clause == "later" ): fixes = ( fixes + [ LintFix.replace( first_column_reference, [second_column_reference], ) ] + [ LintFix.replace( second_column_reference, [first_column_reference], ) ] + ( [ LintFix.replace( raw_comparison_operators[0], [ SymbolSegment( raw=raw_comparison_operator_opposites[ raw_comparison_operators[0].raw ], type="raw_comparison_operator", ) ], ) ] if raw_comparison_operators[0].raw in raw_comparison_operator_opposites and [r.raw for r in raw_comparison_operators] != ["<", ">"] else [] ) ) # STEP 5.a. if fixes == []: return None # STEP 5.b. else: return LintResult( anchor=context.segment, fixes=fixes, description=( "Joins should list the table referenced " f"{self.preferred_first_table_in_join_clause} first." 
), ) @staticmethod def _split_list_by_segment_type( segment_list: List[BaseSegment], delimiter_type: str, delimiters: List[str] ) -> List: # Break down a list into multiple sub-lists using a set of delimiters delimiters = [delimiter.upper() for delimiter in delimiters] new_list = [] sub_list = [] for i in range(len(segment_list)): if i == len(segment_list) - 1: sub_list.append(segment_list[i]) new_list.append(sub_list) elif ( segment_list[i].type == delimiter_type and segment_list[i].raw_upper in delimiters ): new_list.append(sub_list) sub_list = [] else: sub_list.append(segment_list[i]) return new_list @staticmethod def _is_qualified_column_operator_qualified_column_sequence( segment_list: List[BaseSegment], ) -> bool: # Check if list is made up of a qualified column_reference segment, # a comparison_operator segment and another qualified column_reference segment if len(segment_list) != 3: return False if ( segment_list[0].type == "column_reference" and "dot" in segment_list[0].direct_descendant_type_set and segment_list[1].type == "comparison_operator" and segment_list[2].type == "column_reference" and "dot" in segment_list[2].direct_descendant_type_set ): return True return False sqlfluff-2.3.5/src/sqlfluff/rules/structure/__init__.py000066400000000000000000000020021451700765000232560ustar00rootroot00000000000000"""The structure plugin bundle.""" from typing import List, Type from sqlfluff.core.plugin import hookimpl from sqlfluff.core.rules import BaseRule @hookimpl def get_rules() -> List[Type[BaseRule]]: """Get plugin rules. NOTE: Rules are imported only on fetch to manage import times when rules aren't used. """ from sqlfluff.rules.structure.ST01 import Rule_ST01 from sqlfluff.rules.structure.ST02 import Rule_ST02 from sqlfluff.rules.structure.ST03 import Rule_ST03 from sqlfluff.rules.structure.ST04 import Rule_ST04 from sqlfluff.rules.structure.ST05 import Rule_ST05 from sqlfluff.rules.structure.ST06 import Rule_ST06 from sqlfluff.rules.structure.ST07 import Rule_ST07 from sqlfluff.rules.structure.ST08 import Rule_ST08 from sqlfluff.rules.structure.ST09 import Rule_ST09 return [ Rule_ST01, Rule_ST02, Rule_ST03, Rule_ST04, Rule_ST05, Rule_ST06, Rule_ST07, Rule_ST08, Rule_ST09, ] sqlfluff-2.3.5/src/sqlfluff/rules/tsql/000077500000000000000000000000001451700765000200765ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/rules/tsql/TQ01.py000066400000000000000000000047561451700765000211510ustar00rootroot00000000000000"""Implementation of Rule TQ01.""" from typing import Optional from sqlfluff.core.rules import BaseRule, LintResult, RuleContext from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler class Rule_TQ01(BaseRule): r"""``SP_`` prefix should not be used for user-defined stored procedures in T-SQL. **Anti-pattern** The ``SP_`` prefix is used to identify system procedures and can adversely affect performance of the user-defined stored procedure. It can also break system procedures if there is a naming conflict. .. code-block:: sql :force: CREATE PROCEDURE dbo.sp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 **Best practice** Use a different name for the stored procedure. .. code-block:: sql :force: CREATE PROCEDURE dbo.pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 -- Alternatively prefix with USP_ to -- indicate a user-defined stored procedure. 
        CREATE PROCEDURE dbo.usp_pull_data
        AS
        SELECT
            ID,
            DataDate,
            CaseOutput
        FROM table1
    """

    name = "tsql.sp_prefix"
    aliases = ("L056",)
    groups = ("all", "tsql")
    crawl_behaviour = SegmentSeekerCrawler({"create_procedure_statement"})

    def _eval(self, context: RuleContext) -> Optional[LintResult]:
        r"""``SP_`` prefix should not be used for user-defined stored procedures."""
        # Rule only applies to T-SQL syntax.
        if context.dialect.name != "tsql":
            return None  # pragma: no cover

        # We are only interested in CREATE PROCEDURE statements.
        assert context.segment.is_type("create_procedure_statement")

        # Find the object reference for the stored procedure.
        object_reference_segment = next(
            (s for s in context.segment.segments if s.type == "object_reference")
        )

        # We only want to check the stored procedure name.
        procedure_segment = object_reference_segment.segments[-1]

        # If the stored procedure name starts with 'SP_' then raise a lint error.
        if procedure_segment.raw_upper.lstrip('["').startswith("SP_"):
            return LintResult(
                procedure_segment,
                description="'SP_' prefix should not be used for user-defined stored "
                "procedures.",
            )

        return None
sqlfluff-2.3.5/src/sqlfluff/rules/tsql/__init__.py000066400000000000000000000012551451700765000222120ustar00rootroot00000000000000"""The tsql rules plugin bundle.

This plugin bundles linting rules which apply exclusively to TSQL.

At some point in the future it might be useful to spin this off into a
separate installable python package, but so long as the number of rules
remains low, it makes sense to keep it bundled with SQLFluff core.
"""

from typing import List, Type

from sqlfluff.core.plugin import hookimpl
from sqlfluff.core.rules import BaseRule


@hookimpl
def get_rules() -> List[Type[BaseRule]]:
    """Get plugin rules.

    NOTE: Rules are imported only on fetch to manage import times when
    rules aren't used.
    """
    from sqlfluff.rules.tsql.TQ01 import Rule_TQ01

    return [Rule_TQ01]
sqlfluff-2.3.5/src/sqlfluff/utils/000077500000000000000000000000001451700765000171215ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/utils/__init__.py000066400000000000000000000000731451700765000212320ustar00rootroot00000000000000"""Utilities which are usable by the cli, api or rules."""
sqlfluff-2.3.5/src/sqlfluff/utils/analysis/000077500000000000000000000000001451700765000207445ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/utils/analysis/__init__.py000066400000000000000000000001101451700765000230450ustar00rootroot00000000000000"""Code analysis tools to support development of more complex rules."""
sqlfluff-2.3.5/src/sqlfluff/utils/analysis/query.py000066400000000000000000000375561451700765000225030ustar00rootroot00000000000000"""Tools for more complex analysis of SELECT statements."""

import logging
from dataclasses import dataclass, field
from enum import Enum
from typing import (
    Dict,
    Generic,
    Iterator,
    List,
    NamedTuple,
    Optional,
    Type,
    TypeVar,
    Union,
    cast,
)

from sqlfluff.core.cached_property import cached_property
from sqlfluff.core.dialects.base import Dialect
from sqlfluff.core.dialects.common import AliasInfo
from sqlfluff.core.parser import BaseSegment
from sqlfluff.dialects.dialect_ansi import ObjectReferenceSegment
from sqlfluff.utils.analysis.select import (
    SelectStatementColumnsAndTables,
    get_select_statement_info,
)
from sqlfluff.utils.functional import Segments, sp

analysis_logger = logging.getLogger("sqlfluff.rules.analysis")


# Segment types which directly are or contain selectables.
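# As an illustrative sketch (examples are ours, not drawn from the test
# suite): `SELECT 1` parses to a "select_statement", `SELECT 1 UNION
# SELECT 2` to a "set_expression", and `WITH a AS (SELECT 1) SELECT *
# FROM a` to a "with_compound_statement" - all of which are valid roots
# for the query analysis below.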
SELECTABLE_TYPES = ( "with_compound_statement", "set_expression", "select_statement", ) # Segment types which are likely to contain a subselect. SUBSELECT_TYPES = ( "merge_statement", "update_statement", "delete_statement", # NOTE: Values clauses won't have sub selects, but it's # also harmless to look, and they may appear in similar # locations. We include them here because they come through # the same code paths - although are likely to return nothing. "values_clause", ) class QueryType(Enum): """Query type: Simple is just a query; WithCompound has CTE(s).""" Simple = 1 WithCompound = 2 class WildcardInfo(NamedTuple): """Structure returned by Selectable.get_wildcard_info().""" segment: BaseSegment tables: List[str] @dataclass class Selectable: """A "SELECT" query segment.""" selectable: BaseSegment dialect: Dialect def as_str(self) -> str: """String representation for logging/testing.""" return self.selectable.raw @cached_property def select_info(self) -> Optional[SelectStatementColumnsAndTables]: """Returns SelectStatementColumnsAndTables on the SELECT.""" if self.selectable.is_type("select_statement"): return get_select_statement_info( self.selectable, self.dialect, early_exit=False ) else: # DML or values_clause # This is a bit dodgy, but a very useful abstraction. Here, we # interpret a DML or values_clause segment as if it were a SELECT. # Someday, we may need to tweak this, e.g. perhaps add a separate # QueryType for this (depending on the needs of the rules that use # it. # # For more info on the syntax and behavior of VALUES and its # similarity to a SELECT statement with literal values (no table # source), see the "Examples" section of the Postgres docs page: # (https://www.postgresql.org/docs/8.2/sql-values.html). values = Segments(self.selectable) alias_expression = values.children().first(sp.is_type("alias_expression")) name = alias_expression.children().first( sp.is_type("naked_identifier", "quoted_identifier") ) alias_info = AliasInfo( name[0].raw if name else "", name[0] if name else None, bool(name), self.selectable, alias_expression[0] if alias_expression else None, None, ) return SelectStatementColumnsAndTables( select_statement=self.selectable, table_aliases=[alias_info], standalone_aliases=[], reference_buffer=[], select_targets=[], col_aliases=[], using_cols=[], ) def get_wildcard_info(self) -> List[WildcardInfo]: """Find wildcard (*) targets in the SELECT.""" buff: List[WildcardInfo] = [] # Some select-like statements don't have select_info # (e.g. test_exasol_invalid_foreign_key_from) if not self.select_info: # pragma: no cover # TODO: Review whether to remove this. # Restructure of Exasol dialect means it no longer applies. return buff for seg in self.select_info.select_targets: if seg.get_child("wildcard_expression"): if "." in seg.raw: # The wildcard specifies a target table. table = seg.raw.rsplit(".", 1)[0] buff.append(WildcardInfo(seg, [table])) else: # The wildcard is unqualified (i.e. does not specify a # table). This means to include all columns from all the # tables in the query. 
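                    # e.g. in `SELECT * FROM foo JOIN bar USING (x)` (a
                    # hypothetical example) the wildcard resolves against
                    # both "foo" and "bar".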
buff.append( WildcardInfo( seg, [ alias_info.ref_str if alias_info.aliased else alias_info.from_expression_element.raw for alias_info in self.select_info.table_aliases if alias_info.ref_str ], ) ) return buff def find_alias(self, table: str) -> Optional[AliasInfo]: """Find corresponding table_aliases entry (if any) matching "table".""" alias_info = [ t for t in (self.select_info.table_aliases if self.select_info else []) if t.aliased and t.ref_str == table ] assert len(alias_info) <= 1 return alias_info[0] if alias_info else None T = TypeVar("T", bound="Query") @dataclass class Query(Generic[T]): """A main SELECT query plus possible CTEs.""" query_type: QueryType dialect: Dialect selectables: List[Selectable] = field(default_factory=list) ctes: Dict[str, T] = field(default_factory=dict) # Parent scope. This query can "see" CTEs defined by parents. parent: Optional[T] = field(default=None) # subqueries are subselects in either the SELECT or FROM clause. subqueries: List[T] = field(default_factory=list) cte_definition_segment: Optional[BaseSegment] = field(default=None) cte_name_segment: Optional[BaseSegment] = field(default=None) def __post_init__(self): # Once instantiated, set the `parent` attribute of any # subqueries and ctes. Some might already be set - but # we'll reset them anyway here. for subquery in self.subqueries: subquery.parent = self # NOTE: In normal operation, CTEs are typically set after # instantiation, and so for this method there aren't normally # any present. It is included here for completeness but not # covered in the test suite. # See `.from_segment()` for the way `parent` is set for CTEs. for cte in self.ctes.values(): # pragma: no cover cte.parent = self @property def children(self: T) -> List[T]: """Children could be CTEs, subselects or Others.""" return list(self.ctes.values()) + self.subqueries def as_dict(self: T) -> Dict: """Dict representation for logging/testing.""" result: Dict[str, Union[str, List[str], Dict, List[Dict]]] = {} if self.query_type != QueryType.Simple: result["query_type"] = self.query_type.name if self.selectables: result["selectables"] = [s.as_str() for s in self.selectables] if self.ctes: result["ctes"] = {k: v.as_dict() for k, v in self.ctes.items()} if self.subqueries: result["subqueries"] = [q.as_dict() for q in self.subqueries] return result def lookup_cte(self: T, name: str, pop: bool = True) -> Optional[T]: """Look up a CTE by name, in the current or any parent scope.""" cte = self.ctes.get(name.upper()) if cte: if pop: del self.ctes[name.upper()] return cte if self.parent: return self.parent.lookup_cte(name, pop) else: return None def crawl_sources( self: T, segment: BaseSegment, recurse_into=True, pop=False, lookup_cte=True ) -> Iterator[Union[str, T]]: """Find SELECTs, table refs, or value table function calls in segment. For each SELECT, yield a list of Query objects. As we find table references or function call strings, yield those. """ found_nested_select = False for seg in segment.recursive_crawl( "table_reference", "set_expression", "select_statement", "values_clause", recurse_into=False, allow_self=False, ): # Crawl efficiently, don't recurse here. We do that later. # What do we have? # 1. If it's a table reference, work out whether it's to a CTE # or to an external table. if seg.is_type("table_reference"): _seg = cast(ObjectReferenceSegment, seg) if not _seg.is_qualified() and lookup_cte: cte = self.lookup_cte(_seg.raw, pop=pop) if cte: # It's a CTE. yield cte # It's an external table reference. yield _seg.raw # 2. 
If it's some kind of more complex expression which is still # valid in this position, generate an appropriate sub-select. else: assert seg.is_type( "set_expression", "select_statement", "values_clause" ) found_nested_select = True # Generate a subquery, referencing the current query # as the parent. yield self.__class__.from_segment(seg, self.dialect, parent=self) if not found_nested_select: # If we reach here, the SELECT may be querying from a value table # function, e.g. UNNEST(). For our purposes, this is basically the # same as an external table. Return the "table" part as a string. table_expr = segment.get_child("table_expression") if table_expr: yield table_expr.raw @classmethod def _extract_subqueries( cls: Type[T], selectable: Selectable, dialect: Dialect ) -> Iterator[T]: """Given a Selectable, extract subqueries.""" assert selectable.selectable.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES, ), f"Found unexpected {selectable.selectable}" # For MERGE, UPDATE & DELETE, we should expect to find a sub select. for subselect in selectable.selectable.recursive_crawl( *SELECTABLE_TYPES, recurse_into=False, allow_self=False, ): # NOTE: We don't need to set the parent here, because it will # be set when attached to the parent later. yield cls.from_segment(subselect, dialect=dialect) @classmethod def from_root(cls: Type[T], root_segment, dialect: Dialect) -> T: """Given a root segment, find the first appropriate selectable and analyse.""" selectable_segment = next( # Could be a Selectable or a MERGE root_segment.recursive_crawl(*SELECTABLE_TYPES, "merge_statement"), None, ) assert selectable_segment, f"No selectable found in {root_segment.raw!r}." return cls.from_segment(selectable_segment, dialect=dialect) @classmethod def from_segment( cls: Type[T], segment: BaseSegment, dialect: Dialect, parent: Optional[T] = None, ) -> T: """Recursively generate a query from an appropriate segment.""" assert segment.is_type( *SELECTABLE_TYPES, *SUBSELECT_TYPES ), f"Invalid segment for `from_segment`: {segment}" selectables = [] subqueries = [] cte_defs = [] query_type = QueryType.Simple if segment.is_type("select_statement", *SUBSELECT_TYPES): # It's a select. Instantiate a Query. selectables = [Selectable(segment, dialect=dialect)] elif segment.is_type("set_expression"): # It's a set expression. There may be multiple selectables. for _seg in segment.get_children("select_statement"): selectables.append(Selectable(_seg, dialect=dialect)) else: # Otherwise it's a WITH statement. assert segment.is_type("with_compound_statement") query_type = QueryType.WithCompound for _seg in segment.recursive_crawl( # NOTE: We don't _specify_ set expressions here, because # all set expressions are made of selects, and we # want to look straight through to those child # expressions. "select_statement", recurse_into=False, no_recursive_seg_type="common_table_expression", ): selectables.append(Selectable(_seg, dialect=dialect)) # We also need to handle CTEs for _seg in segment.recursive_crawl( "common_table_expression", recurse_into=False, # Don't recurse into any other WITH statements. no_recursive_seg_type="with_compound_statement", ): # Just store the segments for now. cte_defs.append(_seg) # Extract subqueries from any selectables. for selectable in selectables: # NOTE: If any VALUES clauses are present, they pass through here # safely without Exception. They won't yield any subqueries. 
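            # e.g. for `WITH v AS (VALUES (1, 2)) SELECT * FROM v` (an
            # illustrative case), the CTE body is a values_clause: crawling
            # it is harmless and simply yields no subqueries.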
            subqueries += list(cls._extract_subqueries(selectable, dialect))

        # Instantiate the query.
        outer_query = cls(
            query_type,
            dialect,
            selectables,
            parent=parent,
            subqueries=subqueries,
        )

        # If we don't have any CTEs, we can stop now.
        if not cte_defs:
            return outer_query

        # Otherwise build up the CTE map.
        ctes = {}
        for cte in cte_defs:
            # NOTE: This feels a little risky to just assume the first segment
            # is the name, but it's the same functionality we've run with for
            # a while.
            name_seg = cte.segments[0]
            name = name_seg.raw_upper
            # Get the query out of it, just stop on the first one we find.
            try:
                inner_qry = next(
                    cte.recursive_crawl(
                        *SELECTABLE_TYPES,
                        "values_clause",
                        # Very rarely, we might find things like update
                        # clauses in here. Handle them accordingly.
                        *SUBSELECT_TYPES,
                    ),
                )
            # If this fails it's because we didn't find anything "selectable"
            # in the CTE. Flag this up, but then carry on. It's likely something
            # strange (e.g. a ClickHouse WITH clause setting a variable).
            except StopIteration:  # pragma: no cover
                # Log it as an issue, but otherwise skip this one.
                analysis_logger.info(f"Skipping unexpected CTE structure: {cte.raw!r}")
                continue
            qry = cls.from_segment(inner_qry, dialect=dialect, parent=outer_query)
            assert qry
            # Populate the CTE specific args.
            qry.cte_definition_segment = cte
            qry.cte_name_segment = name_seg
            # File it in the dictionary.
            ctes[name] = qry

        # Set the CTEs attribute on the outer query.
        # NOTE: Because we're setting this after instantiation, it's important
        # that we've already set the `parent` value of the cte queries.
        outer_query.ctes = ctes
        return outer_query
sqlfluff-2.3.5/src/sqlfluff/utils/analysis/select.py000066400000000000000000000216141451700765000226010ustar00rootroot00000000000000"""Basic code analysis tools for SELECT statements."""

from typing import List, NamedTuple, Optional, cast

from sqlfluff.core.dialects.base import Dialect
from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo
from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.dialects.dialect_ansi import (
    ObjectReferenceSegment,
    SelectClauseElementSegment,
)


class SelectStatementColumnsAndTables(NamedTuple):
    """Structure returned by get_select_statement_info()."""

    select_statement: BaseSegment
    table_aliases: List[AliasInfo]
    standalone_aliases: List[str]  # value table function aliases
    reference_buffer: List[ObjectReferenceSegment]
    select_targets: List[SelectClauseElementSegment]
    col_aliases: List[ColumnAliasInfo]
    using_cols: List[str]


def _get_object_references(segment: BaseSegment) -> List[ObjectReferenceSegment]:
    return list(
        cast(ObjectReferenceSegment, _seg)
        for _seg in segment.recursive_crawl(
            "object_reference", no_recursive_seg_type="select_statement"
        )
    )


def get_select_statement_info(
    segment: BaseSegment, dialect: Optional[Dialect], early_exit: bool = True
) -> Optional[SelectStatementColumnsAndTables]:
    """Analyze a select statement: targets, aliases, etc. Return info."""
    assert segment.is_type("select_statement")
    table_aliases, standalone_aliases = get_aliases_from_select(segment, dialect)
    if early_exit and not table_aliases and not standalone_aliases:
        return None

    # Iterate through all the references, both in the select clause, but also
    # potential others.
    sc = segment.get_child("select_clause")
    # Sometimes there is no select clause (e.g. "SELECT *" is a select_clause_element).
    if not sc:  # pragma: no cover
        # TODO: Review whether this clause should be removed. It might only
        # have existed for an old way of structuring the Exasol dialect.
return None # NOTE: In this first crawl, don't crawl inside any sub-selects, that's very # important for both isolation and performance reasons. reference_buffer = _get_object_references(sc) for potential_clause in ( "where_clause", "groupby_clause", "having_clause", "orderby_clause", "qualify_clause", ): clause = segment.get_child(potential_clause) if clause: reference_buffer += _get_object_references(clause) # Get all select targets. _select_clause = segment.get_child("select_clause") assert _select_clause, "Select statement found without select clause." select_targets = cast( List[SelectClauseElementSegment], _select_clause.get_children("select_clause_element"), ) # Get all column aliases. NOTE: In two steps so mypy can follow. _pre_aliases = [s.get_alias() for s in select_targets] col_aliases = [_alias for _alias in _pre_aliases if _alias is not None] # Get any columns referred to in a using clause, and extract anything # from ON clauses. using_cols = [] fc = segment.get_child("from_clause") if fc: for join_clause in fc.recursive_crawl( "join_clause", no_recursive_seg_type="select_statement" ): seen_using = False for seg in join_clause.iter_segments(): if seg.is_type("keyword") and seg.raw_upper == "USING": seen_using = True elif seg.is_type("join_on_condition"): for on_seg in seg.segments: if on_seg.is_type("bracketed", "expression"): # Deal with expressions reference_buffer += _get_object_references(seg) elif seen_using and seg.is_type("bracketed"): for subseg in seg.segments: if subseg.is_type("identifier"): using_cols.append(subseg.raw) seen_using = False return SelectStatementColumnsAndTables( select_statement=segment, table_aliases=table_aliases or [], standalone_aliases=standalone_aliases or [], reference_buffer=reference_buffer, select_targets=select_targets, col_aliases=col_aliases, using_cols=using_cols, ) def get_aliases_from_select(segment, dialect=None): """Gets the aliases referred to in the FROM clause. Returns a tuple of two lists: - Table aliases - Value table function aliases """ fc = segment.get_child("from_clause") if not fc: # If there's no from clause then just abort. return None, None aliases = fc.get_eventual_aliases() # We only want table aliases, so filter out aliases for value table # functions, lambda parameters and pivot columns. standalone_aliases = [] standalone_aliases += _get_pivot_table_columns(segment, dialect) standalone_aliases += _get_lambda_argument_columns(segment, dialect) table_aliases = [] for table_expr, alias_info in aliases: if _has_value_table_function(table_expr, dialect): if alias_info[0] not in standalone_aliases: standalone_aliases.append(alias_info[0]) elif alias_info not in table_aliases: table_aliases.append(alias_info) return table_aliases, standalone_aliases def _has_value_table_function(table_expr, dialect) -> bool: if not dialect: # We need the dialect to get the value table function names. If # we don't have it, assume the clause does not have a value table # function. return False # pragma: no cover for function_name in table_expr.recursive_crawl("function_name"): # Other rules can increase whitespace in the function name, so use strip to # remove # See: https://github.com/sqlfluff/sqlfluff/issues/1304 if function_name.raw.upper().strip() in dialect.sets("value_table_functions"): return True return False def _get_pivot_table_columns(segment, dialect) -> list: if not dialect: # We need the dialect to get the pivot table column names. 
If # we don't have it, assume the clause does not have a pivot table return [] # pragma: no cover fc = segment.recursive_crawl("from_pivot_expression") if not fc: # If there's no pivot clause then just abort. return [] # pragma: no cover pivot_table_column_aliases = [] for pivot_table_column_alias in segment.recursive_crawl("pivot_column_reference"): if pivot_table_column_alias.raw not in pivot_table_column_aliases: pivot_table_column_aliases.append(pivot_table_column_alias.raw) return pivot_table_column_aliases # Lambda arguments, # e.g. `x` and `y` in `x -> x is not null` and `(x, y) -> x + y` # are declared in-place, and are as such standalone – i.e. they do not reference # identifiers or columns that we should expect to be declared somewhere else. # These columns are interesting to identify since they can get special # treatment in some rules. def _get_lambda_argument_columns( segment: BaseSegment, dialect: Dialect ) -> Optional[List[str]]: if not dialect or dialect.name not in ["athena", "sparksql"]: # Only athena and sparksql are known to have lambda expressions, # so all other dialects will have zero lambda columns return [] lambda_argument_columns = [] for potential_lambda in segment.recursive_crawl("expression"): potential_arrow = potential_lambda.get_child("binary_operator") if potential_arrow and potential_arrow.raw == "->": arrow_operator = potential_arrow # The arguments will be before the arrow operator, so we get anything # that is a column reference or a set of bracketed column references before # the arrow. There should be exactly one segment matching this, if there are # more, this doesn't cleanly match a lambda expression argument_segments = potential_lambda.select_children( stop_seg=arrow_operator, select_if=( lambda x: x.is_type("bracketed") or x.is_type("column_reference") ), ) assert len(argument_segments) == 1 child_segment = argument_segments[0] if child_segment.is_type("bracketed"): start_bracket = child_segment.get_child("start_bracket") # There will be a start bracket if it's bracketed. assert start_bracket if start_bracket.raw == "(": bracketed_arguments = child_segment.get_children("column_reference") raw_arguments = [argument.raw for argument in bracketed_arguments] lambda_argument_columns += raw_arguments elif child_segment.is_type("column_reference"): lambda_argument_columns.append(child_segment.raw) return lambda_argument_columns sqlfluff-2.3.5/src/sqlfluff/utils/functional/000077500000000000000000000000001451700765000212635ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/utils/functional/__init__.py000066400000000000000000000020711451700765000233740ustar00rootroot00000000000000"""Modules in this directory provide a "functional" API for rule writing. Wikipedia defines functional programming (https://en.wikipedia.org/wiki/Functional_programming) as a declarative programming paradigm where code is built by applying and composing functions. The modules in this API provide classes and predicates for working with segments and slices. The API is loosely inspired by packages such as Pandas and Numpy. These classes provide a simpler, higher-level API for writing rules, resulting in shorter, simpler, easier-to-read code. Rules can use these classes, the lower-level classes, or a mix, but it is suggested that each rule primarily use one or the other for readability. 
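As an illustrative sketch (hypothetical rule code, not an API guarantee),
finding the first keyword child of the current segment might look like::

    first_keyword = (
        FunctionalContext(context).segment.children(sp.is_type("keyword")).first()
    )

compared with manually looping over ``context.segment.segments``.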
""" __all__ = ("Segments", "rsp", "sp", "tsp", "FunctionalContext") import sqlfluff.utils.functional.raw_file_slice_predicates as rsp import sqlfluff.utils.functional.segment_predicates as sp import sqlfluff.utils.functional.templated_file_slice_predicates as tsp from sqlfluff.utils.functional.context import FunctionalContext from sqlfluff.utils.functional.segments import Segments sqlfluff-2.3.5/src/sqlfluff/utils/functional/context.py000066400000000000000000000035751451700765000233330ustar00rootroot00000000000000"""Define FunctionalContext class.""" from sqlfluff.core.rules import RuleContext from sqlfluff.utils.functional.segments import Segments class FunctionalContext: """RuleContext written in a "functional" style; simplifies writing rules.""" def __init__(self, context: RuleContext): self.context = context @property def segment(self) -> "Segments": """Returns a Segments object for context.segment.""" return Segments( self.context.segment, templated_file=self.context.templated_file ) @property def parent_stack(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.parent_stack.""" return Segments( *self.context.parent_stack, templated_file=self.context.templated_file ) @property def siblings_pre(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.siblings_pre.""" return Segments( *self.context.siblings_pre, templated_file=self.context.templated_file ) @property def siblings_post(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.siblings_post.""" return Segments( *self.context.siblings_post, templated_file=self.context.templated_file ) @property def raw_stack(self) -> "Segments": # pragma: no cover """Returns a Segments object for context.raw_stack.""" return Segments( *self.context.raw_stack, templated_file=self.context.templated_file ) @property def raw_segments(self) -> Segments: # pragma: no cover """Returns a Segments object for all the raw segments in the file.""" file_segment = self.context.parent_stack[0] return Segments( *file_segment.get_raw_segments(), templated_file=self.context.templated_file ) sqlfluff-2.3.5/src/sqlfluff/utils/functional/raw_file_slice_predicates.py000066400000000000000000000015471451700765000270160ustar00rootroot00000000000000"""Defines commonly used raw file slice predicates for rule writers. For consistency, all the predicates in this module are implemented as functions returning functions. This avoids rule writers having to remember the distinction between normal functions and functions returning functions. This is not necessarily a complete set of predicates covering all possible requirements. Rule authors can define their own predicates as needed, either as regular functions, `lambda`, etc. """ from typing import Callable from sqlfluff.core.templaters.base import RawFileSlice def is_slice_type( *slice_types: str, ) -> Callable[[RawFileSlice], bool]: """Returns a function that determines if segment is one of the types.""" def _(raw_slice: RawFileSlice) -> bool: return any(raw_slice.slice_type == slice_type for slice_type in slice_types) return _ sqlfluff-2.3.5/src/sqlfluff/utils/functional/raw_file_slices.py000066400000000000000000000042541451700765000247740ustar00rootroot00000000000000"""Surrogate class for working with RawFileSlice collections.""" from typing import Callable, Optional from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFile class RawFileSlices(tuple): """Encapsulates a sequence of one or more RawFileSlice. 
The slices may or may not be contiguous in a file. Provides useful operations on a sequence of slices to simplify rule creation. """ def __new__(cls, *raw_slices, templated_file=None): """Override new operator.""" return super(RawFileSlices, cls).__new__(cls, raw_slices) def __init__(self, *_: RawFileSlice, templated_file: TemplatedFile): self.templated_file = templated_file def all(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool: """Do all the raw slices match?""" for s in self: if predicate is not None and not predicate(s): return False return True def any(self, predicate: Optional[Callable[[RawFileSlice], bool]] = None) -> bool: """Do any of the raw slices match?""" for s in self: if predicate is None or predicate(s): return True return False def select( self, select_if: Optional[Callable[[RawFileSlice], bool]] = None, loop_while: Optional[Callable[[RawFileSlice], bool]] = None, start_slice: Optional[RawFileSlice] = None, stop_slice: Optional[RawFileSlice] = None, ) -> "RawFileSlices": """Retrieve range/subset. NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those slices are not included in the loop. """ start_index = self.index(start_slice) if start_slice else -1 stop_index = self.index(stop_slice) if stop_slice else len(self) buff = [] for slice_ in self[start_index + 1 : stop_index]: if loop_while is not None and not loop_while(slice_): # NOTE: This likely needs more tests. break # pragma: no cover if select_if is None or select_if(slice_): buff.append(slice_) return RawFileSlices(*buff, templated_file=self.templated_file) sqlfluff-2.3.5/src/sqlfluff/utils/functional/segment_predicates.py000066400000000000000000000132331451700765000255040ustar00rootroot00000000000000"""Defines commonly used segment predicates for rule writers. For consistency, all the predicates in this module are implemented as functions returning functions. This avoids rule writers having to remember the distinction between normal functions and functions returning functions. This is not necessarily a complete set of predicates covering all possible requirements. Rule authors can define their own predicates as needed, either as regular functions, `lambda`, etc. 
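For example (an illustrative composition), the predicates defined below can
be combined and passed anywhere a single predicate is accepted::

    code_keyword = and_(is_code(), is_type("keyword"))

which returns a new function taking one segment and returning a bool.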
""" from typing import Callable, Optional from sqlfluff.core.parser import BaseSegment from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.utils.functional.raw_file_slices import RawFileSlices from sqlfluff.utils.functional.templated_file_slices import TemplatedFileSlices def raw_is(*raws: str) -> Callable[[BaseSegment], bool]: # pragma: no cover """Returns a function that determines if segment matches one of the raw inputs.""" def _(segment: BaseSegment) -> bool: return segment.raw in raws return _ def raw_upper_is(*raws: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if segment matches one of the raw inputs.""" def _(segment: BaseSegment) -> bool: return segment.raw_upper in raws return _ def is_type(*seg_type: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if segment is one of the types.""" def _(segment: BaseSegment) -> bool: return segment.is_type(*seg_type) return _ def is_keyword(*keyword_name: str) -> Callable[[BaseSegment], bool]: """Returns a function that determines if it's a matching keyword.""" return and_( is_type("keyword"), raw_upper_is(*[raw.upper() for raw in keyword_name]) ) def is_code() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is code.""" def _(segment: BaseSegment) -> bool: return segment.is_code return _ def is_comment() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is comment.""" def _(segment: BaseSegment) -> bool: return segment.is_comment return _ def is_meta() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is meta.""" def _(segment: BaseSegment) -> bool: return segment.is_meta return _ def is_raw() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is raw.""" def _(segment: BaseSegment) -> bool: return segment.is_raw() return _ def is_whitespace() -> Callable[[BaseSegment], bool]: """Returns a function that checks if segment is whitespace.""" def _(segment: BaseSegment) -> bool: return segment.is_whitespace return _ def is_templated() -> Callable[[BaseSegment], bool]: # pragma: no cover """Returns a function that checks if segment is templated.""" def _(segment: BaseSegment) -> bool: return segment.is_templated return _ def get_type() -> Callable[[BaseSegment], str]: """Returns a function that gets segment type.""" def _(segment: BaseSegment) -> str: return segment.get_type() return _ def and_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes the functions and-ed together.""" def _(segment: BaseSegment) -> bool: return all(function(segment) for function in functions) return _ def or_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes the functions or-ed together.""" def _(segment: BaseSegment) -> bool: return any(function(segment) for function in functions) return _ def not_(fn: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]: """Returns a function that computes: not fn().""" def _(segment: BaseSegment) -> bool: return not fn(segment) return _ def raw_slices( segment: BaseSegment, templated_file: Optional[TemplatedFile], ) -> RawFileSlices: # pragma: no cover """Returns raw slices for a segment.""" if not templated_file: raise ValueError( 'raw_slices: "templated_file" parameter is required.' ) # pragma: no cover if not segment.pos_marker: raise ValueError( 'raw_slices: "segment" parameter must have pos_marker set.' 
) # pragma: no cover return RawFileSlices( *templated_file.raw_slices_spanning_source_slice( segment.pos_marker.source_slice ), templated_file=templated_file ) def templated_slices( segment: BaseSegment, templated_file: Optional[TemplatedFile], ) -> TemplatedFileSlices: """Returns raw slices for a segment.""" if not templated_file: raise ValueError( 'templated_slices: "templated_file" parameter is required.' ) # pragma: no cover if not segment.pos_marker: raise ValueError( 'templated_slices: "segment" parameter must have pos_marker set.' ) # pragma: no cover # :TRICKY: We don't use _find_slice_indices_of_templated_pos() here because # it treats TemplatedFileSlice.templated_slice.stop as inclusive, not # exclusive. Other parts of SQLFluff rely on this behaviour, but we don't # want it. It's easy enough to do this ourselves. start = segment.pos_marker.templated_slice.start stop = segment.pos_marker.templated_slice.stop templated_slices = [ slice_ for slice_ in templated_file.sliced_file if (stop > slice_.templated_slice.start and start < slice_.templated_slice.stop) ] return TemplatedFileSlices(*templated_slices, templated_file=templated_file) sqlfluff-2.3.5/src/sqlfluff/utils/functional/segments.py000066400000000000000000000201351451700765000234630ustar00rootroot00000000000000"""Surrogate class for working with Segment collections.""" from typing import ( Any, Callable, Iterable, Iterator, List, Optional, Union, overload, ) from typing_extensions import SupportsIndex # NOTE: Required for py37 from sqlfluff.core.parser import BaseSegment from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.utils.functional.raw_file_slices import RawFileSlices PredicateType = Callable[[BaseSegment], bool] class Segments(tuple): """Encapsulates a sequence of one or more BaseSegments. The segments may or may not be contiguous in a parse tree. Provides useful operations on a sequence of segments to simplify rule creation. """ def __new__(cls, *segments, templated_file=None): """Override new operator.""" return super(Segments, cls).__new__(cls, segments) def __init__( self, *_: BaseSegment, templated_file: Optional[TemplatedFile] = None ) -> None: self.templated_file = templated_file def __add__(self, segments_) -> "Segments": return Segments( *tuple(self).__add__(tuple(segments_)), templated_file=self.templated_file ) def __radd__(self, segments_) -> "Segments": return Segments( *tuple(segments_).__add__(tuple(self)), templated_file=self.templated_file ) def find(self, segment: Optional[BaseSegment]) -> int: """Returns index if found, -1 if not found.""" try: return self.index(segment) except ValueError: return -1 def all(self, predicate: Optional[PredicateType] = None) -> bool: """Do all the segments match?""" for s in self: if predicate is not None and not predicate(s): return False return True def any(self, predicate: Optional[PredicateType] = None) -> bool: """Do any of the segments match?""" for s in self: if predicate is None or predicate(s): return True return False def reversed(self) -> "Segments": # pragma: no cover """Return the same segments in reverse order.""" return Segments(*reversed(self), templated_file=self.templated_file) @property def raw_slices(self) -> RawFileSlices: """Raw slices of the segments, sorted in source file order.""" if not self.templated_file: raise ValueError( 'Segments.raw_slices: "templated_file" property is required.' 
) raw_slices = set() for s in self: if s.pos_marker is None: raise ValueError( "Segments include a positionless segment" ) # pragma: no cover source_slice = s.pos_marker.source_slice raw_slices.update( self.templated_file.raw_slices_spanning_source_slice(source_slice) ) return RawFileSlices( *sorted(raw_slices, key=lambda slice_: slice_.source_idx), templated_file=self.templated_file ) # TODO:This method isn't used as at 2022-08-10. Consider removing in future. @property def raw_segments(self) -> "Segments": # pragma: no cover """Get raw segments underlying the segments.""" raw_segments_list = [] for s in self: raw_segments_list.extend(s.raw_segments) return Segments(*raw_segments_list, templated_file=self.templated_file) def recursive_crawl_all(self) -> "Segments": # pragma: no cover """Recursively crawl all descendant segments.""" segments: List[BaseSegment] = [] for s in self: for i in s.recursive_crawl_all(): segments.append(i) return Segments(*segments, templated_file=self.templated_file) def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> "Segments": """Recursively crawl for segments of a given type.""" segments: List[BaseSegment] = [] for s in self: for i in s.recursive_crawl(*seg_type, recurse_into=recurse_into): segments.append(i) return Segments(*segments, templated_file=self.templated_file) def children( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns an object with children of the segments in this object.""" child_segments: List[BaseSegment] = [] for s in self: for child in s.segments: if predicate is None or predicate(child): child_segments.append(child) return Segments(*child_segments, templated_file=self.templated_file) def first( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns the first segment (if any) that satisfies the predicates.""" for s in self: if predicate is None or predicate(s): return Segments(s, templated_file=self.templated_file) # If no segment satisfies "predicates", return empty Segments. return Segments(templated_file=self.templated_file) def last( self, predicate: Optional[PredicateType] = None, ) -> "Segments": """Returns the last segment (if any) that satisfies the predicates.""" for s in reversed(self): if predicate is None or predicate(s): return Segments(s, templated_file=self.templated_file) # If no segment satisfies "predicates", return empty Segments. return Segments(templated_file=self.templated_file) def __iter__(self) -> Iterator[BaseSegment]: # pragma: no cover # Typing understand we are looping BaseSegment return super().__iter__() @overload def __getitem__(self, item: SupportsIndex) -> BaseSegment: """Individual "getting" returns a single segment. NOTE: Using `SupportsIndex` rather than `int` is to ensure type compatibility with the parent `tuple` implementation. """ @overload def __getitem__(self, item: slice) -> "Segments": """Getting a slice returns another `Segments` object.""" def __getitem__( self, item: Union[SupportsIndex, slice] ) -> Union[BaseSegment, "Segments"]: result = super().__getitem__(item) if isinstance(result, tuple): return Segments(*result, templated_file=self.templated_file) else: return result def get(self, index: int = 0, *, default: Any = None) -> Optional[BaseSegment]: """Return specified item. 
Returns default if index out of range.""" try: return self[index] except IndexError: return default def apply(self, fn: Callable[[BaseSegment], Any]) -> List[Any]: """Apply function to every item.""" return [fn(s) for s in self] def select( self, select_if: Optional[PredicateType] = None, loop_while: Optional[PredicateType] = None, start_seg: Optional[BaseSegment] = None, stop_seg: Optional[BaseSegment] = None, ) -> "Segments": """Retrieve range/subset. NOTE: Iterates the segments BETWEEN start_seg and stop_seg, i.e. those segments are not included in the loop. """ start_index = self.index(start_seg) if start_seg else -1 stop_index = self.index(stop_seg) if stop_seg else len(self) buff = [] for seg in self[start_index + 1 : stop_index]: if loop_while is not None and not loop_while(seg): break if select_if is None or select_if(seg): buff.append(seg) return Segments(*buff, templated_file=self.templated_file) def iterate_segments( self, predicate: Optional[PredicateType] = None, ) -> Iterable["Segments"]: """Loop over each element as a fresh Segments.""" # Looping over Segments returns BaseEls # which is sometime what we want and sometimes not for base_el in self: if predicate and not predicate(base_el): # pragma: no cover continue yield Segments(base_el, templated_file=self.templated_file) sqlfluff-2.3.5/src/sqlfluff/utils/functional/templated_file_slice_predicates.py000066400000000000000000000015741451700765000302040ustar00rootroot00000000000000"""Defines commonly used templated file slice predicates for rule writers. For consistency, all the predicates in this module are implemented as functions returning functions. This avoids rule writers having to remember the distinction between normal functions and functions returning functions. This is not necessarily a complete set of predicates covering all possible requirements. Rule authors can define their own predicates as needed, either as regular functions, `lambda`, etc. """ from typing import Callable from sqlfluff.core.templaters.base import TemplatedFileSlice def is_slice_type( *slice_types: str, ) -> Callable[[TemplatedFileSlice], bool]: """Returns a function that determines if segment is one the types.""" def _(raw_slice: TemplatedFileSlice) -> bool: return any(raw_slice.slice_type == slice_type for slice_type in slice_types) return _ sqlfluff-2.3.5/src/sqlfluff/utils/functional/templated_file_slices.py000066400000000000000000000044321451700765000261600ustar00rootroot00000000000000"""Surrogate class for working with TemplatedFileSlice collections.""" from typing import Callable, Optional from sqlfluff.core.templaters.base import TemplatedFile, TemplatedFileSlice class TemplatedFileSlices(tuple): """Encapsulates a sequence of one or more TemplatedFileSlice. The slices may or may not be contiguous in a file. Provides useful operations on a sequence of slices to simplify rule creation. 
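    For example (an illustrative sketch), given an instance ``slices``, a rule
    can check whether every slice is literal (i.e. untemplated) source code
    with::

        slices.all(lambda s: s.slice_type == "literal")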
""" def __new__(cls, *templated_slices, templated_file=None): """Override new operator.""" return super(TemplatedFileSlices, cls).__new__(cls, templated_slices) def __init__(self, *_: TemplatedFileSlice, templated_file: TemplatedFile) -> None: self.templated_file = templated_file def all( self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None ) -> bool: """Do all the templated slices match?""" for s in self: if predicate is not None and not predicate(s): return False return True def any( self, predicate: Optional[Callable[[TemplatedFileSlice], bool]] = None ) -> bool: # pragma: no cover """Do any of the templated slices match?""" for s in self: if predicate is None or predicate(s): return True return False def select( self, select_if: Optional[Callable[[TemplatedFileSlice], bool]] = None, loop_while: Optional[Callable[[TemplatedFileSlice], bool]] = None, start_slice: Optional[TemplatedFileSlice] = None, stop_slice: Optional[TemplatedFileSlice] = None, ) -> "TemplatedFileSlices": # pragma: no cover """Retrieve range/subset. NOTE: Iterates the slices BETWEEN start_slice and stop_slice, i.e. those slices are not included in the loop. """ start_index = self.index(start_slice) if start_slice else -1 stop_index = self.index(stop_slice) if stop_slice else len(self) buff = [] for slice_ in self[start_index + 1 : stop_index]: if loop_while is not None and not loop_while(slice_): break if select_if is None or select_if(slice_): buff.append(slice_) return TemplatedFileSlices(*buff, templated_file=self.templated_file) sqlfluff-2.3.5/src/sqlfluff/utils/identifers.py000066400000000000000000000016641451700765000216360ustar00rootroot00000000000000"""Helper utilities for identifiers. These are primarily common functions used by multiple rule bundles. Defined here to avoid duplication, but also avoid circular imports. """ from typing import Tuple from sqlfluff.core.parser import BaseSegment def identifiers_policy_applicable( policy: str, parent_stack: Tuple[BaseSegment, ...] ) -> bool: """Does `(un)quoted_identifiers_policy` apply to this segment? This method is used in CP02, RF04 and RF05. """ if policy == "all": return True if policy == "none": return False is_alias = parent_stack and parent_stack[-1].is_type( "alias_expression", "column_definition", "with_compound_statement" ) if policy == "aliases" and is_alias: return True is_inside_from = any(p.is_type("from_clause") for p in parent_stack) if policy == "column_aliases" and is_alias and not is_inside_from: return True return False sqlfluff-2.3.5/src/sqlfluff/utils/reflow/000077500000000000000000000000001451700765000204175ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/utils/reflow/__init__.py000066400000000000000000000002051451700765000225250ustar00rootroot00000000000000"""Reflow utilities for sqlfluff rules.""" from sqlfluff.utils.reflow.sequence import ReflowSequence __all__ = ("ReflowSequence",) sqlfluff-2.3.5/src/sqlfluff/utils/reflow/config.py000066400000000000000000000155601451700765000222450ustar00rootroot00000000000000"""Methods to set up appropriate reflow config from file.""" # Until we have a proper structure this will work. # TODO: Migrate this to the config file. 
from dataclasses import dataclass from typing import AbstractSet, Dict, FrozenSet, Optional, Set from sqlfluff.core.config import FluffConfig from sqlfluff.utils.reflow.depthmap import DepthInfo ConfigElementType = Dict[str, str] ConfigDictType = Dict[str, ConfigElementType] @dataclass() class BlockConfig: """Holds spacing config for a block and allows easy manipulation.""" spacing_before: str = "single" spacing_after: str = "single" spacing_within: Optional[str] = None line_position: Optional[str] = None def incorporate( self, before: Optional[str] = None, after: Optional[str] = None, within: Optional[str] = None, line_position: Optional[str] = None, config: Optional[ConfigElementType] = None, ) -> None: """Mutate the config based on additional information.""" config = config or {} self.spacing_before = ( before or config.get("spacing_before", None) or self.spacing_before ) self.spacing_after = ( after or config.get("spacing_after", None) or self.spacing_after ) self.spacing_within = ( within or config.get("spacing_within", None) or self.spacing_within ) self.line_position = ( line_position or config.get("line_position", None) or self.line_position ) @dataclass(frozen=True) class ReflowConfig: """An interface onto the configuration of how segments should reflow. This acts as the primary translation engine between configuration held either in dicts for testing, or in the FluffConfig in live usage, and the configuration used during reflow operations. """ _config_dict: ConfigDictType config_types: Set[str] # In production, these values are almost _always_ set because we # use `.from_fluff_config`, but the defaults are here to aid in # testing. tab_space_size: int = 4 indent_unit: str = " " max_line_length: int = 80 hanging_indents: bool = False skip_indentation_in: FrozenSet[str] = frozenset() allow_implicit_indents: bool = False trailing_comments: str = "before" @classmethod def from_dict(cls, config_dict: ConfigDictType, **kwargs): """Construct a ReflowConfig from a dict.""" config_types = set(config_dict.keys()) # Enrich any of the "align" keys with what they're aligning with. for seg_type in config_dict: for key in ("spacing_before", "spacing_after"): if config_dict[seg_type].get(key, None) == "align": new_key = "align:" + seg_type # Is there a limiter or boundary? # NOTE: A `boundary` is only applicable if `within` is present. if config_dict[seg_type].get("align_within", None): new_key += ":" + config_dict[seg_type]["align_within"] if config_dict[seg_type].get("align_scope", None): new_key += ":" + config_dict[seg_type]["align_scope"] config_dict[seg_type][key] = new_key return cls(_config_dict=config_dict, config_types=config_types, **kwargs) @classmethod def from_fluff_config(cls, config: FluffConfig): """Constructs a ReflowConfig from a FluffConfig.""" return cls.from_dict( config.get_section(["layout", "type"]), indent_unit=config.get("indent_unit", ["indentation"]), tab_space_size=config.get("tab_space_size", ["indentation"]), hanging_indents=config.get("hanging_indents", ["indentation"]), max_line_length=config.get("max_line_length"), skip_indentation_in=frozenset( config.get("skip_indentation_in", ["indentation"]).split(",") ), allow_implicit_indents=config.get( "allow_implicit_indents", ["indentation"] ), trailing_comments=config.get("trailing_comments", ["indentation"]), ) def get_block_config( self, block_class_types: AbstractSet[str], depth_info: Optional[DepthInfo] = None, ) -> BlockConfig: """Given the class types of a ReflowBlock return spacing config. 
When fetching the config for a single class type for a simple block we should just get an appropriate simple config back. >>> cfg = ReflowConfig.from_dict({"comma": {"spacing_before": "touch"}}) >>> cfg.get_block_config({"comma"}) # doctest: +ELLIPSIS BlockConfig(spacing_before='touch', spacing_after='single', ...) """ # set intersection to get the class types which matter configured_types = self.config_types.intersection(block_class_types) # Start with a default config. block_config = BlockConfig() # Update with the config from any specific classes. # First: With the types of any parent segments where # we're at one end (if depth info provided). if depth_info: parent_start, parent_end = True, True for idx, key in enumerate(depth_info.stack_hashes[::-1]): # Work out if we're allowed to claim the parent. if depth_info.stack_positions[key].type not in ("solo", "start"): parent_start = False if depth_info.stack_positions[key].type not in ("solo", "end"): parent_end = False if not (parent_start or parent_end): break # Get corresponding classes. parent_classes = depth_info.stack_class_types[-1 - idx] configured_parent_types = self.config_types.intersection(parent_classes) # Claim the _before_ config if at the start. if parent_start: for seg_type in configured_parent_types: block_config.incorporate( before=self._config_dict[seg_type].get("spacing_before") ) # Claim the _after_ config if at the end. if parent_end: for seg_type in configured_parent_types: block_config.incorporate( after=self._config_dict[seg_type].get("spacing_after") ) # Second: With the types of the raw segment itself. # Unless someone is doing something complicated with their configuration # there should only be one. # TODO: Extend (or at least harden) this code to handle multiple # configured (and matched) types much better. for seg_type in configured_types: block_config.incorporate(config=self._config_dict[seg_type]) return block_config sqlfluff-2.3.5/src/sqlfluff/utils/reflow/depthmap.py000066400000000000000000000156131451700765000226010ustar00rootroot00000000000000"""The DepthMap class is an enriched sequence of raw segments.""" import logging from dataclasses import dataclass from typing import Dict, FrozenSet, List, Sequence, Tuple, Type from sqlfluff.core.parser import BaseSegment from sqlfluff.core.parser.segments.base import PathStep from sqlfluff.core.parser.segments.raw import RawSegment reflow_logger = logging.getLogger("sqlfluff.rules.reflow") @dataclass(frozen=True) class StackPosition: """An element of the stack_positions property of DepthInfo.""" idx: int len: int type: str @staticmethod def _stack_pos_interpreter(path_step: PathStep) -> str: """Interpret a path step for stack_positions.""" # If no code, then no. if not path_step.code_idxs: return "" # If there's only one code element, this must be it. elif len(path_step.code_idxs) == 1: return "solo" # Check for whether first or last code element. # NOTE: code_idxs is always sorted because of how it's constructed. # That means the lowest is always as the start and the highest at the end. elif path_step.idx == path_step.code_idxs[0]: return "start" elif path_step.idx == path_step.code_idxs[-1]: return "end" else: return "" # NOTE: Empty string evaluates as falsy. @classmethod def from_path_step( cls: Type["StackPosition"], path_step: PathStep ) -> "StackPosition": """Interpret a PathStep to construct a StackPosition. 
The reason we don't just use the same object is partly to interpret it a little more, but also to drop the reference to a specific segment which could induce bugs at a later stage if used. """ return cls(path_step.idx, path_step.len, cls._stack_pos_interpreter(path_step)) @dataclass(frozen=True) class DepthInfo: """An object to hold the depth information for a specific raw segment.""" stack_depth: int stack_hashes: Tuple[int, ...] # This is a convenience cache to speed up operations. stack_hash_set: FrozenSet[int] stack_class_types: Tuple[FrozenSet[str], ...] stack_positions: Dict[int, StackPosition] @classmethod def from_raw_and_stack( cls, raw: RawSegment, stack: Sequence[PathStep] ) -> "DepthInfo": """Construct from a raw and its stack.""" stack_hashes = tuple(hash(ps.segment) for ps in stack) return cls( stack_depth=len(stack), stack_hashes=stack_hashes, stack_hash_set=frozenset(stack_hashes), stack_class_types=tuple(ps.segment.class_types for ps in stack), stack_positions={ # Reuse the hash first calculated above. stack_hashes[idx]: StackPosition.from_path_step(ps) for idx, ps in enumerate(stack) }, ) def common_with(self, other: "DepthInfo") -> Tuple[int, ...]: """Get the common depth and hashes with the other.""" # We use set intersection because it's faster and hashes should be unique. common_hashes = self.stack_hash_set.intersection(other.stack_hashes) # We should expect there to be _at least_ one common ancestor, because # they should share the same file segment. If that's not the case we # we should error because it's likely a bug or programming error. assert common_hashes, "DepthInfo comparison shares no common ancestor!" common_depth = len(common_hashes) return self.stack_hashes[:common_depth] def trim(self, amount: int) -> "DepthInfo": """Return a DepthInfo object with some amount trimmed.""" if amount == 0: # The trivial case. return self new_hash_set = self.stack_hash_set.difference(self.stack_hashes[-amount:]) return self.__class__( stack_depth=self.stack_depth - amount, stack_hashes=self.stack_hashes[:-amount], stack_hash_set=new_hash_set, stack_class_types=self.stack_class_types[:-amount], stack_positions={ k: v for k, v in self.stack_positions.items() if k in new_hash_set }, ) class DepthMap: """A mapping of raw segments to depth and parent information. This class addresses two needs: - To understand configuration of segments with no whitespace within them - so the config is related to the parent and not the segment) - To map the depth of an indent points to apply some precedence for where to insert line breaks. The internals are structured around a list to do lookups and a dict (keyed with the raw segment UUID) to hold the rest. """ def __init__(self, raws_with_stack: Sequence[Tuple[RawSegment, List[PathStep]]]): self.depth_info = {} for raw, stack in raws_with_stack: self.depth_info[raw.uuid] = DepthInfo.from_raw_and_stack(raw, stack) @classmethod def from_parent(cls: Type["DepthMap"], parent: BaseSegment) -> "DepthMap": """Generate a DepthMap from all the children of a segment. NOTE: This is the most efficient way to construct a DepthMap due to caching in the BaseSegment. """ return cls(raws_with_stack=parent.raw_segments_with_ancestors) @classmethod def from_raws_and_root( cls: Type["DepthMap"], raw_segments: Sequence[RawSegment], root_segment: BaseSegment, ) -> "DepthMap": """Generate a DepthMap a sequence of raws and a root. NOTE: This is the less efficient way to construct a DepthMap as it doesn't take advantage of caching in the same way as `from_parent`. 
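        A minimal usage sketch (with ``root`` standing in for any parsed
        file segment)::

            depth_map = DepthMap.from_raws_and_root(root.raw_segments, root)
            depth_info = depth_map.get_depth_info(root.raw_segments[0])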
""" buff = [] for raw in raw_segments: stack = root_segment.path_to(raw) buff.append((raw, stack)) return cls(raws_with_stack=buff) def get_depth_info(self, raw: RawSegment) -> DepthInfo: """Get the depth info for a given segment.""" try: return self.depth_info[raw.uuid] except KeyError as err: # pragma: no cover reflow_logger.exception("Available UUIDS: %s", self.depth_info.keys()) raise KeyError( "Tried to get depth info for unknown " f"segment {raw} with UUID {raw.uuid}" ) from err def copy_depth_info( self, anchor: RawSegment, new_segment: RawSegment, trim: int = 0 ) -> None: """Copy the depth info for one segment and apply to another. This mutates the existing depth map. That's ok because it's an idempotent operation and uuids should be unique. This is used in edits to a reflow sequence when new segments are inserted and can't infer their own depth info. NOTE: we don't remove the old one because it causes no harm. """ self.depth_info[new_segment.uuid] = self.get_depth_info(anchor).trim(trim) sqlfluff-2.3.5/src/sqlfluff/utils/reflow/elements.py000066400000000000000000000774461451700765000226270ustar00rootroot00000000000000"""Dataclasses for reflow work.""" import logging from dataclasses import dataclass, field from itertools import chain from typing import Dict, List, Optional, Sequence, Set, Tuple, Type, Union, cast from sqlfluff.core.helpers.slice import slice_overlaps from sqlfluff.core.parser import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, Indent, NewlineSegment, RawSegment, SourceFix, TemplateSegment, WhitespaceSegment, ) from sqlfluff.core.rules.base import LintFix, LintResult from sqlfluff.utils.reflow.config import ReflowConfig from sqlfluff.utils.reflow.depthmap import DepthInfo # Respace Algorithms from sqlfluff.utils.reflow.respace import ( determine_constraints, handle_respace__inline_with_space, handle_respace__inline_without_space, process_spacing, ) # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def get_consumed_whitespace(segment: Optional[RawSegment]) -> Optional[str]: """A helper function to extract possible consumed whitespace. Args: segment (:obj:`RawSegment`, optional): A segment to test for suitability and extract the source representation of if appropriate. If passed None, then returns None. Returns: Returns the :code:`source_str` if the segment is of type :code:`placeholder` and has a :code:`block_type` of :code:`literal`. Otherwise None. """ if not segment or not segment.is_type("placeholder"): return None placeholder = cast(TemplateSegment, segment) if placeholder.block_type != "literal": return None return placeholder.source_str @dataclass(frozen=True) class ReflowElement: """Base reflow element class.""" segments: Tuple[RawSegment, ...] @staticmethod def _class_types(segments: Sequence[RawSegment]) -> Set[str]: return set(chain.from_iterable(seg.class_types for seg in segments)) @property def class_types(self) -> Set[str]: """Get the set of contained class types. 
Parallel to `BaseSegment.class_types`
        """
        return self._class_types(self.segments)

    @property
    def raw(self) -> str:
        """Get the current raw representation."""
        return "".join(seg.raw for seg in self.segments)

    @property
    def pos_marker(self) -> Optional[PositionMarker]:
        """Get the first position marker of the element."""
        for seg in self.segments:
            if seg.pos_marker:
                return seg.pos_marker
        return None

    def num_newlines(self) -> int:
        """Return the number of newlines in this element.

        These newlines are either newline segments or contained
        within consumed sections of whitespace. This counts both.
        """
        return sum(
            bool("newline" in seg.class_types)
            + (get_consumed_whitespace(seg) or "").count("\n")
            for seg in self.segments
        )


@dataclass(frozen=True)
class ReflowBlock(ReflowElement):
    """Class for keeping track of elements to reflow.

    This class, and its sibling :obj:`ReflowPoint`, should not
    normally be manipulated directly by rules, but instead should
    be manipulated using :obj:`ReflowSequence`.

    It holds segments to reflow and also exposes configuration
    regarding how they are expected to reflow around others. Typically
    it holds only a single element, which is usually code or a
    templated element. Because reflow operations control spacing,
    it would be very unusual for this object to be modified; as such
    it exposes relatively few methods.

    The attributes exposed are designed to be "post configuration"
    i.e. they should reflect configuration appropriately.
    """

    #: Desired spacing before this block.
    #: See :ref:`layoutspacingconfig`
    spacing_before: str
    #: Desired spacing after this block.
    #: See :ref:`layoutspacingconfig`
    spacing_after: str
    #: Desired line position for this block.
    #: See :ref:`layoutspacingconfig`
    line_position: Optional[str]
    #: Metadata on the depth of this segment within the parse tree
    #: which is used in inferring how and where line breaks should
    #: exist.
    depth_info: DepthInfo
    #: Desired spacing configurations for parent segments
    #: of the segment in this block.
    #: See :ref:`layoutspacingconfig`
    stack_spacing_configs: Dict[int, str]
    #: Desired line position configurations for parent segments
    #: of the segment in this block.
    #: See :ref:`layoutspacingconfig`
    line_position_configs: Dict[int, str]

    @classmethod
    def from_config(
        cls: Type["ReflowBlock"], segments, config: ReflowConfig, depth_info: DepthInfo
    ) -> "ReflowBlock":
        """Construct a ReflowBlock while extracting relevant configuration.

        This is the primary route to construct a ReflowBlock, as it
        allows all of the inference of the spacing and position
        configuration from the segments it contains and the
        appropriate config objects.
        """
        block_config = config.get_block_config(cls._class_types(segments), depth_info)
        stack_spacing_configs = {}
        line_position_configs = {}
        for hash, class_types in zip(
            depth_info.stack_hashes, depth_info.stack_class_types
        ):
            cfg = config.get_block_config(class_types)
            if cfg.spacing_within:
                stack_spacing_configs[hash] = cfg.spacing_within
            if cfg.line_position:
                line_position_configs[hash] = cfg.line_position
        return cls(
            segments=segments,
            spacing_before=block_config.spacing_before,
            spacing_after=block_config.spacing_after,
            line_position=block_config.line_position,
            depth_info=depth_info,
            stack_spacing_configs=stack_spacing_configs,
            line_position_configs=line_position_configs,
        )


def _indent_description(indent: str) -> str:
    """Construct a human readable description of the indent.

    NOTE: We operate assuming that the "correct" indent is
    never a mix of tabs and spaces.
That means that if the provided indent *does* contain both, we are
    likely matching a pre-existing indent and can assume that the
    *description* of that indent is non-critical. To handle that
    situation gracefully we just return "mixed indent".

    See: https://github.com/sqlfluff/sqlfluff/issues/4255
    """
    if indent == "":
        return "no indent"
    elif " " in indent and "\t" in indent:
        return "mixed indent"
    elif indent[0] == " ":
        assert all(c == " " for c in indent)
        return f"indent of {len(indent)} spaces"
    elif indent[0] == "\t":  # pragma: no cover
        assert all(c == "\t" for c in indent)
        return f"indent of {len(indent)} tabs"
    else:  # pragma: no cover
        raise NotImplementedError(f"Invalid indent construction: {indent!r}")


@dataclass(frozen=True)
class IndentStats:
    """Dataclass to hold summary of indents in a point.

    Attributes:
        impulse (int): The net change when summing the impulses
            of all the consecutive indent or dedent segments in
            a point.
        trough (int): The lowest point reached when summing the
            impulses (in order) of all the consecutive indent or
            dedent segments in a point.
        implicit_indents (tuple of int): The indent balance
            corresponding to any detected (and enabled) implicit
            indents. This follows the usual convention that indents
            are identified by their "uphill" side. A positive indent
            is identified by the indent balance _after_ and a negative
            indent is identified by the indent balance _before_.
    """

    impulse: int
    trough: int
    # Defaults to an empty tuple if unset.
    implicit_indents: Tuple[int, ...] = ()

    @classmethod
    def from_combination(
        cls, first: Optional["IndentStats"], second: "IndentStats"
    ) -> "IndentStats":
        """Create IndentStats from two consecutive IndentStats.

        This is mostly used for combining the effects of indent
        and dedent tokens either side of a comment.

        NOTE: The *first* is considered optional, because if we're
        calling this function, we're assuming that there's always
        a second.
        """
        # First check for the trivial case that we only have one.
        if not first:
            return second

        # Otherwise, combine the two into one.
        return cls(
            first.impulse + second.impulse,
            min(first.trough, first.impulse + second.trough),
            second.implicit_indents,
        )


@dataclass(frozen=True, init=False)
class ReflowPoint(ReflowElement):
    """Class for keeping track of editable elements in reflow.

    This class, and its sibling :obj:`ReflowBlock`, should not
    normally be manipulated directly by rules, but instead should
    be manipulated using :obj:`ReflowSequence`.

    It holds segments which can be changed during a reflow operation
    such as whitespace and newlines. It may also contain :obj:`Indent`
    and :obj:`Dedent` elements.

    It holds no configuration and is influenced by the blocks on either
    side, so that any operations on it usually have that configuration
    passed in as required.
    """

    _stats: IndentStats = field(init=False)

    def __init__(self, segments: Tuple[RawSegment, ...]):
        """Override the init method to calculate indent stats."""
        object.__setattr__(self, "segments", segments)
        object.__setattr__(self, "_stats", self._generate_indent_stats(segments))

    def _get_indent_segment(self) -> Optional[RawSegment]:
        """Get the current indent segment (if there).

        NOTE: This only returns _untemplated_ indents. If templated
        newline or whitespace segments are found they are skipped.
        """
        indent = None
        for seg in reversed(self.segments):
            if seg.pos_marker and not seg.pos_marker.is_literal():
                # Skip any templated elements.
                # NOTE: It must _have_ a position marker at this
                # point however to take this route.
A segment # without a position marker at all, is an edit # or insertion, and so should still be considered. continue elif seg.is_type("newline"): return indent elif seg.is_type("whitespace"): indent = seg elif "\n" in (get_consumed_whitespace(seg) or ""): # Consumed whitespace case. # NOTE: In this situation, we're not looking for # separate newline and indent segments, we're # making the assumption that they'll be together # which I think is a safe one for now. return seg # i.e. if we never find a newline, it's not an indent. return None def get_indent(self) -> Optional[str]: """Get the current indent (if there).""" # If no newlines, it's not an indent. Return None. if not self.num_newlines(): return None # If there are newlines but no indent segment. Return "". seg = self._get_indent_segment() consumed_whitespace = get_consumed_whitespace(seg) if consumed_whitespace: # pragma: no cover # Return last bit after newline. # NOTE: Not tested, because usually this would happen # directly via _get_indent_segment. return consumed_whitespace.split("\n")[-1] return seg.raw if seg else "" @staticmethod def _generate_indent_stats( segments: Sequence[RawSegment], ) -> IndentStats: """Generate the change in intended indent balance. This is the main logic which powers .get_indent_impulse() """ trough = 0 running_sum = 0 implicit_indents = [] for seg in segments: if seg.is_type("indent"): indent_seg = cast(Indent, seg) running_sum += indent_seg.indent_val # Do we need to add a new implicit indent? if indent_seg.is_implicit: implicit_indents.append(running_sum) # NOTE: We don't check for removal of implicit indents # because it's unlikely that one would be opened, and then # closed within the same point. That would probably be the # sign of a bug in the dialect. if running_sum < trough: trough = running_sum return IndentStats(running_sum, trough, tuple(implicit_indents)) def get_indent_impulse(self) -> IndentStats: """Get the change in intended indent balance from this point.""" return self._stats def indent_to( self, desired_indent: str, after: Optional[BaseSegment] = None, before: Optional[BaseSegment] = None, description: Optional[str] = None, source: Optional[str] = None, ) -> Tuple[List[LintResult], "ReflowPoint"]: """Coerce a point to have a particular indent. If the point currently contains no newlines, one will be introduced and any trailing whitespace will be effectively removed. More specifically, the newline is *inserted before* the existing whitespace, with the new indent being a *replacement* for that same whitespace. For placeholder newlines or indents we generate appropriate source fixes. """ assert "\n" not in desired_indent, "Newline found in desired indent." # Get the indent (or in the case of no newline, the last whitespace) indent_seg = self._get_indent_segment() reflow_logger.debug( "Coercing indent %s to %r. (newlines: %s)", indent_seg, desired_indent, self.num_newlines(), ) if indent_seg and indent_seg.is_type("placeholder"): # Handle the placeholder case. indent_seg = cast(TemplateSegment, indent_seg) # There should always be a newline, so assert that. assert "\n" in indent_seg.source_str # We should always replace the section _containing_ the # newline, rather than just bluntly inserting. This # makes slicing later easier. 
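            # Illustrative example (not from the original source): if the
            # placeholder's source_str is "\n    " with a source_slice of
            # slice(10, 15), then current_indent below is "    " and the
            # computed source_slice is slice(11, 15), i.e. just the indent
            # portion after the newline.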
current_indent = indent_seg.source_str.split("\n")[-1] source_slice = slice( indent_seg.pos_marker.source_slice.stop - len(current_indent), indent_seg.pos_marker.source_slice.stop, ) for existing_source_fix in indent_seg.source_fixes: # pragma: no cover if slice_overlaps(existing_source_fix.source_slice, source_slice): reflow_logger.warning( "Creating overlapping source fix. Results may be " "unpredictable and this might be a sign of a bug. " "Please report this along with your query.\n" f"({existing_source_fix.source_slice} overlaps " f"{source_slice})" ) new_source_fix = SourceFix( desired_indent, source_slice, # The templated slice is going to be a zero slice _anyway_. indent_seg.pos_marker.templated_slice, ) if new_source_fix in indent_seg.source_fixes: # pragma: no cover # NOTE: If we're trying to reapply the same fix, don't. # Just return an error without the fixes. This is probably # a bug if we're taking this route, but this clause will help # catch bugs faster if they occur. reflow_logger.warning( "Attempted to apply a duplicate source fix to %r. " "Returning this time without fix.", indent_seg.pos_marker.source_str(), ) fixes = [] new_segments = self.segments else: if current_indent: new_source_str = ( indent_seg.source_str[: -len(current_indent)] + desired_indent ) else: new_source_str = indent_seg.source_str + desired_indent assert "\n" in new_source_str new_placeholder = indent_seg.edit( source_fixes=[new_source_fix], source_str=new_source_str, ) fixes = [LintFix.replace(indent_seg, [new_placeholder])] new_segments = tuple( new_placeholder if seg is indent_seg else seg for seg in self.segments ) return [ LintResult( indent_seg, fixes, description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint(new_segments) elif self.num_newlines(): # There is already a newline. Is there an indent? if indent_seg: # Coerce existing indent to desired. if indent_seg.raw == desired_indent: # Trivial case. Indent already correct return [], self elif desired_indent == "": idx = self.segments.index(indent_seg) return [ LintResult( indent_seg, # Coerce to no indent. We don't want the indent. Delete it. [LintFix.delete(indent_seg)], description=description or "Line should not be indented.", source=source, ) ], ReflowPoint(self.segments[:idx] + self.segments[idx + 1 :]) # Standard case of an indent change. new_indent = indent_seg.edit(desired_indent) idx = self.segments.index(indent_seg) return [ LintResult( indent_seg, [LintFix.replace(indent_seg, [new_indent])], description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint( self.segments[:idx] + (new_indent,) + self.segments[idx + 1 :] ) else: # There is a newline, but no indent. Make one after the newline # Find the index of the last newline (there _will_ be one because # we checked self.num_newlines() above). # Before going further, check we have a non-zero indent. if not desired_indent: # We're trying to coerce a non-existent indent to zero. This # means we're already ok. return [], self for idx in range(len(self.segments) - 1, -1, -1): # NOTE: Must be a _literal_ newline, not a templated one. # https://github.com/sqlfluff/sqlfluff/issues/4367 if self.segments[idx].is_type("newline"): if self.segments[idx].pos_marker.is_literal(): break new_indent = WhitespaceSegment(desired_indent) return [ LintResult( # The anchor for the *result* should be the segment # *after* the newline, otherwise the location of the fix # is confusing. 
# For this method, `before` is optional, but normally # passed. If it is there, use that as the anchor # instead. We fall back to the last newline if not. before if before else self.segments[idx], # Rather than doing a `create_after` here, we're # going to do a replace. This is effectively to give a hint # to the linter that this is safe to do before a templated # placeholder. This solves some potential bugs - although # it feels a bit like a workaround. [ LintFix.replace( self.segments[idx], [self.segments[idx], new_indent] ) ], description=description or f"Expected {_indent_description(desired_indent)}.", source=source, ) ], ReflowPoint( self.segments[: idx + 1] + (new_indent,) + self.segments[idx + 1 :] ) else: # There isn't currently a newline. new_newline = NewlineSegment() new_segs: List[RawSegment] # Check for whitespace ws_seg = None for seg in self.segments[::-1]: if seg.is_type("whitespace"): ws_seg = seg if not ws_seg: # Work out the new segments. Always a newline, only whitespace if # there's a non zero indent. new_segs = [new_newline] + ( [WhitespaceSegment(desired_indent)] if desired_indent else [] ) # There isn't a whitespace segment either. We need to insert one. # Do we have an anchor? if not before and not after: # pragma: no cover raise NotImplementedError( "Not set up to handle empty points in this " "scenario without provided before/after " f"anchor: {self.segments}" ) # Otherwise make a new indent, attached to the relevant anchor. # Prefer anchoring before because it makes the labelling better. elif before: before_raw = ( cast(TemplateSegment, before).source_str if before.is_type("placeholder") else before.raw ) fix = LintFix.create_before(before, new_segs) description = description or ( "Expected line break and " f"{_indent_description(desired_indent)} " f"before {before_raw!r}." ) else: assert after # mypy hint after_raw = ( cast(TemplateSegment, after).source_str if after.is_type("placeholder") else after.raw ) fix = LintFix.create_after(after, new_segs) description = description or ( "Expected line break and " f"{_indent_description(desired_indent)} " f"after {after_raw!r}." ) new_point = ReflowPoint(tuple(new_segs)) anchor = before else: # There is whitespace. Coerce it to the right indent and add # a newline _before_. In the edge case that we're coercing to # _no indent_, edit existing indent to be the newline and leave # it there. if desired_indent == "": new_segs = [new_newline] else: new_segs = [new_newline, ws_seg.edit(desired_indent)] idx = self.segments.index(ws_seg) if not description: # Prefer before, because it makes the anchoring better. if before: description = ( "Expected line break and " f"{_indent_description(desired_indent)} " f"before {before.raw!r}." ) elif after: description = ( "Expected line break and " f"{_indent_description(desired_indent)} " f"after {after.raw!r}." ) else: # pragma: no cover # NOTE: Doesn't have test coverage because there's # normally an `after` or `before` value, so this # clause is unused. description = ( "Expected line break and " f"{_indent_description(desired_indent)}." 
) fix = LintFix.replace(ws_seg, new_segs) new_point = ReflowPoint( self.segments[:idx] + tuple(new_segs) + self.segments[idx + 1 :] ) anchor = ws_seg return [ LintResult(anchor, fixes=[fix], description=description, source=source) ], new_point def respace_point( self, prev_block: Optional[ReflowBlock], next_block: Optional[ReflowBlock], root_segment: BaseSegment, lint_results: List[LintResult], strip_newlines: bool = False, anchor_on: str = "before", ) -> Tuple[List[LintResult], "ReflowPoint"]: """Respace a point based on given constraints. NB: This effectively includes trailing whitespace fixes. Deletion and edit fixes are generated immediately, but creations are paused to the end and done in bulk so as not to generate conflicts. Note that the `strip_newlines` functionality exists here as a slight exception to pure respacing, but as a very simple case of positioning line breaks. The default operation of `respace` does not enable it, however it exists as a convenience for rules which wish to use it. """ existing_results = lint_results[:] pre_constraint, post_constraint, strip_newlines = determine_constraints( prev_block, next_block, strip_newlines ) reflow_logger.debug("* Respacing: %r @ %s", self.raw, self.pos_marker) # The buffer is used to create the new reflow point to return segment_buffer, last_whitespace, new_results = process_spacing( list(self.segments), strip_newlines ) # Check for final trailing whitespace (which otherwise looks like an indent). if next_block and "end_of_file" in next_block.class_types and last_whitespace: new_results.append( LintResult( last_whitespace, [LintFix.delete(last_whitespace)], description="Unnecessary trailing whitespace at end of file.", ) ) segment_buffer.remove(last_whitespace) last_whitespace = None # Is there a newline? # NOTE: We do this based on the segment buffer rather than self.class_types # because we may have just removed any present newlines in the buffer. if ( any(seg.is_type("newline") for seg in segment_buffer) and not strip_newlines ) or (next_block and "end_of_file" in next_block.class_types): # Most of this section should be handled as _Indentation_. # BUT: There is one case we should handle here. # If we find that the last whitespace has a newline # before it, and the position markers imply there was # a removal between them, then remove the whitespace. # This ensures a consistent indent. if last_whitespace: ws_idx = self.segments.index(last_whitespace) if ws_idx > 0: # NOTE: Iterate by index so that we don't slice the full range. for prev_seg_idx in range(ws_idx - 1, -1, -1): prev_seg = self.segments[prev_seg_idx] # Skip past any indents if not prev_seg.is_type("indent"): break if ( prev_seg.is_type("newline") # Not just unequal. Must be actively _before_. # NOTE: Based on working locations and prev_seg.get_end_loc() < last_whitespace.get_start_loc() ): reflow_logger.debug( " Removing non-contiguous whitespace post removal." ) segment_buffer.remove(last_whitespace) # Ideally we should attach to an existing result. # To do that effectively, we should look for the removed # segment in the existing results. 
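                        # The matching below is a positional heuristic: a
                        # result anchored on a segment whose templated_slice
                        # ends exactly where this whitespace starts is taken
                        # to be the fix which removed the segment between them.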
temp_idx = last_whitespace.pos_marker.templated_slice.start for res in existing_results: if ( res.anchor and res.anchor.pos_marker and res.anchor.pos_marker.templated_slice.stop == temp_idx ): break else: # pragma: no cover raise NotImplementedError("Could not find removal result.") existing_results.remove(res) new_results.append( LintResult( res.anchor, fixes=res.fixes + [LintFix("delete", last_whitespace)], description=res.description, ) ) # Return the results. return existing_results + new_results, ReflowPoint(tuple(segment_buffer)) # Otherwise is this an inline case? (i.e. no newline) reflow_logger.debug( " Inline case. Constraints: %s <-> %s.", pre_constraint, post_constraint, ) # Do we at least have _some_ whitespace? if last_whitespace: # We do - is it the right size? segment_buffer, results = handle_respace__inline_with_space( pre_constraint, post_constraint, prev_block, next_block, root_segment, segment_buffer, last_whitespace, ) new_results.extend(results) else: # No. Should we insert some? # NOTE: This method operates on the existing fix buffer. segment_buffer, new_results, edited = handle_respace__inline_without_space( pre_constraint, post_constraint, prev_block, next_block, segment_buffer, existing_results + new_results, anchor_on=anchor_on, ) existing_results = [] if edited: reflow_logger.debug(" Modified result buffer: %s", new_results) # Only log if we actually made a change. if new_results: reflow_logger.debug(" New Results: %s", new_results) return existing_results + new_results, ReflowPoint(tuple(segment_buffer)) ReflowSequenceType = List[Union[ReflowBlock, ReflowPoint]] sqlfluff-2.3.5/src/sqlfluff/utils/reflow/helpers.py000066400000000000000000000036771451700765000224500ustar00rootroot00000000000000"""Helper utilities for reflow.""" import logging from itertools import chain from typing import Iterable, List from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.rules.base import LintFix, LintResult # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def fixes_from_results(results: Iterable[LintResult]) -> List[LintFix]: """Return a list of fixes from an iterable of LintResult.""" return list(chain.from_iterable(result.fixes for result in results)) def pretty_segment_name(segment: BaseSegment) -> str: """Get a nicely formatted name of the segment.""" if segment.is_type("symbol"): # In a symbol reference, show the raw value and type. # (With underscores as spaces) return segment.get_type().replace("_", " ") + f" {segment.raw!r}" elif segment.is_type("keyword"): # Reference keywords as keywords. return f"{segment.raw!r} keyword" else: # Reference other segments just by their type. # (With underscores as spaces) return segment.get_type().replace("_", " ") def deduce_line_indent(raw_segment: RawSegment, root_segment: BaseSegment) -> str: """Given a raw segment, deduce the indent of its line.""" seg_idx = root_segment.raw_segments.index(raw_segment) indent_seg = None # Use range and a lookup here because it's more efficient than slicing # as we only need a subset of the long series. 
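    # Illustrative example (not from the original source): for the source
    # "SELECT\n  a", deducing the indent of the raw segment "a" walks
    # backwards from it, records the whitespace "  " as indent_seg, stops
    # at the newline and returns "  ".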
for idx in range(seg_idx, -1, -1): seg = root_segment.raw_segments[idx] if seg.is_code: indent_seg = None elif seg.is_type("whitespace"): indent_seg = seg elif seg.is_type("newline"): break reflow_logger.debug("Deduced indent for %s as %s", raw_segment, indent_seg) return indent_seg.raw if indent_seg else "" sqlfluff-2.3.5/src/sqlfluff/utils/reflow/rebreak.py000066400000000000000000000550331451700765000224120ustar00rootroot00000000000000"""Static methods to support ReflowSequence.rebreak().""" import logging from dataclasses import dataclass from typing import List, Tuple, Type, cast from sqlfluff.core.parser import BaseSegment from sqlfluff.core.rules import LintFix, LintResult from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint, ReflowSequenceType from sqlfluff.utils.reflow.helpers import ( deduce_line_indent, fixes_from_results, pretty_segment_name, ) # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") @dataclass(frozen=True) class _RebreakSpan: """A location within a sequence to consider rebreaking.""" target: BaseSegment start_idx: int end_idx: int line_position: str strict: bool @dataclass(frozen=True) class _RebreakIndices: """Indices of points for a _RebreakLocation.""" dir: int adj_pt_idx: int newline_pt_idx: int pre_code_pt_idx: int @classmethod def from_elements( cls: Type["_RebreakIndices"], elements: ReflowSequenceType, start_idx: int, dir: int, ) -> "_RebreakIndices": """Iterate through the elements to deduce important point indices.""" assert dir in (1, -1), "Direction must be a unit direction (i.e. 1 or -1)." # Limit depends on the direction limit = 0 if dir == -1 else len(elements) # The adjacent point is just the next one. adj_point_idx = start_idx + dir # The newline point is next. We hop in 2s because we're checking # only points, which alternate with blocks. for newline_point_idx in range(adj_point_idx, limit, 2 * dir): if "newline" in elements[newline_point_idx].class_types or any( seg.is_code for seg in elements[newline_point_idx + dir].segments ): break # Finally we look for the point preceding the next code element. for pre_code_point_idx in range(newline_point_idx, limit, 2 * dir): if any(seg.is_code for seg in elements[pre_code_point_idx + dir].segments): break return cls(dir, adj_point_idx, newline_point_idx, pre_code_point_idx) @dataclass(frozen=True) class _RebreakLocation: """A location within a sequence to rebreak, with metadata.""" target: BaseSegment prev: _RebreakIndices next: _RebreakIndices line_position: str strict: bool @classmethod def from_span( cls: Type["_RebreakLocation"], span: _RebreakSpan, elements: ReflowSequenceType ) -> "_RebreakLocation": """Expand a span to a location.""" return cls( span.target, _RebreakIndices.from_elements(elements, span.start_idx, -1), _RebreakIndices.from_elements(elements, span.end_idx, 1), span.line_position, span.strict, ) def pretty_target_name(self) -> str: """Get a nicely formatted name of the target.""" return pretty_segment_name(self.target) def has_templated_newline(self, elements: ReflowSequenceType) -> bool: """Is either side a templated newline? If either side has a templated newline, then that's ok too. The intent here is that if the next newline is a _templated_ one, then in the source there will be a tag ({{ tag }}), which acts like _not having a newline_. """ # Check the _last_ newline of the previous point. 
# Slice backward to search in reverse. for seg in elements[self.prev.newline_pt_idx].segments[::-1]: if seg.is_type("newline"): if not seg.pos_marker.is_literal(): return True break # Check the _first_ newline of the next point. for seg in elements[self.next.newline_pt_idx].segments: if seg.is_type("newline"): if not seg.pos_marker.is_literal(): return True break return False def has_inappropriate_newlines( self, elements: ReflowSequenceType, strict: bool = False ) -> bool: """Is the span surrounded by one (but not two) line breaks? Args: elements: The elements of the ReflowSequence this element is taken from to allow comparison. strict (:obj:`bool`): If set to true, this will not allow the case where there aren't newlines on either side. """ # Here we use the newline index, not # just the adjacent point, so that we can see past comments. n_prev_newlines = elements[self.prev.newline_pt_idx].num_newlines() n_next_newlines = elements[self.next.newline_pt_idx].num_newlines() newlines_on_neither_side = n_prev_newlines + n_next_newlines == 0 newlines_on_both_sides = n_prev_newlines > 0 and n_next_newlines > 0 return ( # If there isn't a newline on either side then carry # on, unless it's strict. (newlines_on_neither_side and not strict) # If there is a newline on BOTH sides. That's ok. or newlines_on_both_sides ) def identify_rebreak_spans( element_buffer: ReflowSequenceType, root_segment: BaseSegment ) -> List[_RebreakSpan]: """Identify areas in file to rebreak. A span here is a block, or group of blocks which have explicit configs for their line position, either directly as raw segments themselves or by virtue of one of their parent segments. """ spans: List[_RebreakSpan] = [] # We'll need at least two elements each side, so constrain # our range accordingly. for idx in range(2, len(element_buffer) - 2): # Only evaluate blocks: elem = element_buffer[idx] # Only evaluate blocks if not isinstance(elem, ReflowBlock): continue # Does the element itself have config? (The easy case) if elem.line_position: # We should check whether this is a valid place to break based # on whether it's in a templated tag. If it's not a literal, then skip # it. # TODO: We probably only care if the side of the element that we would # break at (i.e. the start if it's `leading` or the end if it's # `trailing`), but we'll go with the blunt logic for simplicity first. if not elem.segments[0].pos_marker.is_literal(): reflow_logger.debug( " ! Skipping rebreak span on %s because " "non-literal location.", elem.segments[0], ) continue # Blocks should only have one segment so it's easy to pick it. spans.append( _RebreakSpan( elem.segments[0], idx, idx, # NOTE: this isn't pretty but until it needs to be more # complex, this works. elem.line_position.split(":")[0], elem.line_position.endswith("strict"), ) ) # Do any of its parents have config, and are we at the start # of them? for key in elem.line_position_configs.keys(): # If we're not at the start of the segment, then pass. if elem.depth_info.stack_positions[key].idx != 0: continue # Can we find the end? # NOTE: It's safe to look right to the end here rather than up to # -2 because we're going to end up stepping back by two in the # complicated cases. for end_idx in range(idx, len(element_buffer)): end_elem = element_buffer[end_idx] final_idx = None if not isinstance(end_elem, ReflowBlock): continue elif key not in end_elem.depth_info.stack_positions: # If we get here, it means the last block was the end. 
# NOTE: This feels a little hacky, but it's because of a limitation
                    # in detecting the "end" and "solo" markers effectively in larger
                    # sections.
                    final_idx = end_idx - 2  # pragma: no cover
                elif end_elem.depth_info.stack_positions[key].type in ("end", "solo"):
                    final_idx = end_idx

                if final_idx is not None:
                    # Found the end. Add it to the stack.
                    # We reference the appropriate element from the parent stack.
                    target_depth = elem.depth_info.stack_hashes.index(key)
                    target = root_segment.path_to(element_buffer[idx].segments[0])[
                        target_depth
                    ].segment
                    spans.append(
                        _RebreakSpan(
                            target,
                            idx,
                            final_idx,
                            # NOTE: this isn't pretty but until it needs to be more
                            # complex, this works.
                            elem.line_position_configs[key].split(":")[0],
                            elem.line_position_configs[key].endswith("strict"),
                        )
                    )
                    break

            # If we find the start, but not the end, it's not a problem, but
            # we won't be rebreaking this span. This is important so that we
            # don't rebreak part of something without the context of what's
            # in the rest of it. We continue without adding it to the buffer.
    return spans


def rebreak_sequence(
    elements: ReflowSequenceType, root_segment: BaseSegment
) -> Tuple[ReflowSequenceType, List[LintResult]]:
    """Reflow line breaks within a sequence.

    Initially this only _moves_ existing segments
    around line breaks (e.g. for operators and commas),
    but eventually this method should also handle line
    length considerations too.

    This intentionally does *not* handle indentation,
    as the existing indents are assumed to be correct.
    """
    lint_results: List[LintResult] = []
    fixes: List[LintFix] = []
    elem_buff: ReflowSequenceType = elements.copy()

    # Given a sequence we should identify the objects which
    # make sense to rebreak. That includes any raws with config,
    # but also any parent segments which have config and we can
    # find both ends for. Given those spans, we then need to find
    # the points either side of them and then the blocks either
    # side to respace them at the same time.

    # 1. First find appropriate spans.
    spans = identify_rebreak_spans(elem_buff, root_segment)
    # The spans give us the edges of operators, but for line positioning we need
    # to handle comments differently. There are two other important points:
    # 1. The next newline outward before code (but passing over comments).
    # 2. The point before the next _code_ segment (ditto comments).
    locations = []
    for span in spans:
        try:
            locations.append(_RebreakLocation.from_span(span, elem_buff))
        # If we try and create a location from an incomplete span (i.e. one
        # where we're unable to find the next newline effectively), then
        # we'll get an exception. If we do - skip that one - we won't be
        # able to effectively work with it even if we could construct it.
        except UnboundLocalError:
            pass

    # Handle each span:
    for loc in locations:
        reflow_logger.debug(
            "Handling Rebreak Span (%r: %s): %r",
            loc.line_position,
            loc.target,
            "".join(
                elem.raw
                for elem in elem_buff[
                    loc.prev.pre_code_pt_idx - 1 : loc.next.pre_code_pt_idx + 2
                ]
            ),
        )

        if loc.has_inappropriate_newlines(elem_buff, strict=loc.strict):
            continue

        if loc.has_templated_newline(elem_buff):
            continue

        # Points and blocks either side are just offsets from the indices.
        prev_point = cast(ReflowPoint, elem_buff[loc.prev.adj_pt_idx])
        next_point = cast(ReflowPoint, elem_buff[loc.next.adj_pt_idx])

        # So we know we have a preference, is it ok?
        if loc.line_position == "leading":
            if elem_buff[loc.prev.newline_pt_idx].num_newlines():
                # We're good. It's already leading.
                continue

            # Generate the text for any issues.
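            # e.g. for a binary operator the description below might read
            # (illustrative): "Found trailing binary operator. Expected
            # only leading near line breaks."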
pretty_name = loc.pretty_target_name() if loc.strict: # pragma: no cover # TODO: The 'strict' option isn't widely tested yet. desc = f"{pretty_name.capitalize()} should always start a new line." else: desc = ( f"Found trailing {pretty_name}. Expected only leading " "near line breaks." ) # Is it the simple case with no comments between the # old and new desired locations and only a single following # whitespace? if ( loc.next.adj_pt_idx == loc.next.pre_code_pt_idx and elem_buff[loc.next.newline_pt_idx].num_newlines() == 1 ): reflow_logger.debug(" Trailing Easy Case") # Simple case. No comments. # Strip newlines from the next point. Apply the indent to # the previous point. new_results, prev_point = prev_point.indent_to( next_point.get_indent() or "", before=loc.target ) new_results, next_point = next_point.respace_point( cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx + 1]), root_segment=root_segment, lint_results=new_results, strip_newlines=True, ) # Update the points in the buffer elem_buff[loc.prev.adj_pt_idx] = prev_point elem_buff[loc.next.adj_pt_idx] = next_point else: reflow_logger.debug(" Trailing Tricky Case") # Otherwise we've got a tricky scenario where there are comments # to negotiate around. In this case, we _move the target_ # rather than just adjusting the whitespace. # Delete the existing position of the target, and # the _preceding_ point. fixes.append(LintFix.delete(loc.target)) for seg in elem_buff[loc.prev.adj_pt_idx].segments: fixes.append(LintFix.delete(seg)) # We always reinsert after the first point, but respace # the inserted point to ensure it's the right size given # configs. new_results, new_point = ReflowPoint(()).respace_point( cast(ReflowBlock, elem_buff[loc.next.adj_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.next.pre_code_pt_idx + 1]), root_segment=root_segment, lint_results=[], anchor_on="after", ) # Handle the potential case of an empty point. # https://github.com/sqlfluff/sqlfluff/issues/4184 for i in range(loc.next.pre_code_pt_idx): if elem_buff[loc.next.pre_code_pt_idx - i].segments: create_anchor = elem_buff[ loc.next.pre_code_pt_idx - i ].segments[-1] break else: # pragma: no cover # NOTE: We don't test this because we *should* always find # _something_ to anchor the creation on, even if we're # unlucky enough not to find it on the first pass. raise NotImplementedError("Could not find anchor for creation.") fixes.append( LintFix.create_after( create_anchor, [loc.target], ) ) elem_buff = ( elem_buff[: loc.prev.adj_pt_idx] + elem_buff[loc.next.adj_pt_idx : loc.next.pre_code_pt_idx + 1] + elem_buff[ loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx ] # the target + [new_point] + elem_buff[loc.next.pre_code_pt_idx + 1 :] ) elif loc.line_position == "trailing": if elem_buff[loc.next.newline_pt_idx].num_newlines(): # We're good, it's already trailing. continue # Generate the text for any issues. pretty_name = loc.pretty_target_name() if loc.strict: # pragma: no cover # TODO: The 'strict' option isn't widely tested yet. desc = ( f"{pretty_name.capitalize()} should always be at the end of a line." ) else: desc = ( f"Found leading {pretty_name}. Expected only trailing " "near line breaks." ) # Is it the simple case with no comments between the # old and new desired locations and only one previous newline? if ( loc.prev.adj_pt_idx == loc.prev.pre_code_pt_idx and elem_buff[loc.prev.newline_pt_idx].num_newlines() == 1 ): reflow_logger.debug(" Leading Easy Case") # Simple case. No comments. 
# Strip newlines from the previous point. Apply the indent # to the next point. new_results, next_point = next_point.indent_to( prev_point.get_indent() or "", after=loc.target ) new_results, prev_point = prev_point.respace_point( cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]), root_segment=root_segment, lint_results=new_results, strip_newlines=True, ) # Update the points in the buffer elem_buff[loc.prev.adj_pt_idx] = prev_point elem_buff[loc.next.adj_pt_idx] = next_point else: reflow_logger.debug(" Leading Tricky Case") # Otherwise we've got a tricky scenario where there are comments # to negotiate around. In this case, we _move the target_ # rather than just adjusting the whitespace. # Delete the existing position of the target, and # the _following_ point. fixes.append(LintFix.delete(loc.target)) for seg in elem_buff[loc.next.adj_pt_idx].segments: fixes.append(LintFix.delete(seg)) # We always reinsert before the first point, but respace # the inserted point to ensure it's the right size given # configs. new_results, new_point = ReflowPoint(()).respace_point( cast(ReflowBlock, elem_buff[loc.prev.pre_code_pt_idx - 1]), cast(ReflowBlock, elem_buff[loc.prev.adj_pt_idx + 1]), root_segment=root_segment, lint_results=[], anchor_on="before", ) fixes.append( LintFix.create_before( elem_buff[loc.prev.pre_code_pt_idx].segments[0], [loc.target], ) ) elem_buff = ( elem_buff[: loc.prev.pre_code_pt_idx] + [new_point] + elem_buff[ loc.prev.adj_pt_idx + 1 : loc.next.adj_pt_idx ] # the target + elem_buff[loc.prev.pre_code_pt_idx : loc.prev.adj_pt_idx + 1] + elem_buff[loc.next.adj_pt_idx + 1 :] ) elif loc.line_position == "alone": # If we get here we can assume that the element is currently # either leading or trailing and needs to be moved onto its # own line. # Generate the text for any issues. pretty_name = loc.pretty_target_name() desc = ( f"{pretty_name.capitalize()}s should always have a line break " "both before and after." ) # First handle the following newlines first (easy). if not elem_buff[loc.next.newline_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline after in alone case") new_results, next_point = next_point.indent_to( deduce_line_indent(loc.target.raw_segments[-1], root_segment), after=loc.target, ) # Update the point in the buffer elem_buff[loc.next.adj_pt_idx] = next_point # Then handle newlines before. (hoisting past comments if needed). if not elem_buff[loc.prev.adj_pt_idx].num_newlines(): reflow_logger.debug(" Found missing newline before in alone case") # NOTE: In the case that there are comments _after_ the # target, they will be moved with it. This might break things # but there isn't an unambiguous way to do this, because we # can't be sure what the comments are referring to. # Given that, we take the simple option. 
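                # Illustrative example (not from the original source): for
                # "a UNION b" with the set operator configured as "alone",
                # this inserts the missing line break before the target,
                # reusing the indent of the line it currently sits on.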
                new_results, prev_point = prev_point.indent_to(
                    deduce_line_indent(loc.target.raw_segments[0], root_segment),
                    before=loc.target,
                )
                # Update the point in the buffer
                elem_buff[loc.prev.adj_pt_idx] = prev_point

        else:
            raise NotImplementedError(  # pragma: no cover
                f"Unexpected line_position config: {loc.line_position}"
            )

        # Consolidate results and consume fix buffer
        lint_results.append(
            LintResult(
                loc.target,
                fixes=fixes_from_results(new_results) + fixes,
                description=desc,
            )
        )
        fixes = []

    return elem_buff, lint_results
sqlfluff-2.3.5/src/sqlfluff/utils/reflow/reindent.py000066400000000000000000002701541451700765000226110ustar00rootroot00000000000000"""Methods for deducing and understanding indents."""

import logging
from collections import defaultdict
from dataclasses import dataclass
from itertools import chain
from typing import (
    DefaultDict,
    Dict,
    FrozenSet,
    Iterator,
    List,
    Optional,
    Set,
    Tuple,
    cast,
)

from sqlfluff.core.errors import SQLFluffUserError
from sqlfluff.core.helpers.slice import slice_length
from sqlfluff.core.parser import (
    BaseSegment,
    NewlineSegment,
    RawSegment,
    WhitespaceSegment,
)
from sqlfluff.core.parser.segments import Indent, SourceFix
from sqlfluff.core.parser.segments.meta import MetaSegment, TemplateSegment
from sqlfluff.core.rules.base import LintFix, LintResult
from sqlfluff.utils.reflow.elements import (
    IndentStats,
    ReflowBlock,
    ReflowPoint,
    ReflowSequenceType,
)
from sqlfluff.utils.reflow.helpers import fixes_from_results
from sqlfluff.utils.reflow.rebreak import _RebreakSpan, identify_rebreak_spans

# We're in the utils module, but users will expect reflow
# logs to appear in the context of rules. Hence it's a subset
# of the rules logger.
reflow_logger = logging.getLogger("sqlfluff.rules.reflow")


def has_untemplated_newline(point: ReflowPoint) -> bool:
    """Determine whether a point contains any literal newlines.

    NOTE: We check for standard literal newlines, but also
    potential placeholder newlines which have been consumed.
    """
    # If there are no newlines (or placeholders) at all - then False.
    if not point.class_types.intersection({"newline", "placeholder"}):
        return False

    for seg in point.segments:
        # Make sure it's not templated.
        # NOTE: An insertion won't have a pos_marker. But that
        # also means it's not templated.
        if seg.is_type("newline") and (
            not seg.pos_marker or seg.pos_marker.is_literal()
        ):
            return True
        if seg.is_type("placeholder"):
            seg = cast(TemplateSegment, seg)
            assert (
                seg.block_type == "literal"
            ), "Expected only literal placeholders in ReflowPoint."
            if "\n" in seg.source_str:
                return True
    return False


@dataclass(frozen=True)
class _IndentPoint:
    """Temporary structure for holding metadata about an indented ReflowPoint.

    We only evaluate points which either *are* line breaks or contain
    Indent/Dedent segments.
    """

    idx: int
    indent_impulse: int
    indent_trough: int
    initial_indent_balance: int
    last_line_break_idx: Optional[int]
    is_line_break: bool
    # NOTE: an "untaken indent" is referenced by the value we go *up* to.
    # i.e. An Indent segment which takes the balance from 1 to 2 but with
    # no newline is an untaken indent of value 2.
    # It also only covers untaken indents _before_ this point. If this point
    # is _also_ an untaken indent, we should be able to infer that ourselves.
    untaken_indents: Tuple[int, ...]

    @property
    def closing_indent_balance(self) -> int:
        return self.initial_indent_balance + self.indent_impulse


@dataclass
class _IndentLine:
    """Temporary structure for handling a line of indent points.
Mutable so that we can adjust the initial indent balance for things like comments and templated elements, after constructing all the metadata for the points on the line. """ initial_indent_balance: int indent_points: List[_IndentPoint] def __repr__(self) -> str: """Compressed repr method to ease logging.""" return ( f"IndentLine(iib={self.initial_indent_balance}, ipts=[" + ", ".join( f"iPt@{ip.idx}({ip.indent_impulse}, {ip.indent_trough}, " f"{ip.initial_indent_balance}, {ip.last_line_break_idx}, " f"{ip.is_line_break}, {ip.untaken_indents})" for ip in self.indent_points ) + "])" ) @classmethod def from_points(cls, indent_points: List[_IndentPoint]) -> "_IndentLine": # Catch edge case for first line where we'll start with a # block if no initial indent. if indent_points[-1].last_line_break_idx: starting_balance = indent_points[0].closing_indent_balance else: starting_balance = 0 return cls(starting_balance, indent_points) def iter_blocks(self, elements: ReflowSequenceType) -> Iterator[ReflowBlock]: # Edge case for initial lines (i.e. where last_line_break is None) if self.indent_points[-1].last_line_break_idx is None: range_slice = slice(None, self.indent_points[-1].idx) else: range_slice = slice(self.indent_points[0].idx, self.indent_points[-1].idx) for element in elements[range_slice]: if isinstance(element, ReflowPoint): continue yield element def _iter_block_segments( self, elements: ReflowSequenceType ) -> Iterator[RawSegment]: for block in self.iter_blocks(elements): yield from block.segments def is_all_comments(self, elements: ReflowSequenceType) -> bool: """Is this line made up of just comments?""" block_segments = list(self._iter_block_segments(elements)) return bool(block_segments) and all( seg.is_type("comment") for seg in block_segments ) def is_all_templates(self, elements: ReflowSequenceType) -> bool: """Is this line made up of just template elements?""" block_segments = list(self._iter_block_segments(elements)) return bool(block_segments) and all( seg.is_type("placeholder", "template_loop") for seg in block_segments ) def desired_indent_units(self, forced_indents: List[int]) -> int: """Calculate the desired indent units. This is the heart of the indentation calculations. First we work out how many previous indents are untaken. In the easy case, we just use the number of untaken indents from previous points. The more complicated example is where *this point* has both dedents *and* indents. In this case we use the `indent_trough` to prune any previous untaken indents which were above the trough at this point. After that we calculate the indent from the incoming balance, minus any relevant untaken events *plus* any previously untaken indents which have been forced (i.e. inserted by the same operation). """ if self.indent_points[0].indent_trough: # This says - purge any untaken indents which happened before # the trough (or at least only _keep_ any which would have remained). # NOTE: Minus signs are really hard to get wrong here. relevant_untaken_indents = [ i for i in self.indent_points[0].untaken_indents if i <= self.initial_indent_balance - ( self.indent_points[0].indent_impulse - self.indent_points[0].indent_trough ) ] else: relevant_untaken_indents = list(self.indent_points[0].untaken_indents) desired_indent = ( self.initial_indent_balance - len(relevant_untaken_indents) + len(forced_indents) ) reflow_logger.debug( "Desired Indent Calculation: IB: %s, RUI: %s, UIL: %s, " "iII: %s, iIT: %s. 
= %s", self.initial_indent_balance, relevant_untaken_indents, self.indent_points[0].untaken_indents, self.indent_points[0].indent_impulse, self.indent_points[0].indent_trough, desired_indent, ) return desired_indent def closing_balance(self) -> int: """The closing indent balance of the line.""" return self.indent_points[-1].closing_indent_balance def opening_balance(self) -> int: """The opening indent balance of the line. NOTE: We use the first point for the starting balance rather than the line starting balance because we're using this to detect missing lines and if the line has been corrected then we don't want to do that. """ # Edge case for first line of a file (where starting indent must be zero). if self.indent_points[-1].last_line_break_idx is None: return 0 return self.indent_points[0].closing_indent_balance def _revise_templated_lines(lines: List[_IndentLine], elements: ReflowSequenceType): """Given an initial set of individual lines. Revise templated ones. NOTE: This mutates the `lines` argument. We do this to ensure that templated lines are _somewhat_ consistent. Total consistency is very hard, given templated elements can be used in a wide range of places. What we do here is to try and take a somewhat rules based approach, but also one which should fit mostly with user expectations. To do this we have three scenarios: 1. Template tags are already on the same indent. 2. Template tags aren't, but can be hoisted without effectively crossing code to be on the same indent. This effectively does the same as "reshuffling" placeholders, whitespace and indent segments but does so without requiring intervention on the parsed file. 3. Template tags which actively cut across the tree (i.e. start and end tags aren't at the same level and can't be hoisted). In this case the tags should be indented at the lowest indent of the matching set. In doing this we have to attempt to match up template tags. This might fail. As we battle-test this feature there may be some interesting bugs which come up! In addition to properly indenting block tags, we also filter out any jinja tags which contain newlines because if we try and fix them, we'll only fix the *initial* part of it. The rest won't be seen because it's within the tag. TODO: This could be an interesting way to extend the indentation algorithm to also cover indentation within jinja tags. """ reflow_logger.debug("# Revise templated lines.") # Because we want to modify the original lines, we're going # to use their list index to keep track of them. depths = defaultdict(list) grouped = defaultdict(list) for idx, line in enumerate(lines): if line.is_all_templates(elements): # We can't assume they're all a single block. # So handle all blocks on the line. for i in range(line.indent_points[0].idx, line.indent_points[-1].idx): if isinstance(elements[i], ReflowPoint): continue # We already checked that it's all templates. segment = cast(MetaSegment, elements[i].segments[0]) assert segment.is_type("placeholder", "template_loop") # If it's not got a block uuid, it's not a block, so it # should just be indented as usual. No need to revise. # e.g. comments or variables if segment.block_uuid: grouped[segment.block_uuid].append(idx) depths[segment.block_uuid].append(line.initial_indent_balance) reflow_logger.debug( " UUID: %s @ %s = %r", segment.block_uuid, idx, segment.pos_marker.source_str(), ) # Sort through the lines, so we do to *most* indented first. 
    sorted_group_indices = sorted(
        grouped.keys(), key=lambda x: max(depths[x]), reverse=True
    )
    reflow_logger.debug("  Sorted Group UUIDs: %s", sorted_group_indices)

    for group_idx, group_uuid in enumerate(sorted_group_indices):
        reflow_logger.debug("  Evaluating Group UUID: %s", group_uuid)

        group_lines = grouped[group_uuid]

        # Check for case 1.
        if len(set(lines[idx].initial_indent_balance for idx in group_lines)) == 1:
            reflow_logger.debug("    Case 1: All the same")
            continue

        # Check for case 2.
        # In this scenario, we only need to check the adjacent points.
        # If there's any wiggle room, we pick the lowest option.
        options: List[Set[int]] = []
        for idx in group_lines:
            line = lines[idx]

            steps: Set[int] = {line.initial_indent_balance}
            # Run backward through the pre point.
            indent_balance = line.initial_indent_balance
            first_point_idx = line.indent_points[0].idx
            first_block = elements[first_point_idx + 1]

            assert first_block.segments
            first_segment = first_block.segments[0]
            if first_segment.is_type("template_loop"):
                # For template loops, don't count the line. They behave
                # strangely.
                continue

            for i in range(first_point_idx, 0, -1):
                if isinstance(elements[i], ReflowPoint):
                    for seg in elements[i].segments[::-1]:
                        if seg.is_type("indent"):
                            # If it's the one straight away, after a block_end or
                            # block_mid, skip it. We know this because it will have
                            # block_uuid.
                            if cast(Indent, seg).block_uuid:
                                continue
                            # Minus because we're going backward.
                            indent_balance -= cast(Indent, seg).indent_val
                            steps.add(indent_balance)
                # if it's anything other than a blank placeholder, break.
                # NOTE: We still need the forward version of this.
                elif not elements[i].segments[0].is_type("placeholder"):
                    break
                elif cast(TemplateSegment, elements[i].segments[0]).block_type not in (
                    "block_start",
                    "block_end",
                    "skipped_source",
                    "block_mid",
                ):
                    # Recreating this condition is hard, but we shouldn't allow any
                    # rendered content here.
                    break  # pragma: no cover

            # Run forward through the post point.
            indent_balance = line.initial_indent_balance
            last_point_idx = line.indent_points[-1].idx
            for seg in elements[last_point_idx].segments:
                if seg.is_type("indent"):
                    # If it's the one straight away, after a block_start or
                    # block_mid, skip it. We know this because it will have
                    # block_uuid.
                    if cast(Indent, seg).block_uuid:
                        continue
                    # Positive because we're going forward.
                    indent_balance += cast(Indent, seg).indent_val
                    steps.add(indent_balance)

            # NOTE: Edge case for consecutive blocks of the same type.
            # If we're next to another block which is "inner" (i.e. it has
            # already been handled), we can assume all options up to its
            # new indent are open for use.
            _case_type = None
            if first_segment.is_type("placeholder"):
                _case_type = cast(TemplateSegment, first_segment).block_type

            if _case_type in ("block_start", "block_mid"):
                # Is following _line_ AND element also a block?
                # i.e. nothing else between.
                if (
                    idx + 1 < len(lines)
                    and first_point_idx + 3 == lines[idx + 1].indent_points[0].idx + 1
                ):
                    seg = elements[first_point_idx + 3].segments[0]
                    if seg.is_type("placeholder"):
                        if cast(TemplateSegment, seg).block_type == "block_start":
                            _inter_steps = list(
                                range(
                                    line.initial_indent_balance,
                                    lines[idx + 1].initial_indent_balance,
                                )
                            )
                            reflow_logger.debug(
                                "      Precedes block. Adding Steps: %s", _inter_steps
                            )
                            steps.update(_inter_steps)

            if _case_type in ("block_end", "block_mid"):
                # Is preceding _line_ AND element also a block?
                # i.e. nothing else between.
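                # This mirrors the block_start case above, just looking
                # backwards: a preceding, already-handled block opens up the
                # intermediate indent steps as valid options here too.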
if first_point_idx - 1 == lines[idx - 1].indent_points[0].idx + 1: seg = elements[first_point_idx - 1].segments[0] if seg.is_type("placeholder"): if cast(TemplateSegment, seg).block_type == "block_end": _inter_steps = list( range( line.initial_indent_balance, lines[idx - 1].initial_indent_balance, ) ) reflow_logger.debug( " Follows block. Adding Steps: %s", _inter_steps ) steps.update(_inter_steps) reflow_logger.debug( " Line %s: Initial Balance: %s Options: %s", idx, lines[idx].initial_indent_balance, steps, ) options.append(steps) # We should also work out what all the indents are _between_ # these options and make sure we don't go above that. # Because there might be _outer_ loops, we look for spans # between blocks in this group which don't contain any blocks # from _outer_ loops. i.e. we can't just take all the lines from # first to last. last_group_line: Optional[int] = group_lines[0] # last = previous. net_balance = 0 balance_trough: Optional[int] = None temp_balance_trough: Optional[int] = None inner_lines = [] reflow_logger.debug(" Intermediate lines:") # NOTE: +1 on the last range to make sure we _do_ process the last one. for idx in range(group_lines[0] + 1, group_lines[-1] + 1): for grp in sorted_group_indices[group_idx + 1 :]: # found an "outer" group line, reset tracker. if idx in grouped[grp]: last_group_line = None net_balance = 0 temp_balance_trough = None # Unset the buffer break # Is it in this group? if idx in group_lines: # Stash the line indices of the inner lines. if last_group_line: _inner_lines = list(range(last_group_line + 1, idx)) reflow_logger.debug( " Extending Intermediates with %s", _inner_lines ) inner_lines.extend(_inner_lines) # if we have a temp balance - crystallise it if temp_balance_trough is not None: balance_trough = ( temp_balance_trough if balance_trough is None else min(balance_trough, temp_balance_trough) ) reflow_logger.debug( " + Save Trough: %s (min = %s)", temp_balance_trough, balance_trough, ) temp_balance_trough = None last_group_line = idx net_balance = 0 elif last_group_line: # It's not a group line, but we're still tracking. Update with impulses. is_subgroup_line = any( idx in grouped[grp] for grp in sorted_group_indices[:group_idx] ) for ip in lines[idx].indent_points[:-1]: # Don't count the trough on group lines we've already covered. if "placeholder" in elements[ip.idx + 1].class_types: _block_type = cast( TemplateSegment, elements[ip.idx + 1].segments[0] ).block_type if _block_type in ("block_end", "block_mid"): reflow_logger.debug( " Skipping trough before %r", _block_type ) continue if ip.indent_trough < 0 and not is_subgroup_line: # NOTE: We set it temporarily here, because if we're going # to pass an outer template loop then we should discard it. # i.e. only count intervals within inner loops. _this_through = net_balance + ip.indent_trough temp_balance_trough = ( _this_through if temp_balance_trough is None else min(temp_balance_trough, _this_through) ) reflow_logger.debug( " Stash Trough: %s (min = %s) @ %s", _this_through, temp_balance_trough, idx, ) # NOTE: We update net_balance _after_ the clause above. net_balance += ip.indent_impulse # Evaluate options. reflow_logger.debug(" Options: %s", options) overlap = set.intersection(*options) reflow_logger.debug(" Simple Overlap: %s", overlap) # Remove any options above the limit option. # We minus one from the limit, because if it comes into effect # we'll effectively remove the effects of the indents between the elements. # Is there a mutually agreeable option? 
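        # Illustrative example (not from the original source): with
        # options = [{1, 2}, {2, 3}] the overlap is {2}, so (provided the
        # balance trough stays above zero) both lines get an indent of 2.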
reflow_logger.debug(" Balance Trough: %s", balance_trough) if not overlap or (balance_trough is not None and balance_trough <= 0): # Set the indent to the minimum of the existing ones. best_indent = min(lines[idx].initial_indent_balance for idx in group_lines) reflow_logger.debug( " Case 3: Best: %s. Inner Lines: %s", best_indent, inner_lines ) # Remove one indent from all intermediate lines. # This is because we're effectively saying that these # placeholders shouldn't impact the indentation within them. for idx in inner_lines: # MUTATION lines[idx].initial_indent_balance -= 1 else: best_indent = max(overlap) reflow_logger.debug( " Case 2: Best: %s, Overlap: %s", best_indent, overlap ) # Set all the lines to this indent for idx in group_lines: # MUTATION lines[idx].initial_indent_balance = best_indent # Finally, look for any of the lines which contain newlines # inside the placeholders. We use a slice to make sure # we're iterating through a copy so that we can safely # modify the underlying list. for idx, line in enumerate(lines[:]): # Get the first segment. first_seg = elements[line.indent_points[0].idx + 1].segments[0] src_str = first_seg.pos_marker.source_str() if src_str != first_seg.raw and "\n" in src_str: reflow_logger.debug( " Removing line %s from linting as placeholder " "contains newlines.", first_seg.pos_marker.working_line_no, ) lines.remove(line) def _revise_comment_lines(lines: List[_IndentLine], elements: ReflowSequenceType): """Given an initial set of individual lines. Revise comment ones. NOTE: This mutates the `lines` argument. We do this to ensure that lines with comments are aligned to the following non-comment element. """ reflow_logger.debug("# Revise comment lines.") comment_line_buffer: List[int] = [] # Slice to avoid copying for idx, line in enumerate(lines[:]): if line.is_all_comments(elements): comment_line_buffer.append(idx) else: # Not a comment only line, if there's a buffer anchor # to this one. for comment_line_idx in comment_line_buffer: reflow_logger.debug( " Comment Only Line: %s. Anchoring to %s", comment_line_idx, idx ) # Mutate reference lines to match this one. lines[ comment_line_idx ].initial_indent_balance = line.initial_indent_balance # Reset the buffer comment_line_buffer = [] # Any trailing comments should be anchored to the baseline. for comment_line_idx in comment_line_buffer: # Mutate reference lines to match this one. lines[comment_line_idx].initial_indent_balance = 0 reflow_logger.debug( " Comment Only Line: %s. Anchoring to baseline", comment_line_idx ) def construct_single_indent(indent_unit: str, tab_space_size: int) -> str: """Construct a single indent unit.""" if indent_unit == "tab": return "\t" elif indent_unit == "space": return " " * tab_space_size else: # pragma: no cover raise SQLFluffUserError( f"Expected indent_unit of 'tab' or 'space', instead got {indent_unit}" ) def _prune_untaken_indents( untaken_indents: Tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> Tuple[int, ...]: """Update the tracking of untaken indents. This is an internal helper function for `_crawl_indent_points`. We use the `trough` of the given indent stats to remove any untaken indents which are now no longer relevant after balances are taken into account. """ # Strip any untaken indents above the new balance. # NOTE: We strip back to the trough, not just the end point # if the trough was lower than the impulse. 
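    # Illustrative worked example (values invented for explanation, not
    # drawn from the test suite): with incoming_balance=3, impulse=0,
    # trough=-1 and a following newline, the effective floor is
    # 3 + 0 + (-1) = 2, so an untaken indent recorded at 3 is stripped
    # while one recorded at 1 survives.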
ui = tuple( x for x in untaken_indents if x <= ( incoming_balance + indent_stats.impulse + indent_stats.trough if indent_stats.trough < indent_stats.impulse else incoming_balance + indent_stats.impulse ) ) # After stripping, we may have to add them back in. # NOTE: all the values in the indent_stats are relative to the incoming # indent, so we correct both of them here by using the incoming_balance. if indent_stats.impulse > indent_stats.trough and not has_newline: for i in range(indent_stats.trough, indent_stats.impulse): indent_val = incoming_balance + i + 1 if indent_val - incoming_balance not in indent_stats.implicit_indents: ui += (indent_val,) return ui def _update_crawl_balances( untaken_indents: Tuple[int, ...], incoming_balance: int, indent_stats: IndentStats, has_newline: bool, ) -> Tuple[int, Tuple[int, ...]]: """Update the tracking of untaken indents and balances. This is an internal helper function for `_crawl_indent_points`. """ new_untaken_indents = _prune_untaken_indents( untaken_indents, incoming_balance, indent_stats, has_newline ) new_balance = incoming_balance + indent_stats.impulse return new_balance, new_untaken_indents def _crawl_indent_points( elements: ReflowSequenceType, allow_implicit_indents: bool = False ) -> Iterator[_IndentPoint]: """Crawl through a reflow sequence, mapping existing indents. This is where *most* of the logic for smart indentation happens. The values returned here have a large impact on exactly how indentation is treated. NOTE: If a line ends with a comment, indent impulses are pushed to the point _after_ the comment rather than before to aid with indentation. This saves searching for them later. TODO: Once this function *works*, there's definitely headroom for simplification and optimisation. We should do that. """ last_line_break_idx = None indent_balance = 0 untaken_indents: Tuple[int, ...] = () cached_indent_stats: Optional[IndentStats] = None cached_point: Optional[_IndentPoint] = None for idx, elem in enumerate(elements): if isinstance(elem, ReflowPoint): # NOTE: The following line should never lead to an index error # because files should always have a trailing IndentBlock containing # an "end_of_file" marker, and so the final IndentPoint should always # have _something_ after it. indent_stats = IndentStats.from_combination( cached_indent_stats, elem.get_indent_impulse(), ) # If don't allow implicit indents we should remove them here. # Also, if we do - we should check for brackets. # NOTE: The reason we check following class_types is because # bracketed expressions behave a little differently and are an # exception to the normal implicit indent rules. For implicit # indents which precede bracketed expressions, the implicit indent # is treated as a normal indent. In this case the start_bracket # must be the start of the bracketed section which isn't closed # on the same line - if it _is_ closed then we keep the implicit # indents. if indent_stats.implicit_indents: unclosed_bracket = False if ( allow_implicit_indents and "start_bracket" in elements[idx + 1].class_types ): # Is it closed in the line? Iterate forward to find out. 
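                # (Illustrative summary of the scan below, with invented
                # SQL: if the "(" is still open when the line ends, the
                # implicit indent is converted to a regular indent; if the
                # bracket closes on the same line, the implicit indent is
                # kept as-is.)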
# get the stack depth next_elem = cast(ReflowBlock, elements[idx + 1]) depth = next_elem.depth_info.stack_depth for elem_j in elements[idx + 1 :]: if isinstance(elem_j, ReflowPoint): if elem_j.num_newlines() > 0: unclosed_bracket = True break elif ( "end_bracket" in elem_j.class_types and elem_j.depth_info.stack_depth == depth ): break else: # pragma: no cover unclosed_bracket = True if unclosed_bracket or not allow_implicit_indents: # Blank indent stats if not using them indent_stats = IndentStats( indent_stats.impulse, indent_stats.trough, () ) # Was there a cache? if cached_indent_stats: # If there was we can safely assume there is a cached point. assert cached_point # If there was, this is a signal that we need to yield two points. # The content of those points depends on the newlines that surround the # last segments (which will be comment block). # _leading_ comments (i.e. those preceded by a newline): Yield _before_ # _trailing_ comments (or rare "mid" comments): Yield _after_ # TODO: We might want to reconsider the treatment of comments in the # middle of lines eventually, but they're fairly unusual so not well # covered in tests as of writing. # We yield the first of those points here, and then manipulate the # indent_stats object to allow the following code to yield the other. # We can refer back to the cached point as a framework. In both # cases we use the combined impulse and trough, but we use the # current indent balance and untaken indents. if cached_point.is_line_break: # It's a leading comment. Yield all the info in that point. yield _IndentPoint( cached_point.idx, indent_stats.impulse, indent_stats.trough, indent_balance, cached_point.last_line_break_idx, True, untaken_indents, ) # Before zeroing, crystallise any effect on overall balances. indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, True ) # Set indent stats to zero because we've already yielded. indent_stats = IndentStats(0, 0, indent_stats.implicit_indents) else: # It's a trailing (or mid) comment. Yield it in the next. yield _IndentPoint( cached_point.idx, 0, 0, indent_balance, cached_point.last_line_break_idx, False, untaken_indents, ) # No need to reset indent stats. It's already good. # Reset caches. cached_indent_stats = None has_newline = False cached_point = None # Do we have a newline? has_newline = has_untemplated_newline(elem) and idx != last_line_break_idx # Construct the point we may yield indent_point = _IndentPoint( idx, indent_stats.impulse, indent_stats.trough, indent_balance, last_line_break_idx, has_newline, untaken_indents, ) # Update the last newline index if this is a newline. # NOTE: We used the previous value in the construction of the # _IndentPoint above and we only reset after that construction. if has_newline: last_line_break_idx = idx # Is the next element a comment? If so - delay the decision until we've # got any indents from after the comment too. if "comment" in elements[idx + 1].class_types: cached_indent_stats = indent_stats # Create parts of a point to use later. cached_point = indent_point # We loop around so that we don't do the untaken indent calcs yet. continue # Is it meaningful as an indent point? # i.e. Is it a line break? AND not a templated one. # NOTE: a point at idx zero is meaningful because it's like an indent. # NOTE: Last edge case. If we haven't yielded yet, but the # next element is the end of the file. Yield. 
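            # (For example: a point at idx zero carrying a leading indent,
            # or the final point of a file with no trailing newline, are
            # both still yielded by the clause below even though neither
            # contains a line break. Explanatory note only.)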
elif ( has_newline or indent_stats.impulse or indent_stats.trough or idx == 0 or elements[idx + 1].segments[0].is_type("end_of_file") ): yield indent_point # Update balances indent_balance, untaken_indents = _update_crawl_balances( untaken_indents, indent_balance, indent_stats, has_newline ) def _map_line_buffers( elements: ReflowSequenceType, allow_implicit_indents: bool = False ) -> Tuple[List[_IndentLine], List[int]]: """Map the existing elements, building up a list of _IndentLine. Returns: :obj:`tuple` of a :obj:`list` of :obj:`_IndentLine` and a :obj:`list` of :obj:`int`. The first is the main output and is designed to be used in assessing indents and their effect through a SQL file. The latter is a list of "imbalanced" indent locations, where the positive indent is untaken, but its corresponding negative indent *is* taken. """ # First build up the buffer of lines. lines = [] point_buffer = [] _previous_points = {} # Buffers to keep track of indents which are untaken on the way # up but taken on the way down. We track them explicitly so we # can force them later. #: dict of ints: maps indentation balance values to the last #: index location where they were seen. This is a working buffer #: and not directly returned by the function. untaken_indent_locs = {} #: list of ints: a list of element indices which contain untaken #: positive indents, that should be forced later because their #: corresponding negative indent _was_ taken. Several edge cases #: are excluded from this list and so not included. See code below. imbalanced_locs = [] for indent_point in _crawl_indent_points( elements, allow_implicit_indents=allow_implicit_indents ): # We evaluate all the points in a line at the same time, so # we first build up a buffer. point_buffer.append(indent_point) _previous_points[indent_point.idx] = indent_point if not indent_point.is_line_break: # If it's not a line break, we should still check whether it's # a positive untaken to keep track of them. # ...unless it's implicit. indent_stats = cast( ReflowPoint, elements[indent_point.idx] ).get_indent_impulse() if indent_point.indent_impulse > indent_point.indent_trough and not ( allow_implicit_indents and indent_stats.implicit_indents ): untaken_indent_locs[ indent_point.initial_indent_balance + indent_point.indent_impulse ] = indent_point.idx continue # If it *is* a line break, then store it. lines.append(_IndentLine.from_points(point_buffer)) # We should also evaluate whether this point inserts a newline at the close # of an indent which was untaken on the way up. # https://github.com/sqlfluff/sqlfluff/issues/4234 # Special case 1: # If we're at the end of the file we shouldn't interpret it as a line break # for problem indents, they're a bit of a special case. # Special case 2: # Bracketed expressions are a bit odd here. # e.g. # WHERE ( # foo = bar # ) # LIMIT 1 # # Technically there's an untaken indent before the opening bracket # but this layout is common practice so we're not going to force # one there even though there _is_ a line break after the closing # bracket. following_class_types = elements[indent_point.idx + 1].class_types if ( indent_point.indent_trough # End of file ends case. 
(Special case 1) and "end_of_file" not in following_class_types ): passing_indents = list( range( indent_point.initial_indent_balance, indent_point.initial_indent_balance + indent_point.indent_trough, -1, ) ) # There might be many indents at this point, but if any match, then # we should still force an indent # NOTE: We work _inward_ to check which have been taken. for i in reversed(passing_indents): # Was this outer one untaken? if i not in untaken_indent_locs: # No? Stop the loop. If we've a corresponding indent for # this dedent, we shouldn't use the same location to force # untaken indents at inner levels. break loc = untaken_indent_locs[i] # First check for bracket special case. It's less about whether # the section _ends_ with a lone bracket, and more about whether # the _starting point_ is a bracket which closes a line. If it # is, then skip this location. (Special case 2). # NOTE: We can safely "look ahead" here because we know all files # end with an IndentBlock, and we know here that `loc` refers to # an IndentPoint. if "start_bracket" in elements[loc + 1].class_types: continue # If the location was in the line we're just closing. That's # not a problem because it's an untaken indent which is closed # on the same line. if any(ip.idx == loc for ip in point_buffer): continue # If the only elements between current point and the end of the # reference line are comments, then don't trigger, it's a misplaced # indent. # First find the end of the reference line. for j in range(loc, indent_point.idx): _pt = _previous_points.get(j, None) if not _pt: continue if _pt.is_line_break: break assert _pt # Then check if all comments. if all( "comment" in elements[k].class_types for k in range(_pt.idx + 1, indent_point.idx, 2) ): # It is all comments. Ignore it. continue imbalanced_locs.append(loc) # Remove any which are now no longer relevant from the working buffer. for k in list(untaken_indent_locs.keys()): if k > indent_point.initial_indent_balance + indent_point.indent_trough: del untaken_indent_locs[k] # Reset the buffer point_buffer = [indent_point] # Handle potential final line if len(point_buffer) > 1: lines.append(_IndentLine.from_points(point_buffer)) return lines, imbalanced_locs def _deduce_line_current_indent( elements: ReflowSequenceType, last_line_break_idx: Optional[int] = None ) -> str: """Deduce the current indent string. This method accounts for both literal indents and indents consumed from the source as by potential templating tags. """ indent_seg = None if not elements[0].segments: return "" elif last_line_break_idx: indent_seg = cast( ReflowPoint, elements[last_line_break_idx] )._get_indent_segment() elif isinstance(elements[0], ReflowPoint) and elements[0].segments[ 0 ].pos_marker.working_loc == (1, 1): # No last_line_break_idx, but this is a point. It's the first line. # First check whether this is a first line with a leading # placeholder. if elements[0].segments[0].is_type("placeholder"): reflow_logger.debug(" Handling as initial leading placeholder") seg = cast(TemplateSegment, elements[0].segments[0]) # Is the placeholder a consumed whitespace? if seg.source_str.startswith((" ", "\t")): indent_seg = seg # Otherwise it's an initial leading literal whitespace. else: reflow_logger.debug(" Handling as initial leading whitespace") for indent_seg in elements[0].segments[::-1]: if indent_seg.is_type("whitespace") and not indent_seg.is_templated: break # Handle edge case of no whitespace, but with newline. 
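        # e.g. a first line which is just a bare newline: the reversed scan
        # above then ends on the newline segment itself rather than on any
        # whitespace, so we discard it here. (Invented example for
        # illustration.)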
if not indent_seg.is_type("whitespace"): indent_seg = None if not indent_seg: return "" # We have to check pos marker before checking is templated. # Insertions don't have pos_markers - so aren't templated, # but also don't support calling is_templated. if indent_seg.is_type("placeholder"): # It's a consumed indent. return cast(TemplateSegment, indent_seg).source_str.split("\n")[-1] or "" elif not indent_seg.pos_marker or not indent_seg.is_templated: # It's a literal assert "\n" not in indent_seg.raw, f"Found newline in indent: {indent_seg}" return indent_seg.raw else: # pragma: no cover # It's templated. This shouldn't happen. Segments returned by # _get_indent_segment, should be valid indents (i.e. whitespace # or placeholders for consumed whitespace). This is a bug. if indent_seg.pos_marker: reflow_logger.warning( "Segment position marker: %s: [SRC: %s, TMP:%s]", indent_seg.pos_marker, indent_seg.pos_marker.source_slice, indent_seg.pos_marker.templated_slice, ) raise NotImplementedError( "Unexpected templated indent. Report this as a bug on " f"GitHub. Segment: {indent_seg}\n" "https://github.com/sqlfluff/sqlfluff/issues/new/choose" ) def _lint_line_starting_indent( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: List[int], ) -> List[LintResult]: """Lint the indent at the start of a line. NOTE: This mutates `elements` to avoid lots of copying. """ indent_points = indent_line.indent_points # Set up the default anchor initial_point_idx = indent_points[0].idx anchor = {"before": elements[initial_point_idx + 1].segments[0]} # Find initial indent, and deduce appropriate string indent. current_indent = _deduce_line_current_indent( elements, indent_points[-1].last_line_break_idx ) desired_indent_units = indent_line.desired_indent_units(forced_indents) desired_starting_indent = desired_indent_units * single_indent initial_point = cast(ReflowPoint, elements[initial_point_idx]) if current_indent == desired_starting_indent: return [] if initial_point_idx > 0 and initial_point_idx < len(elements) - 1: # Edge case: Lone comments. Normally comments are anchored to the line # _after_ where they come. However, if the existing location _matches_ # the _preceding line_, then we will allow it. It's not the "expected" # location but it is allowable. if "comment" in elements[initial_point_idx + 1].class_types: last_indent = _deduce_line_current_indent( elements, indent_points[0].last_line_break_idx ) if len(current_indent) == len(last_indent): reflow_logger.debug(" Indent matches previous line. OK.") return [] # Edge case: Multiline comments. If the previous line was a multiline # comment and this line starts with a multiline comment, then we should # only lint the indent if it's _too small_. Otherwise we risk destroying # indentation which the logic here is not smart enough to handle. if ( "block_comment" in elements[initial_point_idx - 1].class_types and "block_comment" in elements[initial_point_idx + 1].class_types ): if len(current_indent) > len(desired_starting_indent): reflow_logger.debug(" Indent is bigger than required. OK.") return [] reflow_logger.debug( " Correcting indent @ line %s. Existing indent: %r -> %r", elements[initial_point_idx + 1].segments[0].pos_marker.working_line_no, current_indent, desired_starting_indent, ) # Initial point gets special handling if it has no newlines. 
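    # e.g. a file whose first line is "    SELECT 1": the leading whitespace
    # is the very first element with no preceding newline, so it's removed
    # below with "First line should not be indented." (Illustrative case,
    # not taken from the fixtures.)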
if indent_points[0].idx == 0 and not indent_points[0].is_line_break: init_seg = elements[indent_points[0].idx].segments[0] if init_seg.is_type("placeholder"): init_seg = cast(TemplateSegment, init_seg) # If it's a placeholder initial indent, then modify the placeholder # to remove the indent from it. src_fix = SourceFix( "", source_slice=slice(0, len(current_indent) + 1), templated_slice=slice(0, 0), ) fixes = [ LintFix.replace( init_seg, [init_seg.edit(source_fixes=[src_fix], source_str="")], ) ] else: # Otherwise it's just initial whitespace. Remove it. fixes = [LintFix.delete(seg) for seg in initial_point.segments] new_results = [ LintResult( initial_point.segments[0], fixes, description="First line should not be indented.", source="reflow.indent.existing", ) ] new_point = ReflowPoint(()) # Placeholder indents also get special treatment else: new_results, new_point = initial_point.indent_to( desired_starting_indent, source="reflow.indent.existing", **anchor, # type: ignore ) elements[initial_point_idx] = new_point return new_results def _lint_line_untaken_positive_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, imbalanced_indent_locs: List[int], ) -> Tuple[List[LintResult], List[int]]: """Check for positive indents which should have been taken.""" # First check whether this line contains any of the untaken problem points. for ip in indent_line.indent_points: if ip.idx in imbalanced_indent_locs: # Force it at the relevant position. desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) ) reflow_logger.debug( " Detected imbalanced +ve break @ line %s. Indenting to %r", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[ip.idx]) results, new_point = target_point.indent_to( desired_indent, before=elements[ip.idx + 1].segments[0], source="reflow.indent.imbalance", ) elements[ip.idx] = new_point # Keep track of the indent we forced, by returning it. return results, [ip.closing_indent_balance] # If we don't close the line higher there won't be any. starting_balance = indent_line.opening_balance() last_ip = indent_line.indent_points[-1] # Check whether it closes the opening indent. if last_ip.initial_indent_balance + last_ip.indent_trough <= starting_balance: return [], [] # It's not, we don't close out an opened indent. # NOTE: Because trailing comments should always shift their any # surrounding indentation effects to _after_ their position, we # should just be able to evaluate them safely from the end of the line. indent_points = indent_line.indent_points # Account for the closing trough. closing_trough = last_ip.initial_indent_balance + ( last_ip.indent_trough or last_ip.indent_impulse ) # Edge case: Adjust closing trough for trailing indents # after comments disrupting closing trough. _bal = 0 for elem in elements[last_ip.idx + 1 :]: if not isinstance(elem, ReflowPoint): if "comment" not in elem.class_types: break continue # Otherwise it's a point stats = elem.get_indent_impulse() # If it's positive, stop. We likely won't find enough negative to come. if stats.impulse > 0: # pragma: no cover break closing_trough = _bal + stats.trough _bal += stats.impulse # On the way up we're looking for whether the ending balance # was an untaken indent or not. If it *was* untaken, there's # a good chance that we *should* take it. # NOTE: an implicit indent would not force a newline # because it wouldn't be in the untaken_indents. 
    # It's considered _taken_ even if not.
    if closing_trough not in indent_points[-1].untaken_indents:
        # If the closing point doesn't correspond to an untaken
        # indent within the line (i.e. it _was_ taken), then
        # there won't be an appropriate place to force an indent.
        return [], []

    # The closing indent balance *does* correspond to an
    # untaken indent on this line. We *should* force a newline
    # at that position.
    for ip in indent_points:
        if ip.closing_indent_balance == closing_trough:
            target_point_idx = ip.idx
            desired_indent = single_indent * (
                ip.closing_indent_balance - len(ip.untaken_indents)
            )
            break
    else:  # pragma: no cover
        raise NotImplementedError("We should always find the relevant point.")
    reflow_logger.debug(
        " Detected missing +ve line break @ line %s. Indenting to %r",
        elements[target_point_idx + 1].segments[0].pos_marker.working_line_no,
        desired_indent,
    )
    target_point = cast(ReflowPoint, elements[target_point_idx])
    results, new_point = target_point.indent_to(
        desired_indent,
        before=elements[target_point_idx + 1].segments[0],
        source="reflow.indent.positive",
    )
    elements[target_point_idx] = new_point
    # Keep track of the indent we forced, by returning it.
    return results, [closing_trough]


def _lint_line_untaken_negative_indents(
    elements: ReflowSequenceType,
    indent_line: _IndentLine,
    single_indent: str,
    forced_indents: List[int],
) -> List[LintResult]:
    """Check for negative indents which should have been taken."""
    # If we don't close lower than we start, there won't be any.
    if indent_line.closing_balance() >= indent_line.opening_balance():
        return []

    results: List[LintResult] = []
    # On the way down we're looking for indents which *were* taken on
    # the way up, but currently aren't on the way down. We slice so
    # that the _last_ point isn't evaluated, because that's fine.
    for ip in indent_line.indent_points[:-1]:
        # Is it a line break, or a positive indent?
        if ip.is_line_break or ip.indent_impulse >= 0:
            continue

        # When using implicit indents, we may find untaken negatives which
        # aren't shallower than the line they're on. This is because they
        # were implicit on the way up and so not included in `untaken_indents`.
        # To catch them we also check that we're shallower than the start
        # of the line.
        if (
            ip.initial_indent_balance + ip.indent_trough
            >= indent_line.opening_balance()
        ):
            continue

        # It's negative, is it untaken? In the case of a multi-dedent
        # they must _all_ be untaken to take this route.
        covered_indents = set(
            range(
                ip.initial_indent_balance,
                ip.initial_indent_balance + ip.indent_trough,
                -1,
            )
        )
        untaken_indents = set(ip.untaken_indents).difference(forced_indents)
        if covered_indents.issubset(untaken_indents):
            # Yep, untaken.
            continue

        # Edge Case: Comments. Since introducing the code to push indent effects
        # to the point _after_ comments, we no longer need to detect an edge case
        # for them here. If we change that logic again in the future, so that
        # indent values are allowed before comments - that code should be
        # reintroduced here.

        # Edge Case: Semicolons. For now, semicolon placement is a little
        # more complicated than what we do here. For now we don't (by
        # default) introduce missing -ve indents before semicolons.
        # TODO: Review whether this is a good idea, or whether this should be
        # more configurable.
        # NOTE: This could potentially lead to a weird situation if two
        # statements are already on the same line. That's a bug to solve later.
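        # e.g. for "SELECT a FROM foo;" we deliberately don't force a line
        # break before the trailing ";" even though an untaken dedent sits
        # there. (Illustrative SQL, not from the test suite.)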
if elements[ip.idx + 1 :] and elements[ip.idx + 1].class_types.intersection( ("statement_terminator", "comma") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "semicolon or comma. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # Edge case: template blocks. These sometimes sit in odd places # in the parse tree so don't force newlines before them if elements[ip.idx + 1 :] and "placeholder" in elements[ip.idx + 1].class_types: # are any of those placeholders blocks? if any( cast(TemplateSegment, seg).block_type.startswith("block") for seg in elements[ip.idx + 1].segments if seg.is_type("placeholder") ): reflow_logger.debug( " Detected missing -ve line break @ line %s, before " "block placeholder. Ignoring...", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, ) continue # It's negative, not a line break and was taken on the way up. # This *should* be an indent! desired_indent = single_indent * ( ip.closing_indent_balance - len(ip.untaken_indents) + len(forced_indents) ) reflow_logger.debug( " Detected missing -ve line break @ line %s. Indenting to %r", elements[ip.idx + 1].segments[0].pos_marker.working_line_no, desired_indent, ) target_point = cast(ReflowPoint, elements[ip.idx]) new_results, new_point = target_point.indent_to( desired_indent, before=elements[ip.idx + 1].segments[0], source="reflow.indent.negative", ) elements[ip.idx] = new_point results += new_results return results def _lint_line_buffer_indents( elements: ReflowSequenceType, indent_line: _IndentLine, single_indent: str, forced_indents: List[int], imbalanced_indent_locs: List[int], ) -> List[LintResult]: """Evaluate a single set of indent points on one line. NOTE: This mutates the given `elements` and `forced_indents` input to avoid lots of copying. Order of operations: 1. Evaluate the starting indent for this line. 2. For points which aren't line breaks in the line, we evaluate them to see whether they *should* be. We separately address missing indents on the way *up* and then on the way *down*. - *Up* in this sense means where the indent balance goes up, but isn't closed again within the same line - e.g. :code:`SELECT a + (2 +` where the indent implied by the bracket isn't closed out before the end of the line. - *Down* in this sense means where we've dropped below the starting indent balance of the line - e.g. :code:`1 + 1) FROM foo` where the line starts within a bracket and then closes that *and* closes an apparent SELECT clause without a newline. This method returns fixes, including appropriate descriptions, to allow generation of LintResult objects directly from them. """ reflow_logger.info( " Line #%s [source line #%s]. idx=%s:%s. FI %s. UPI: %s.", elements[indent_line.indent_points[0].idx + 1] .segments[0] .pos_marker.working_line_no, elements[indent_line.indent_points[0].idx + 1] .segments[0] .pos_marker.source_position()[0], indent_line.indent_points[0].idx, indent_line.indent_points[-1].idx, forced_indents, imbalanced_indent_locs, ) reflow_logger.debug( " Line Content: %s", [ repr(elem.raw) for elem in elements[ indent_line.indent_points[0].idx : indent_line.indent_points[-1].idx ] ], ) reflow_logger.debug(" Evaluate Line: %s. FI %s", indent_line, forced_indents) results = [] # First, handle starting indent. results += _lint_line_starting_indent( elements, indent_line, single_indent, forced_indents ) # Second, handle potential missing positive indents. 
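    # (If any are found we bank them and return early below: once a new
    # line break has been introduced on the way up, the rest of the line
    # is best re-evaluated on a later pass.)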
new_results, new_indents = _lint_line_untaken_positive_indents( elements, indent_line, single_indent, imbalanced_indent_locs ) # If we have any, bank them and return. We don't need to check for # negatives because we know we're on the way up. if new_results: results += new_results # Keep track of any indents we forced forced_indents.extend(new_indents) return results # Third, handle potential missing negative indents. results += _lint_line_untaken_negative_indents( elements, indent_line, single_indent, forced_indents ) # Lastly remove any forced indents above the closing balance. # Iterate through a slice so we're not editing the thing # that we're iterating through. for i in forced_indents[:]: if i > indent_line.closing_balance(): forced_indents.remove(i) return results def lint_indent_points( elements: ReflowSequenceType, single_indent: str, skip_indentation_in: FrozenSet[str] = frozenset(), allow_implicit_indents: bool = False, ) -> Tuple[ReflowSequenceType, List[LintResult]]: """Lint the indent points to check we have line breaks where we should. For linting indentation - we *first* need to make sure there are line breaks in all the places there should be. This takes an input set of indent points, and inserts additional line breaks in the necessary places to make sure indentation can be valid. Specifically we're addressing two things: 1. Any untaken indents. An untaken indent is only valid if it's corresponding dedent is on the same line. If that is not the case, there should be a line break at the location of the indent and dedent. 2. The indentation of lines. Given the line breaks are in the right place, is the line indented correctly. We do these at the same time, because we can't do the second without having line breaks in the right place, but if we're inserting a line break, we need to also know how much to indent by. """ # First map the line buffers. lines: List[_IndentLine] imbalanced_indent_locs: List[int] lines, imbalanced_indent_locs = _map_line_buffers( elements, allow_implicit_indents=allow_implicit_indents ) # Revise templated indents _revise_templated_lines(lines, elements) # Revise comment indents _revise_comment_lines(lines, elements) # Skip elements we're configured to not touch (i.e. scripts) for line in lines[:]: for block in line.iter_blocks(elements): if any( skip_indentation_in.intersection(types) for types in block.depth_info.stack_class_types ): reflow_logger.debug( "Skipping line %s because it is within one of %s", line, skip_indentation_in, ) lines.remove(line) break reflow_logger.debug("# Evaluate lines for indentation.") # Last: handle each of the lines. results: List[LintResult] = [] # NOTE: forced_indents is mutated by _lint_line_buffer_indents # It's used to pass from one call to the next. forced_indents: List[int] = [] elem_buffer = elements.copy() # Make a working copy to mutate. for line in lines: line_results = _lint_line_buffer_indents( elem_buffer, line, single_indent, forced_indents, imbalanced_indent_locs ) if line_results: reflow_logger.info(" PROBLEMS:") for res in line_results: reflow_logger.info(" %s @ %s", res.source, res.anchor) reflow_logger.info(" %s", res.description) results += line_results return elem_buffer, results def _source_char_len(elements: ReflowSequenceType): """Calculate length in the source file. NOTE: This relies heavily on the sequence already being split appropriately. It will raise errors if not. TODO: There's a good chance that this might not play well with other fixes. 
If we find segments without positions then it will probably error. Those will need ironing out. TODO: This probably needs more tests. It's already the source of quite a few fiddly sections. """ char_len = 0 last_source_slice: Optional[slice] = None for seg in chain.from_iterable(elem.segments for elem in elements): # Indent tokens occasionally have strange position markers. # They also don't have length so skip them. # TODO: This is actually caused by bugs and inconsistencies # in how the source_slice is generated for the position markers # of indent and dedent tokens. That's a job for another day # however. if seg.is_type("indent"): continue # Get the source position. If there is no source position then it's # a recent edit or modification. We shouldn't evaluate it until it's # been positioned. Without a source marker we don't know how to treat # it. if not seg.pos_marker: # pragma: no cover break source_slice = seg.pos_marker.source_slice # Is there a newline in the source string? source_str = seg.pos_marker.source_str() if "\n" in source_str: # There is. Stop here. It's probably a complicated # jinja tag, so it's safer to stop here. # TODO: In future, we should probably be a little # smarter about this, but for now this is ok. Without # an algorithm for layout out code _within_ jinja tags # we won't be able to suggest appropriate fixes. char_len += source_str.index("\n") break slice_len = slice_length(source_slice) # Only update the length if it's a new slice. if source_slice != last_source_slice: # If it's got size in the template but not in the source, it's # probably an insertion. if seg.raw and not slice_len: char_len += len(seg.raw) # NOTE: Don't update the last_source_slice. elif not slice_len: # If it's not got a raw and no length, it's # irrelevant. Ignore it. It's probably a meta. continue # Otherwise if we're literal, use the raw length # because it might be an edit. elif seg.pos_marker.is_literal(): char_len += len(seg.raw) last_source_slice = source_slice # Otherwise assume it's templated code. else: char_len += slice_length(source_slice) last_source_slice = source_slice return char_len def _rebreak_priorities(spans: List[_RebreakSpan]) -> Dict[int, int]: """Process rebreak spans into opportunities to split lines. The index to insert a potential indent at depends on the line_position of the span. Infer that here and store the indices in the elements. """ rebreak_priority = {} for span in spans: if span.line_position == "leading": rebreak_indices = [span.start_idx - 1] elif span.line_position == "trailing": rebreak_indices = [span.end_idx + 1] elif span.line_position == "alone": rebreak_indices = [span.start_idx - 1, span.end_idx + 1] else: # pragma: no cover raise NotImplementedError( "Unexpected line position: %s", span.line_position ) # NOTE: Operator precedence here is hard coded. It could be # moved to configuration in the layout section in the future. # Operator precedence is fairly consistent between dialects # so for now it feels ok that it's coded here - it also wouldn't # be a breaking change at that point so no pressure to release # it early. span_raw = span.target.raw_upper priority = 6 # Default to 6 for now i.e. the same as '+' # Override priority for specific precedence. if span_raw == ",": priority = 1 elif span.target.is_type("assignment_operator"): # This one is a little rarer so not covered in tests yet. # Logic is the same as others though. 
            priority = 2  # pragma: no cover
        elif span_raw == "OR":
            priority = 3
        elif span_raw == "AND":
            priority = 4
        elif span.target.is_type("comparison_operator"):
            priority = 5
        elif span_raw in ("*", "/", "%"):
            priority = 7

        for rebreak_idx in rebreak_indices:
            rebreak_priority[rebreak_idx] = priority

    return rebreak_priority


MatchedIndentsType = DefaultDict[float, List[int]]


def _increment_balance(
    input_balance: int,
    indent_stats: IndentStats,
    elem_idx: int,
) -> Tuple[int, MatchedIndentsType]:
    """Logic for stepping through _match_indents.

    This is the part of that logic which is potentially
    fragile so is separated here into a more isolated function
    for better testing. It's very easy to get wrong and
    necessary so we don't mistake empty elements, but potentially
    fragile nonetheless.

    Returns:
        A tuple where the first element is the resulting balance
        and the second is a :obj:`defaultdict` of the new
        elements to add to `matched_indents`.

    Positive indent example:
    >>> _increment_balance(0, IndentStats(1, 0), 7)
    (1, defaultdict(<class 'list'>, {1.0: [7]}))

    Negative indent example:
    >>> _increment_balance(3, IndentStats(-1, -1), 11)
    (2, defaultdict(<class 'list'>, {3.0: [11]}))

    Double negative indent example:
    >>> _increment_balance(3, IndentStats(-2, -2), 16)
    (1, defaultdict(<class 'list'>, {3.0: [16], 2.0: [16]}))

    Dip indent example:
    >>> _increment_balance(3, IndentStats(0, -1), 21)
    (3, defaultdict(<class 'list'>, {3.0: [21]}))
    """
    balance = input_balance
    matched_indents: MatchedIndentsType = defaultdict(list)
    if indent_stats.trough < 0:  # NOTE: for negative, *trough* counts.
        # in case of more than one indent we loop and apply to all.
        for b in range(0, indent_stats.trough, -1):
            matched_indents[(balance + b) * 1.0].append(elem_idx)
        # NOTE: We carry forward the impulse, not the trough.
        # This is important for dedent+indent pairs.
        balance += indent_stats.impulse
    elif indent_stats.impulse > 0:  # NOTE: for positive, *impulse* counts.
        # in case of more than one indent we loop and apply to all.
        for b in range(0, indent_stats.impulse):
            matched_indents[(balance + b + 1) * 1.0].append(elem_idx)
        balance += indent_stats.impulse
    return balance, matched_indents


def _match_indents(
    line_elements: ReflowSequenceType,
    rebreak_priorities: Dict[int, int],
    newline_idx: int,
    allow_implicit_indents: bool = False,
) -> MatchedIndentsType:
    """Identify indent points, taking into account rebreak_priorities.

    Expect fractional keys, because of the half values for
    rebreak points.
    """
    balance = 0
    matched_indents: MatchedIndentsType = defaultdict(list)
    implicit_indents: Dict[int, Tuple[int, ...]] = {}
    for idx, e in enumerate(line_elements):
        # We only care about points, because only they contain indents.
        if not isinstance(e, ReflowPoint):
            continue

        # As usual, indents are referred to by their "uphill" side
        # so what number we store the point against depends on whether
        # it's positive or negative.
        # NOTE: Here we don't actually pass in the forward types because
        # we don't need them for the output. It doesn't make a difference.
        indent_stats = e.get_indent_impulse()
        e_idx = newline_idx - len(line_elements) + idx + 1
        # Save any implicit indents.
        if indent_stats.implicit_indents:
            implicit_indents[e_idx] = indent_stats.implicit_indents
        balance, nmi = _increment_balance(balance, indent_stats, e_idx)
        # Incorporate nmi into matched_indents
        for b, indices in nmi.items():
            matched_indents[b].extend(indices)

        # Something can be both an indent point AND a rebreak point.
        if idx in rebreak_priorities:
            # For potential rebreak options (i.e. ones without an indent)
            # we add 0.5 so that they sit *between* the varying indent
            # options. That means we split them before any of their
            # content, but don't necessarily split them when their
            # container is split.

            # Also to spread out the breaks within an indent, we further
            # add hints to distinguish between them. This is where operator
            # precedence (as defined above) actually comes into effect.
            priority = rebreak_priorities[idx]
            # Assume `priority` in range 0 - 50. So / 100 to add to 0.5.
            matched_indents[balance + 0.5 + (priority / 100)].append(e_idx)
        else:
            continue

    # Before working out the lowest option, we purge any which contain
    # ONLY the final point. That's because adding indents there won't
    # actually help the line length. There's *already* a newline there.
    for indent_level in list(matched_indents.keys()):
        if matched_indents[indent_level] == [newline_idx]:
            matched_indents.pop(indent_level)
            reflow_logger.debug(
                " purging balance of %s, it references only the final element.",
                indent_level,
            )

    # ADDITIONALLY - if implicit indents are allowed we should
    # only use them if they match another untaken point (which isn't
    # implicit, or the end of the line).
    # NOTE: This logic might be best suited to be sited elsewhere
    # when (and if) we introduce smarter choices on where to add
    # indents.
    if allow_implicit_indents:
        for indent_level in list(matched_indents.keys()):
            major_points = set(matched_indents[indent_level]).difference(
                [newline_idx], implicit_indents.keys()
            )
            if not major_points:
                matched_indents.pop(indent_level)
                reflow_logger.debug(
                    " purging balance of %s, it references implicit indents "
                    "or the final indent.",
                    indent_level,
                )

    return matched_indents


def _fix_long_line_with_comment(
    line_buffer: ReflowSequenceType,
    elements: ReflowSequenceType,
    current_indent: str,
    line_length_limit: int,
    last_indent_idx: Optional[int],
    trailing_comments: str = "before",
) -> Tuple[ReflowSequenceType, List[LintFix]]:
    """Fix long line by moving trailing comments if possible.

    This method (unlike the ones for normal lines), just returns
    a new `elements` argument rather than mutating it.
    """
    # If the comment contains a noqa, don't fix it. It's unsafe.
    if "noqa" in line_buffer[-1].segments[-1].raw:
        reflow_logger.debug(" Unfixable because noqa unsafe to move.")
        return elements, []

    # If the comment is longer than the limit _anyway_, don't move
    # it. It will still be too long.
    if len(line_buffer[-1].segments[-1].raw) + len(current_indent) > line_length_limit:
        reflow_logger.debug(" Unfixable because comment too long anyway.")
        return elements, []

    comment_seg = line_buffer[-1].segments[-1]
    first_seg = line_buffer[0].segments[0]
    last_elem_idx = elements.index(line_buffer[-1])

    assert trailing_comments in (
        "after",
        "before",
    ), f"Unexpected value for `trailing_comments`: {trailing_comments!r}"

    # The simpler case is if we're moving the comment to the line
    # _after_. In that case we just coerce the point before it to
    # be an indent.
    if trailing_comments == "after":
        anchor_point = cast(ReflowPoint, line_buffer[-2])
        results, new_point = anchor_point.indent_to(current_indent, before=comment_seg)
        elements = (
            elements[: last_elem_idx - 1] + [new_point] + elements[last_elem_idx:]
        )
        return elements, fixes_from_results(results)

    # Otherwise we're moving it up and _before_ the line, which is
    # a little more involved (but also the default).
    fixes = [
        # Remove the comment from its current position, and any
        # whitespace in the previous point.
LintFix.delete(comment_seg), *[ LintFix.delete(ws) for ws in line_buffer[-2].segments if ws.is_type("whitespace") ], ] # Are we at the start of the file? If so, there's no # indent, and also no previous segments to deal with. if last_indent_idx is None: new_point = ReflowPoint((NewlineSegment(),)) prev_elems = [] anchor = first_seg else: new_segments: Tuple[RawSegment, ...] = (NewlineSegment(),) if current_indent: new_segments += (WhitespaceSegment(current_indent),) new_point = ReflowPoint(new_segments) prev_elems = elements[: last_indent_idx + 1] anchor = elements[last_indent_idx + 1].segments[0] fixes.append( # NOTE: This looks a little convoluted, but we create # *before* a block here rather than *after* a point, # because the point may have been modified already by # reflow code and may not be a reliable anchor. LintFix.create_before( anchor, [ comment_seg, *new_point.segments, ], ) ) elements = ( prev_elems + [ line_buffer[-1], new_point, ] + line_buffer[:-2] + elements[last_elem_idx + 1 :] ) return elements, fixes def _fix_long_line_with_fractional_targets( elements: ReflowSequenceType, target_breaks: List[int], desired_indent: str ) -> List[LintResult]: """Work out fixes for splitting a long line at locations like operators. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length(). """ line_results = [] for e_idx in target_breaks: e = cast(ReflowPoint, elements[e_idx]) new_results, new_point = e.indent_to( desired_indent, after=elements[e_idx - 1].segments[-1], before=elements[e_idx + 1].segments[0], ) # NOTE: Mutation of elements. elements[e_idx] = new_point line_results += new_results return line_results def _fix_long_line_with_integer_targets( elements: ReflowSequenceType, target_breaks: List[int], line_length_limit: int, inner_indent: str, outer_indent: str, ) -> List[LintResult]: """Work out fixes for splitting a long line at locations like indents. NOTE: This mutates `elements` to avoid copying. This is a helper function within .lint_line_length(). """ line_results = [] # If we can get to the uphill indent of later break, and still be within # the line limit, then we can skip everything before it. purge_before = 0 for e_idx in target_breaks: # Is the following block already past the limit? # NOTE: We use the block because we know it will have segments. if not elements[e_idx + 1].segments[0].pos_marker: # If it doesn't have position - we should just bow out # now. It's too complicated. break # pragma: no cover if ( elements[e_idx + 1].segments[0].pos_marker.working_line_pos > line_length_limit ): # If we're past the line length limit, stop looking. break e = cast(ReflowPoint, elements[e_idx]) if e.get_indent_impulse().trough < 0: # It's negative. Skip onward. continue # If we get this far, then it's positive, but still within # the line limit. We can purge any pairs before this. purge_before = e_idx reflow_logger.debug(" ...breaks before %s unnecessary.", purge_before) # Only keep indices which are after the critical point. target_breaks = [e_idx for e_idx in target_breaks if e_idx >= purge_before] reflow_logger.debug(" Remaining breaks: %s.", target_breaks) for e_idx in target_breaks: e = cast(ReflowPoint, elements[e_idx]) indent_stats = e.get_indent_impulse() # NOTE: We check against the _impulse_ here rather than the # _trough_ because if we're about to step back up again then # it should still be indented. 
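        # e.g. a point which closes one bracket and immediately opens
        # another has a trough of -1 but an impulse of 0, and should still
        # take the inner indent. (Illustrative values.)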
if indent_stats.impulse < 0: new_indent = outer_indent # NOTE: If we're about to insert a dedent before a # comma or semicolon ... don't. They are a bit special # in being allowed to trail. if elements[e_idx + 1].class_types.intersection( ("statement_terminator", "comma") ): reflow_logger.debug(" Skipping dedent before comma or semicolon.") # We break rather than continue because this is # necessarily a step back down. break else: new_indent = inner_indent new_results, new_point = e.indent_to( new_indent, after=elements[e_idx - 1].segments[-1], before=elements[e_idx + 1].segments[0], ) # NOTE: Mutation of elements. elements[e_idx] = new_point line_results += new_results # If the balance is *also* negative, then we should also stop. # We've indented a whole section - that's enough for now. # We've already skipped over any unnecessary sections, and they shouldn't # be reassessed on the next pass. If there are later sections which *also* # need to be reindented, then we'll catch them when we come back around. if indent_stats.trough < 0: reflow_logger.debug(" Stopping as we're back down.") break return line_results def lint_line_length( elements: ReflowSequenceType, root_segment: BaseSegment, single_indent: str, line_length_limit: int, allow_implicit_indents: bool = False, trailing_comments: str = "before", ) -> Tuple[ReflowSequenceType, List[LintResult]]: """Lint the sequence to lines over the configured length. NOTE: This assumes that `lint_indent_points` has already been run. The method won't necessarily *fail* but it does assume that the current indent is correct and that indents have already been inserted where they're missing. """ # First check whether we should even be running this check. if line_length_limit <= 0: reflow_logger.debug("# Line length check disabled.") return elements, [] reflow_logger.debug("# Evaluate lines for length.") # Make a working copy to mutate. elem_buffer: ReflowSequenceType = elements.copy() line_buffer: ReflowSequenceType = [] results: List[LintResult] = [] last_indent_idx = None for i, elem in enumerate(elem_buffer): # Are there newlines in the element? # If not, add it to the buffer and wait to evaluate the line. # If yes, it's time to evaluate the line. if isinstance(elem, ReflowPoint) and ( # Is it the end of the file? # NOTE: Here, we're actually looking to see whether we're # currently on the _point before the end of the file_ rather # than actually on the final block. This is important because # the following code assumes we're on a point and not a block. # We're safe from indexing errors if we're on a point, because # we know there's always a trailing block. "end_of_file" in elem_buffer[i + 1].class_types # Or is there a newline? or has_untemplated_newline(cast(ReflowPoint, elem)) ): # In either case we want to process this, so carry on. pass else: # Otherwise build up the buffer and loop around again. line_buffer.append(elem) continue # If we don't have a buffer yet, also carry on. Nothing to lint. if not line_buffer: continue # Evaluate a line # Get the current indent. if last_indent_idx is not None: current_indent = _deduce_line_current_indent(elem_buffer, last_indent_idx) else: current_indent = "" # Get the length of all the elements on the line (other than the indent). # NOTE: This is the length in the _source_, because that's the line # length that the reader is actually looking at. char_len = _source_char_len(line_buffer) # Is the line over the limit length? 
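        # (The deduced indent is added back on here because the char count
        # above covers everything on the line *except* the leading indent.)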
line_len = len(current_indent) + char_len # NOTE: We should be able to rely on the first elements of the line having # a non-zero number of segments. If this isn't the case we may need to add # a clause to handle that scenario here. assert line_buffer[0].segments first_seg = line_buffer[0].segments[0] line_no = first_seg.pos_marker.working_line_no if line_len <= line_length_limit: reflow_logger.info( " Line #%s. Length %s <= %s. OK.", line_no, line_len, line_length_limit, ) else: reflow_logger.info( " Line #%s. Length %s > %s. PROBLEM.", line_no, line_len, line_length_limit, ) # Potential places to shorten the line are either indent locations # or segments with a defined line position (like operators). # NOTE: We make a buffer including the closing point, because we're # looking for pairs of indents and dedents. The closing dedent for one # of those pairs might be in the closing point so if we don't have it # then we'll miss any locations which have their closing dedent at # the end of the line. line_elements = line_buffer + [elem] # Type hints fixes: List[LintFix] # Identify rebreak spans first so we can work out their indentation # in the next section. # NOTE: In identifying spans, we give the method a little more than # the line, so that it can correctly identify the ends of things # accurately. It's safe to go to i+1 because there is always an # end_of_file marker at the end which we could span into. spans = identify_rebreak_spans( line_elements + [elements[i + 1]], root_segment ) reflow_logger.debug(" spans: %s", spans) rebreak_priorities = _rebreak_priorities(spans) reflow_logger.debug(" rebreak_priorities: %s", rebreak_priorities) # Identify indent points second, taking into # account rebreak_priorities. matched_indents = _match_indents( line_elements, rebreak_priorities, i, allow_implicit_indents=allow_implicit_indents, ) reflow_logger.debug(" matched_indents: %s", matched_indents) # If we don't have any matched_indents, we don't have any options. # This could be for things like comment lines. desc = f"Line is too long ({line_len} > {line_length_limit})." # Easiest option are lines ending with comments, but that aren't *all* # comments and the comment itself is shorter than the limit. # The reason for that last clause is that if the comment (plus an indent) # is already longer than the limit, then there's no point just putting it # on a new line - it will still fail - so it doesn't actually fix the issue. # Deal with them first. if ( len(line_buffer) > 1 # We can only fix _inline_ comments in this way. Others should # just be flagged as issues. and line_buffer[-1].segments[-1].is_type("inline_comment") ): reflow_logger.debug(" Handling as inline comment line.") elem_buffer, fixes = _fix_long_line_with_comment( line_buffer, elem_buffer, current_indent, line_length_limit, last_indent_idx, trailing_comments=trailing_comments, ) # Then check for cases where we have no other options. elif not matched_indents: # NOTE: In this case we have no options for shortening the line. # We'll still report a linting issue - but no fixes are provided. reflow_logger.debug(" Handling as unfixable line.") fixes = [] # Lastly deal with the "normal" case. else: # For now, the algorithm we apply isn't particularly elegant # and just finds the "outermost" opportunity to add additional # line breaks and adds them. # TODO: Make this more elegant later. 
The two obvious directions # would be to potentially add a) line breaks at multiple levels # in a single pass and b) to selectively skip levels if they're # "trivial", or if there would be a more suitable inner indent # to add first (e.g. the case of "(((((((a)))))))"). reflow_logger.debug(" Handling as normal line.") # NOTE: Double indents (or more likely dedents) will be # potentially in *multiple* sets - don't double count them # if we start doing something more clever. target_balance = min(matched_indents.keys()) desired_indent = current_indent if target_balance >= 1: desired_indent += single_indent target_breaks = matched_indents[target_balance] reflow_logger.debug( " Targeting balance of %s, indent: %r for %s", target_balance, desired_indent, target_breaks, ) # Is one of the locations the final element? If so remove it. # There's already a line break there. if i in target_breaks: target_breaks.remove(i) # Is it an "integer" indent or a fractional indent? # Integer indents (i.e. 1.0, 2.0, ...) are based on Indent and # Dedent tokens. Fractional indents (i.e. 1.5, 1.52, ...) are # based more on rebreak spans (e.g. around commas and operators). # The latter is simpler in that it doesn't change the indents, # just adds line breaks. The former is more complicated. # NOTE: Both of these methods mutate the `elem_buffer`. if target_balance % 1 == 0: line_results = _fix_long_line_with_integer_targets( elem_buffer, target_breaks, line_length_limit, desired_indent, current_indent, ) else: line_results = _fix_long_line_with_fractional_targets( elem_buffer, target_breaks, desired_indent ) # Consolidate all the results for the line into one. fixes = fixes_from_results(line_results) results.append( LintResult( # First segment on the line is the result anchor. first_seg, fixes=fixes, description=desc, source="reflow.long_line", ) ) # Regardless of whether the line was good or not, clear # the buffers ready for the next line. line_buffer = [] last_indent_idx = i return elem_buffer, results sqlfluff-2.3.5/src/sqlfluff/utils/reflow/respace.py000066400000000000000000000564631451700765000224310ustar00rootroot00000000000000"""Static methods to support ReflowPoint.respace_point().""" import logging from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.parser import ( BaseSegment, PositionMarker, RawSegment, WhitespaceSegment, ) from sqlfluff.core.rules.base import LintFix, LintResult from sqlfluff.utils.reflow.helpers import pretty_segment_name if TYPE_CHECKING: # pragma: no cover from sqlfluff.utils.reflow.elements import ReflowBlock # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") def _unpack_constraint(constraint: str, strip_newlines: bool) -> Tuple[str, bool]: """Unpack a spacing constraint. Used as a helper function in `determine_constraints`. """ # Check for deprecated options. if constraint == "inline": # pragma: no cover reflow_logger.warning( "Found 'inline' specified as a 'spacing_within' constraint. " "This setting is deprecated and has been replaced by the more " "explicit 'touch:inline'. Upgrade your configuration to " "remove this warning." ) constraint = "touch:inline" # Unless align, split. 
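    # e.g. "touch:inline" splits into the constraint "touch" plus the
    # "inline" modifier (which strips newlines), while align-style
    # constraints use colons internally and so pass through whole.
    # (Illustrative values.)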
if constraint.startswith("align"): modifier = "" else: constraint, _, modifier = constraint.partition(":") if not modifier: pass elif modifier == "inline": strip_newlines = True else: # pragma: no cover raise SQLFluffUserError(f"Unexpected constraint modifier: {constraint!r}") return constraint, strip_newlines def determine_constraints( prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], strip_newlines: bool = False, ) -> Tuple[str, str, bool]: """Given the surrounding blocks, determine appropriate constraints.""" # Start with the defaults. pre_constraint, strip_newlines = _unpack_constraint( prev_block.spacing_after if prev_block else "single", strip_newlines ) post_constraint, strip_newlines = _unpack_constraint( next_block.spacing_before if next_block else "single", strip_newlines ) # Work out the common parent segment and depth within_spacing = "" if prev_block and next_block: common = prev_block.depth_info.common_with(next_block.depth_info) # Just check the most immediate parent for now for speed. # TODO: Review whether just checking the parent is enough. # NOTE: spacing configs will be available on both sides if they're common # so it doesn't matter whether we get it from prev_block or next_block. idx = prev_block.depth_info.stack_hashes.index(common[-1]) within_constraint = prev_block.stack_spacing_configs.get(common[-1], None) if within_constraint: within_spacing, strip_newlines = _unpack_constraint( within_constraint, strip_newlines ) # If segments are expected to be touch within. Then modify # constraints accordingly. if within_spacing == "touch": # NOTE: We don't override if it's already "any" if pre_constraint != "any": pre_constraint = "touch" if post_constraint != "any": post_constraint = "touch" elif within_spacing == "any": pre_constraint = "any" post_constraint = "any" elif within_spacing == "single": pass elif within_spacing: # pragma: no cover assert prev_block raise SQLFluffUserError( f"Unexpected within constraint: {within_constraint!r} for " f"{prev_block.depth_info.stack_class_types[idx]}" ) return pre_constraint, post_constraint, strip_newlines def process_spacing( segment_buffer: List[RawSegment], strip_newlines: bool = False ) -> Tuple[List[RawSegment], Optional[RawSegment], List[LintResult]]: """Given the existing spacing, extract information and do basic pruning.""" removal_buffer: List[RawSegment] = [] result_buffer: List[LintResult] = [] last_whitespace: List[RawSegment] = [] # Loop through the existing segments looking for spacing. for seg in segment_buffer: # If it's whitespace, store it. if seg.is_type("whitespace"): last_whitespace.append(seg) # If it's a newline, react accordingly. # NOTE: This should only trigger on literal newlines. elif seg.is_type("newline", "end_of_file"): if seg.pos_marker and not seg.pos_marker.is_literal(): last_whitespace = [] reflow_logger.debug(" Skipping templated newline: %s", seg) continue # Are we stripping newlines? if strip_newlines and seg.is_type("newline"): reflow_logger.debug(" Stripping newline: %s", seg) removal_buffer.append(seg) result_buffer.append( LintResult( seg, [LintFix.delete(seg)], description="Unexpected line break." ) ) # Carry on as though it wasn't here. continue # Check if we've just passed whitespace. If we have, remove it # as trailing whitespace, both from the buffer and create a fix. 
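# For example (a hypothetical stream, for orientation only): given raw
# segments like ["foo", " ", "\n"], the " " immediately before the
# newline is treated as trailing whitespace - it is queued for removal
# and a deletion LintFix is recorded for it.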
if last_whitespace: reflow_logger.debug(" Removing trailing whitespace.") for ws in last_whitespace: removal_buffer.append(ws) result_buffer.append( LintResult( ws, [LintFix.delete(ws)], description="Unnecessary trailing whitespace.", ) ) # Regardless, unset last_whitespace. # We either just deleted it, or it's not relevant for any future # segments. last_whitespace = [] if len(last_whitespace) >= 2: reflow_logger.debug(" Removing adjoining whitespace.") # If we find multiple sequential whitespaces, it's the sign # that we've removed something. Only the first one should be # a valid indent (or the one we consider for constraints). # Remove all the following ones. for ws in last_whitespace[1:]: removal_buffer.append(ws) result_buffer.append( LintResult( ws, [LintFix.delete(ws)], description="Removing duplicate whitespace.", ) ) # Return the updated segment buffer (with removals applied), the last # whitespace and the associated fixes. return ( [s for s in segment_buffer if s not in removal_buffer], # We should have removed all other whitespace by now. last_whitespace[0] if last_whitespace else None, result_buffer, ) def _determine_aligned_inline_spacing( root_segment: BaseSegment, whitespace_seg: RawSegment, next_seg: RawSegment, next_pos: PositionMarker, segment_type: str, align_within: Optional[str], align_scope: Optional[str], ) -> str: """Work out spacing for instance of an `align` constraint.""" # Find the level of segment that we're aligning. # NOTE: Reverse slice parent_segment = None # Edge case: if next_seg has no position, we should use the position # of the whitespace for searching. if align_within: for ps in root_segment.path_to( next_seg if next_seg.pos_marker else whitespace_seg )[::-1]: if ps.segment.is_type(align_within): parent_segment = ps.segment if align_scope and ps.segment.is_type(align_scope): break if not parent_segment: reflow_logger.debug(" No Parent found for alignment case. Treat as single.") return " " # We've got a parent. Find some siblings. reflow_logger.debug(" Determining alignment within: %s", parent_segment) siblings = [] for sibling in parent_segment.recursive_crawl(segment_type): # Purge any siblings with a boundary between them if not align_scope or not any( ps.segment.is_type(align_scope) for ps in parent_segment.path_to(sibling) ): siblings.append(sibling) else: reflow_logger.debug( " Purging a sibling because they're blocked " "by a boundary: %s", sibling, ) # If the segment we're aligning has a position, use that position. # If it doesn't, use the provided one; we can't do sibling analysis without it. if next_seg.pos_marker: next_pos = next_seg.pos_marker # Purge any siblings which are either self, or on the same line but after it. _earliest_siblings: Dict[int, int] = {} for sibling in siblings[:]: _pos = sibling.pos_marker assert _pos _best_seen = _earliest_siblings.get(_pos.working_line_no, None) # If we've already seen an earlier sibling on this line, ignore the later one. if _best_seen is not None and _pos.working_line_pos > _best_seen: siblings.remove(sibling) continue # Update best seen _earliest_siblings[_pos.working_line_no] = _pos.working_line_pos # We should also purge the sibling which matches the target. if _pos.working_line_no == next_pos.working_line_no: # Is it in the same position? if _pos.working_line_pos != next_pos.working_line_pos: siblings.remove(sibling) # If there's only one sibling, we have nothing to compare to. Default to a single # space.
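# Illustrative case (hypothetical SQL, for orientation only): when
# aligning alias expressions in
#   SELECT a   AS col_a,
#          bbb AS col_b
# the two "AS ..." segments are the siblings considered here, and the
# desired spacing is derived below from the rightmost preceding code
# across those sibling lines.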
if len(siblings) <= 1: desired_space = " " reflow_logger.debug( " desired_space: %r (based on no other siblings)", desired_space, ) return desired_space # Work out the current spacing before each. last_code = None max_desired_line_pos = 0 for seg in parent_segment.raw_segments: for sibling in siblings: # NOTE: We're asserting that there must have been # a last_code. Otherwise this won't work. if ( seg.pos_marker and sibling.pos_marker and seg.pos_marker.working_loc == sibling.pos_marker.working_loc and last_code ): loc = last_code.pos_marker.working_loc_after(last_code.raw) reflow_logger.debug( " loc for %s: %s from %s", sibling, loc, last_code, ) if loc[1] > max_desired_line_pos: max_desired_line_pos = loc[1] if seg.is_code: last_code = seg desired_space = " " * ( 1 + max_desired_line_pos - whitespace_seg.pos_marker.working_line_pos ) reflow_logger.debug( " desired_space: %r (based on max line pos of %s)", desired_space, max_desired_line_pos, ) return desired_space def _extract_alignment_config( constraint: str, ) -> Tuple[str, Optional[str], Optional[str]]: """Helper function to break apart an alignment config. >>> _extract_alignment_config("align:alias_expression") ('alias_expression', None, None) >>> _extract_alignment_config("align:alias_expression:statement") ('alias_expression', 'statement', None) >>> _extract_alignment_config("align:alias_expression:statement:bracketed") ('alias_expression', 'statement', 'bracketed') """ assert ":" in constraint alignment_config = constraint.split(":") assert alignment_config[0] == "align" seg_type = alignment_config[1] align_within = alignment_config[2] if len(alignment_config) > 2 else None align_scope = alignment_config[3] if len(alignment_config) > 3 else None reflow_logger.debug( " Alignment Config: %s, %s, %s", seg_type, align_within, align_scope, ) return seg_type, align_within, align_scope def handle_respace__inline_with_space( pre_constraint: str, post_constraint: str, prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], root_segment: BaseSegment, segment_buffer: List[RawSegment], last_whitespace: RawSegment, ) -> Tuple[List[RawSegment], List[LintResult]]: """Check inline spacing is the right size. This forms one of the cases handled by .respace_point(). This code assumes: - a ReflowPoint with no newlines. - a ReflowPoint which has _some_ whitespace. Given this we apply constraints to ensure the whitespace is of an appropriate size. """ # Get some indices so that we can reference around them ws_idx = segment_buffer.index(last_whitespace) # Do we have either side set to "any" if "any" in [pre_constraint, post_constraint]: # In this instance - don't change anything. # e.g. this could mean there is a comment on one side. return segment_buffer, [] # Do we have either side set to "touch"? if "touch" in [pre_constraint, post_constraint]: # In this instance - no whitespace is correct, This # means we should delete it. segment_buffer.pop(ws_idx) if next_block: description = ( "Unexpected whitespace before " f"{pretty_segment_name(next_block.segments[0])}." ) else: # pragma: no cover # This clause has no test coverage because next_block is # normally provided. description = "Unexpected whitespace" return segment_buffer, [ LintResult( last_whitespace, [LintFix.delete(last_whitespace)], # Should make description from constraints. 
description=description, ), ] # Handle left alignment & singles if ( post_constraint.startswith("align") and next_block ) or pre_constraint == post_constraint == "single": # Determine the desired spacing, either as alignment or as a single. if post_constraint.startswith("align") and next_block: seg_type, align_within, align_scope = _extract_alignment_config( post_constraint ) next_pos: Optional[PositionMarker] if next_block.segments[0].pos_marker: next_pos = next_block.segments[0].pos_marker elif last_whitespace.pos_marker: next_pos = last_whitespace.pos_marker.end_point_marker() # These second clauses are much less likely and so are excluded from # coverage. If we find a way of covering them, that would be great # but for now they exist as backups. elif prev_block and prev_block.segments[-1].pos_marker: # pragma: no cover next_pos = prev_block.segments[-1].pos_marker.end_point_marker() else: # pragma: no cover reflow_logger.info("Unable to find position marker for alignment.") next_pos = None desired_space = " " desc = ( "Expected only single space. " "Found " f"{last_whitespace.raw!r}." ) if next_pos: desired_space = _determine_aligned_inline_spacing( root_segment, last_whitespace, next_block.segments[0], next_pos, seg_type, align_within, align_scope, ) desc = ( f"{seg_type!r} elements are expected to be aligned. Found " "incorrect whitespace before " f"{pretty_segment_name(next_block.segments[0])}: " f"{last_whitespace.raw!r}." ) else: if next_block: desc = ( "Expected only single space before " f"{pretty_segment_name(next_block.segments[0])}. Found " f"{last_whitespace.raw!r}." ) else: # pragma: no cover # This clause isn't has no test coverage because next_block is # normally provided. desc = "Expected only single space. Found " f"{last_whitespace.raw!r}." desired_space = " " new_results: List[LintResult] = [] if last_whitespace.raw != desired_space: new_seg = last_whitespace.edit(desired_space) new_results.append( LintResult( last_whitespace, [ LintFix( "replace", anchor=last_whitespace, edit=[new_seg], ) ], description=desc, ) ) segment_buffer[ws_idx] = new_seg return segment_buffer, new_results raise NotImplementedError( # pragma: no cover f"Unexpected Constraints: {pre_constraint}, {post_constraint}" ) def handle_respace__inline_without_space( pre_constraint: str, post_constraint: str, prev_block: Optional["ReflowBlock"], next_block: Optional["ReflowBlock"], segment_buffer: List[RawSegment], existing_results: List[LintResult], anchor_on: str = "before", ) -> Tuple[List[RawSegment], List[LintResult], bool]: """Ensure spacing is the right size. This forms one of the cases handled by .respace_point(). This code assumes: - a ReflowPoint with no newlines. - a ReflowPoint which _no_ whitespace. Given this we apply constraints to either confirm no spacing is required or create some of the right size. """ # Do we have either side set to "touch" or "any" if {"touch", "any"}.intersection([pre_constraint, post_constraint]): # In this instance - no whitespace is correct. # Either because there shouldn't be, or because "any" # means we shouldn't check. return segment_buffer, existing_results, False # Are we supposed to be aligning? elif post_constraint.startswith("align"): reflow_logger.debug(" Inserting Aligned Whitespace.") # TODO: We currently rely on a second pass to align # insertions. This is where we could devise alignment # in advance, but most of the alignment code relies on # having existing position markers for those insertions. 
# https://github.com/sqlfluff/sqlfluff/issues/4492 desired_space = " " added_whitespace = WhitespaceSegment(desired_space) # Is it anything other than the default case? elif not (pre_constraint == post_constraint == "single"): # pragma: no cover # TODO: This will get test coverage when configuration routines # are in properly. raise NotImplementedError( f"Unexpected Constraints: {pre_constraint}, {post_constraint}" ) else: # Default to a single whitespace reflow_logger.debug(" Inserting Single Whitespace.") added_whitespace = WhitespaceSegment() # Add it to the buffer first (the easy bit). The hard bit # is to then determine how to generate the appropriate LintFix # objects. segment_buffer.append(added_whitespace) # So special handling here. If segments either side # already exist then we don't care which we anchor on # but if one is already an insertion (as shown by a lack) # of pos_marker, then we should piggy back on that pre-existing # fix. existing_fix = None insertion = None if prev_block and not prev_block.segments[-1].pos_marker: existing_fix = "after" insertion = prev_block.segments[-1] elif next_block and not next_block.segments[0].pos_marker: existing_fix = "before" insertion = next_block.segments[0] if existing_fix: reflow_logger.debug(" Detected existing fix %s", existing_fix) if not existing_results: # pragma: no cover raise ValueError( "Fixes detected, but none passed to .respace(). " "This will cause conflicts." ) # Find the fix assert insertion for res in existing_results: # Does it contain the insertion? # TODO: This feels ugly - eq for BaseSegment is different # to uuid matching for RawSegment. Perhaps this should be # more aligned. There might be a better way of doing this. for fix in res.fixes or []: if fix.edit and insertion.uuid in [elem.uuid for elem in fix.edit]: break else: # pragma: no cover continue break else: # pragma: no cover reflow_logger.warning("Results %s", existing_results) raise ValueError(f"Couldn't find insertion for {insertion}") # Mutate the existing fix assert res assert fix assert fix in res.fixes assert fix.edit # It's going to be an edit if we've picked it up. # Mutate the fix, it's still in the same result, and that result # is still in the existing_results. if existing_fix == "before": fix.edit = [cast(BaseSegment, added_whitespace)] + fix.edit elif existing_fix == "after": fix.edit = fix.edit + [cast(BaseSegment, added_whitespace)] # No need to add new results, because we mutated the existing. return segment_buffer, existing_results, True # Otherwise... reflow_logger.debug(" Not Detected existing fix. Creating new") if prev_block and next_block: desc = ( "Expected single whitespace between " f"{pretty_segment_name(prev_block.segments[-1])} " f"and {pretty_segment_name(next_block.segments[0])}." ) else: # pragma: no cover # Something to fall back on if prev_block and next_block not provided. desc = "Expected single whitespace." # Take into account hint on where to anchor if given. if prev_block and anchor_on != "after": new_result = LintResult( # We do this shuffle, because for the CLI it's clearer if the # anchor for the error is at the point that the insertion will # happen which is the *start* of the next segment, even if # we're anchoring the fix on the previous. 
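# e.g. when inserting a space between "1" and "+": the fix itself is a
# create_after anchored on "1" (the previous block), but the lint error
# is reported against "+" (the next block), so the CLI points at the
# location where the space will appear.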
next_block.segments[0] if next_block else prev_block.segments[-1], fixes=[ LintFix( "create_after", anchor=prev_block.segments[-1], edit=[WhitespaceSegment()], ) ], description=desc, ) elif next_block: new_result = LintResult( next_block.segments[0], fixes=[ LintFix( "create_before", anchor=next_block.segments[0], edit=[WhitespaceSegment()], ) ], description=desc, ) else: # pragma: no cover raise NotImplementedError("Not set up to handle a missing _after_ and _before_.") return segment_buffer, existing_results + [new_result], True sqlfluff-2.3.5/src/sqlfluff/utils/reflow/sequence.py000066400000000000000000000615641451700765000226110ustar00rootroot00000000000000"""Dataclasses for reflow work.""" import logging from itertools import chain from typing import Iterator, List, Optional, Sequence, Tuple, Type, cast from sqlfluff.core.config import FluffConfig from sqlfluff.core.parser import BaseSegment, RawSegment from sqlfluff.core.rules.base import LintFix, LintResult from sqlfluff.utils.reflow.config import ReflowConfig from sqlfluff.utils.reflow.depthmap import DepthMap from sqlfluff.utils.reflow.elements import ( ReflowBlock, ReflowPoint, ReflowSequenceType, get_consumed_whitespace, ) from sqlfluff.utils.reflow.helpers import fixes_from_results from sqlfluff.utils.reflow.rebreak import rebreak_sequence from sqlfluff.utils.reflow.reindent import ( construct_single_indent, lint_indent_points, lint_line_length, ) # We're in the utils module, but users will expect reflow # logs to appear in the context of rules. Hence it's a subset # of the rules logger. reflow_logger = logging.getLogger("sqlfluff.rules.reflow") class ReflowSequence: """Class for keeping track of elements in a reflow operation. This acts as the primary route into using the reflow routines. It acts in a way that plays nicely within a rule context in that it accepts segments and configuration, while allowing access to modified segments and a series of :obj:`LintFix` objects, which can be returned by the calling rule. Sequences are made up of alternating :obj:`ReflowBlock` and :obj:`ReflowPoint` objects (even if some points have no segments). This is validated on construction. Most operations also return :obj:`ReflowSequence` objects such that operations can be chained, and then the resultant fixes accessed at the last stage, for example: .. code-block:: py3 fixes = ( ReflowSequence.from_around_target( context.segment, root_segment=context.parent_stack[0], config=context.config, ) .rebreak() .get_fixes() ) """ def __init__( self, elements: ReflowSequenceType, root_segment: BaseSegment, reflow_config: ReflowConfig, depth_map: DepthMap, lint_results: Optional[List[LintResult]] = None, ): # First validate integrity self._validate_reflow_sequence(elements) # Then save self.elements = elements self.root_segment = root_segment self.reflow_config = reflow_config self.depth_map = depth_map # This keeps track of fixes generated in the chaining process. # Alternatively pictured: This is the list of fixes required # to generate this sequence. We can build on this as we edit # the sequence. # Rather than saving *fixes* directly, we package them into # LintResult objects to make it a little easier to expose them # in the CLI. self.lint_results: List[LintResult] = lint_results or [] def get_fixes(self) -> List[LintFix]: """Get the current fix buffer. We're hydrating them here directly from the LintResult objects, so for more accurate results, consider using .get_results().
This method is particularly useful when consolidating multiple results into one. """ return fixes_from_results(self.lint_results) def get_results(self) -> List[LintResult]: """Return the current result buffer.""" return self.lint_results def get_raw(self) -> str: """Get the current raw representation.""" return "".join(elem.raw for elem in self.elements) @staticmethod def _validate_reflow_sequence(elements: ReflowSequenceType) -> None: # An empty set of elements _is_ allowed as an edge case. if not elements: # Return early if so return None # Check odds and evens OddType = elements[0].__class__ EvenType = ReflowPoint if OddType is ReflowBlock else ReflowBlock try: # Check odds are all points assert all( isinstance(elem, OddType) for elem in elements[::2] ), f"Not all odd elements are {OddType.__name__}" # Check evens are all blocks assert all( isinstance(elem, EvenType) for elem in elements[1::2] ), f"Not all even elements are {EvenType.__name__}" return None except AssertionError as err: # pragma: no cover for elem in elements: reflow_logger.error(" - %s", elem) reflow_logger.exception("Assertion check on ReflowSequence failed.") raise err @staticmethod def _elements_from_raw_segments( segments: Sequence[RawSegment], reflow_config: ReflowConfig, depth_map: DepthMap ) -> ReflowSequenceType: """Construct reflow elements from raw segments. NOTE: ReflowBlock elements should only ever have one segment which simplifies iteration here. """ elem_buff: ReflowSequenceType = [] seg_buff: List[RawSegment] = [] for seg in segments: # NOTE: end_of_file is block-like rather than point-like. # This is to facilitate better evaluation of the ends of files. # NOTE: This also allows us to include literal placeholders for # whitespace only strings. if ( seg.is_type("whitespace", "newline", "indent") or (get_consumed_whitespace(seg) or "").isspace() ): # Add to the buffer and move on. seg_buff.append(seg) continue elif elem_buff or seg_buff: # There are elements. The last will have been a block. # Add a point before we add the block. NOTE: It may be empty. elem_buff.append(ReflowPoint(segments=tuple(seg_buff))) # Add the block, with config info. elem_buff.append( ReflowBlock.from_config( segments=[seg], config=reflow_config, depth_info=depth_map.get_depth_info(seg), ) ) # Empty the buffer seg_buff = [] # If we ended with a buffer, apply it. # TODO: Consider removing this clause? if seg_buff: # pragma: no cover elem_buff.append(ReflowPoint(segments=tuple(seg_buff))) return elem_buff @classmethod def from_raw_segments( cls: Type["ReflowSequence"], segments: Sequence[RawSegment], root_segment: BaseSegment, config: FluffConfig, depth_map: Optional[DepthMap] = None, ) -> "ReflowSequence": """Construct a ReflowSequence from a sequence of raw segments. This is intended as a base constructor, which others can use. In particular, if no `depth_map` argument is provided, this method will generate one in a potentially inefficient way. If the calling method has access to a better way of inferring a depth map (for example because it has access to a common root segment for all the content), it should do that instead and pass it in. """ reflow_config = ReflowConfig.from_fluff_config(config) if depth_map is None: depth_map = DepthMap.from_raws_and_root(segments, root_segment) return cls( elements=cls._elements_from_raw_segments( segments, reflow_config=reflow_config, # NOTE: This pathway is inefficient. Ideally the depth # map should be constructed elsewhere and then passed in. 
depth_map=depth_map, ), root_segment=root_segment, reflow_config=reflow_config, depth_map=depth_map, ) @classmethod def from_root( cls: Type["ReflowSequence"], root_segment: BaseSegment, config: FluffConfig ) -> "ReflowSequence": """Generate a sequence from a root segment. Args: root_segment (:obj:`BaseSegment`): The relevant root segment (usually the base :obj:`FileSegment`). config (:obj:`FluffConfig`): A config object from which to load the spacing behaviours of different segments. """ return cls.from_raw_segments( root_segment.raw_segments, root_segment, config=config, # This is the efficient route. We use it here because we can. depth_map=DepthMap.from_parent(root_segment), ) @classmethod def from_around_target( cls: Type["ReflowSequence"], target_segment: BaseSegment, root_segment: BaseSegment, config: FluffConfig, sides: str = "both", ) -> "ReflowSequence": """Generate a sequence around a target. Args: target_segment (:obj:`RawSegment`): The segment to center around when considering the sequence to construct. root_segment (:obj:`BaseSegment`): The relevant root segment (usually the base :obj:`FileSegment`). config (:obj:`FluffConfig`): A config object from which to load the spacing behaviours of different segments. sides (:obj:`str`): Limit the reflow sequence to just one side of the target. Default is two sided ("both"), but set to "before" or "after" to limit to either side. **NOTE**: We don't just expand to the first block around the target but to the first *code* element, which means we may swallow several `comment` blocks in the process. To evaluate reflow around a specific target, we need need to generate a sequence which goes for the preceding raw to the following raw. i.e. at least: block - point - block - point - block (where the central block is the target). """ # There's probably a more efficient way than immediately # materialising the raw_segments for the whole root, but # it works. Optimise later. all_raws = root_segment.raw_segments target_raws = target_segment.raw_segments assert target_raws pre_idx = all_raws.index(target_raws[0]) post_idx = all_raws.index(target_raws[-1]) + 1 initial_idx = (pre_idx, post_idx) if sides in ("both", "before"): # Catch at least the previous segment pre_idx -= 1 for pre_idx in range(pre_idx, -1, -1): if all_raws[pre_idx].is_code: break if sides in ("both", "after"): for post_idx in range(post_idx, len(all_raws)): if all_raws[post_idx].is_code: break # Capture one more after the whitespace. post_idx += 1 segments = all_raws[pre_idx:post_idx] reflow_logger.debug( "Generating ReflowSequence.from_around_target(). idx: %s. " "slice: %s:%s. raw: %r", initial_idx, pre_idx, post_idx, "".join(seg.raw for seg in segments), ) return cls.from_raw_segments(segments, root_segment, config=config) def _find_element_idx_with(self, target: RawSegment) -> int: for idx, elem in enumerate(self.elements): if target in elem.segments: return idx raise ValueError( # pragma: no cover f"Target [{target}] not found in ReflowSequence." ) def without(self, target: RawSegment) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` without the specified segment. This generates appropriate deletion :obj:`LintFix` objects to direct the linter to remove those elements. """ removal_idx = self._find_element_idx_with(target) if removal_idx == 0 or removal_idx == len(self.elements) - 1: raise NotImplementedError( # pragma: no cover "Unexpected removal at one end of a ReflowSequence." 
) if isinstance(self.elements[removal_idx], ReflowPoint): raise NotImplementedError( # pragma: no cover "Not expected removal of whitespace in ReflowSequence." ) merged_point = ReflowPoint( segments=self.elements[removal_idx - 1].segments + self.elements[removal_idx + 1].segments, ) return ReflowSequence( elements=self.elements[: removal_idx - 1] + [merged_point] + self.elements[removal_idx + 2 :], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[LintResult(target, [LintFix.delete(target)])], ) def insert( self, insertion: RawSegment, target: RawSegment, pos: str = "before" ) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` with the new element inserted. Insertion is always relative to an existing element. Either before or after it as specified by `pos`. This generates appropriate creation :obj:`LintFix` objects to direct the linter to insert those elements. """ assert pos in ("before", "after") target_idx = self._find_element_idx_with(target) # Are we trying to insert something whitespace-like? if insertion.is_type("whitespace", "indent", "newline"): # pragma: no cover raise ValueError( "ReflowSequence.insert() does not support direct insertion of " "spacing elements such as whitespace or newlines" ) # We're inserting something blocky. That means a new block AND a new point. # It's possible we try to _split_ a point by targeting a whitespace element # inside a larger point. For now this isn't supported. # NOTE: We use the depth info of the reference anchor, with the assumption # (I think reliable) that the insertion will be applied as a sibling of # the target. self.depth_map.copy_depth_info(target, insertion) new_block = ReflowBlock.from_config( segments=[insertion], config=self.reflow_config, depth_info=self.depth_map.get_depth_info(target), ) if isinstance(self.elements[target_idx], ReflowPoint): raise NotImplementedError( # pragma: no cover "Can't insert relative to whitespace for now." ) elif pos == "before": return ReflowSequence( elements=self.elements[:target_idx] + [new_block, ReflowPoint(())] + self.elements[target_idx:], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[ LintResult(target, [LintFix.create_before(target, [insertion])]) ], ) elif pos == "after": # pragma: no cover # TODO: This doesn't get coverage - should it even exist? # Re-evaluate whether this code path is ever taken once more rules use # this. return ReflowSequence( elements=self.elements[: target_idx + 1] + [ReflowPoint(()), new_block] + self.elements[target_idx + 1 :], root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, # Generate the fix to do the removal. lint_results=[ LintResult(target, [LintFix.create_after(target, [insertion])]) ], ) raise ValueError( f"Unexpected value for ReflowSequence.insert(pos): {pos}" ) # pragma: no cover def replace( self, target: BaseSegment, edit: Sequence[BaseSegment] ) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` with `edit` elements replaced. This generates appropriate replacement :obj:`LintFix` objects to direct the linter to modify those elements. """ target_raws = target.raw_segments assert target_raws edit_raws = list(chain.from_iterable(seg.raw_segments for seg in edit)) # Add the new segments to the depth map at the same level as the target. # First work out how much to trim by. 
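# e.g. (a rough sketch): if `target` is a bracketed expression and
# `target_raws[0]` is its opening bracket two container levels down,
# the path between them has two steps, so two levels of (soon to be
# removed) depth info are trimmed when copying depth info onto the
# replacement segments below.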
trim_amount = len(target.path_to(target_raws[0])) reflow_logger.debug( "Replacement trim amount: %s.", trim_amount, ) for edit_raw in edit_raws: # NOTE: if target raws has more than one segment we take the depth info # of the first one. We trim to avoid including the implications of removed # "container" segments. self.depth_map.copy_depth_info(target_raws[0], edit_raw, trim=trim_amount) # It's much easier to just totally reconstruct the sequence rather # than do surgery on the elements. # TODO: The surgery is actually a good idea for long sequences now that # we have the depth map. current_raws = list( chain.from_iterable(elem.segments for elem in self.elements) ) start_idx = current_raws.index(target_raws[0]) last_idx = current_raws.index(target_raws[-1]) return ReflowSequence( self._elements_from_raw_segments( current_raws[:start_idx] + edit_raws + current_raws[last_idx + 1 :], reflow_config=self.reflow_config, # NOTE: the depth map has been mutated to include the new segments. depth_map=self.depth_map, ), root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=[LintResult(target, [LintFix.replace(target, edit)])], ) def _iter_points_with_constraints( self, ) -> Iterator[Tuple[ReflowPoint, Optional[ReflowBlock], Optional[ReflowBlock]]]: for idx, elem in enumerate(self.elements): # Only evaluate points. if isinstance(elem, ReflowPoint): pre = None post = None if idx > 0: pre = cast(ReflowBlock, self.elements[idx - 1]) if idx < len(self.elements) - 1: post = cast(ReflowBlock, self.elements[idx + 1]) yield elem, pre, post def respace( self, strip_newlines: bool = False, filter: str = "all" ) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` with points respaced. Args: strip_newlines (:obj:`bool`): Optionally strip newlines before respacing. This is primarily used on focused sequences to coerce objects onto a single line. This does not apply any prioritisation to which line breaks to remove and so is not a substitute for the full `reindent` or `reflow` methods. filter (:obj:`str`): Optionally filter which reflow points to respace. Default configuration is `all`. Other options are `line_break` which only respaces points containing a `newline` or followed by an `end_of_file` marker, or `inline` which is the inverse of `line_break`. This is most useful for filtering between trailing whitespace and fixes between content on a line. **NOTE** this method relies on the embodied results being correct so that we can build on them. """ assert filter in ( "all", "newline", "inline", ), f"Unexpected value for filter: {filter}" # Use the embodied fixes as a starting point. lint_results = self.get_results() new_elements: ReflowSequenceType = [] for point, pre, post in self._iter_points_with_constraints(): # We filter on the elements POST RESPACE. This is to allow # strict respacing to reclaim newlines. new_lint_results, new_point = point.respace_point( prev_block=pre, next_block=post, root_segment=self.root_segment, lint_results=lint_results, strip_newlines=strip_newlines, ) # If filter has been set, optionally unset the returned values. if ( filter == "inline" if ( # NOTE: We test on the NEW point. any(seg.is_type("newline") for seg in new_point.segments) # Or if it's followed by the end of file or (post and "end_of_file" in post.class_types) ) else filter == "newline" ): # Reset the values reflow_logger.debug( " Filter %r applied. 
Resetting %s", filter, point ) new_point = point # Otherwise apply the new fixes else: lint_results = new_lint_results if pre and (not new_elements or new_elements[-1] != pre): new_elements.append(pre) new_elements.append(new_point) if post: new_elements.append(post) return ReflowSequence( elements=new_elements, root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=lint_results, ) def rebreak(self) -> "ReflowSequence": """Returns a new :obj:`ReflowSequence` corrected line breaks. This intentionally **does not handle indentation**, as the existing indents are assumed to be correct. .. note:: Currently this only *moves* existing segments around line breaks (e.g. for operators and commas), but eventually this method will also handle line length considerations too. """ if self.lint_results: raise NotImplementedError( # pragma: no cover "rebreak cannot currently handle pre-existing embodied fixes." ) # Delegate to the rebreak algorithm elem_buff, lint_results = rebreak_sequence(self.elements, self.root_segment) return ReflowSequence( elements=elem_buff, root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=lint_results, ) def reindent(self) -> "ReflowSequence": """Reindent lines within a sequence.""" if self.lint_results: raise NotImplementedError( # pragma: no cover "rebreak cannot currently handle pre-existing embodied fixes." ) single_indent = construct_single_indent( indent_unit=self.reflow_config.indent_unit, tab_space_size=self.reflow_config.tab_space_size, ) reflow_logger.info("# Evaluating indents.") elements, indent_results = lint_indent_points( self.elements, single_indent=single_indent, skip_indentation_in=self.reflow_config.skip_indentation_in, allow_implicit_indents=self.reflow_config.allow_implicit_indents, ) return ReflowSequence( elements=elements, root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=indent_results, ) def break_long_lines(self): """Rebreak any remaining long lines in a sequence. This assumes that reindent() has already been applied. """ if self.lint_results: raise NotImplementedError( # pragma: no cover "break_long_lines cannot currently handle pre-existing " "embodied fixes." 
) single_indent = construct_single_indent( indent_unit=self.reflow_config.indent_unit, tab_space_size=self.reflow_config.tab_space_size, ) reflow_logger.info("# Evaluating line lengths.") elements, length_results = lint_line_length( self.elements, self.root_segment, single_indent=single_indent, line_length_limit=self.reflow_config.max_line_length, allow_implicit_indents=self.reflow_config.allow_implicit_indents, trailing_comments=self.reflow_config.trailing_comments, ) return ReflowSequence( elements=elements, root_segment=self.root_segment, reflow_config=self.reflow_config, depth_map=self.depth_map, lint_results=length_results, ) sqlfluff-2.3.5/src/sqlfluff/utils/testing/000077500000000000000000000000001451700765000205765ustar00rootroot00000000000000sqlfluff-2.3.5/src/sqlfluff/utils/testing/__init__.py000066400000000000000000000000741451700765000227100ustar00rootroot00000000000000"""Testing utils we want to expose for usage by plugins.""" sqlfluff-2.3.5/src/sqlfluff/utils/testing/cli.py000066400000000000000000000016451451700765000217250ustar00rootroot00000000000000"""Testing utils for working with the CLIs.""" from typing import Any, Dict, List, Optional from click.testing import CliRunner, Result def invoke_assert_code( ret_code: int = 0, args: Optional[List[Any]] = None, kwargs: Optional[Dict[str, Any]] = None, cli_input: Optional[str] = None, mix_stderr: bool = True, output_contains: str = "", ) -> Result: """Invoke a command and check return code.""" args = args or [] kwargs = kwargs or {} if cli_input: kwargs["input"] = cli_input runner = CliRunner(mix_stderr=mix_stderr) result = runner.invoke(*args, **kwargs) # Output the CLI code for debugging print(result.output) # Check return codes if output_contains != "": assert output_contains in result.output if ret_code == 0: if result.exception: raise result.exception assert ret_code == result.exit_code return result sqlfluff-2.3.5/src/sqlfluff/utils/testing/logging.py000066400000000000000000000035651451700765000226070ustar00rootroot00000000000000"""This is a modified log capture mechanism which reliably works. So that logs are handled appropriately by the CLI, sqlfluff modifies the root logger in a way that can conflict with pytest. See: https://github.com/pytest-dev/pytest/issues/3697 This fixture returns a context manager to handle them better and enable testing of logs while working around the restrictions of setting the `propagate` attribute of the logger in each test. Code adapted from: https://github.com/pytest-dev/pytest/issues/3697#issuecomment-792129636 """ import logging from contextlib import contextmanager from typing import Iterator from _pytest.logging import LogCaptureHandler, _remove_ansi_escape_sequences class FluffLogHandler(LogCaptureHandler): """A modified LogCaptureHandler which also exposes some helper functions. The aim is to mimic some of the methods available on caplog. See: https://docs.pytest.org/en/7.1.x/_modules/_pytest/logging.html """ @property def text(self) -> str: """The formatted log text.""" return _remove_ansi_escape_sequences(self.stream.getvalue()) @contextmanager def fluff_log_catcher(level: int, logger_name: str) -> Iterator[FluffLogHandler]: """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the level is restored to its original value. Args: level (int): The lowest logging level to capture. logger_name (str): The name of the logger to capture. """ assert logger_name.startswith( "sqlfluff" ), "This should only be used with a SQLFluff logger." 
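# Illustrative usage (a sketch mirroring the docstring above):
#   with fluff_log_catcher(logging.DEBUG, "sqlfluff.rules") as caplog:
#       ...  # run the code under test
#   assert "expected message" in caplog.text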
logger = logging.getLogger(logger_name) handler = FluffLogHandler() orig_level = logger.level logger.setLevel(level) logger.addHandler(handler) try: yield handler finally: logger.setLevel(orig_level) logger.removeHandler(handler) sqlfluff-2.3.5/src/sqlfluff/utils/testing/rules.py000066400000000000000000000211121451700765000222770ustar00rootroot00000000000000"""Testing utils for rule plugins.""" from glob import glob from typing import List, NamedTuple, Optional, Set, Tuple import pytest import yaml from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLParseError, SQLTemplaterError from sqlfluff.core.rules import BaseRule, get_ruleset class RuleTestCase(NamedTuple): """Used like a dataclass by rule tests.""" rule: Optional[str] = None desc: Optional[str] = None pass_str: Optional[str] = None fail_str: Optional[str] = None violations: Optional[Set[dict]] = None fix_str: Optional[str] = None violations_after_fix: Optional[Set[dict]] = None configs: Optional[dict] = None skip: Optional[str] = None line_numbers: List[int] = [] def load_test_cases( test_cases_path: str, ) -> Tuple[List[str], List[RuleTestCase]]: """Load rule test cases from YAML files.""" ids = [] test_cases = [] for path in sorted(glob(test_cases_path)): with open(path) as f: raw = f.read() y = yaml.safe_load(raw) rule = y.pop("rule") global_config = y.pop("configs", None) if global_config: for i in y: if "configs" not in y[i].keys(): y[i].update({"configs": global_config}) ids.extend([rule + "_" + t for t in y]) test_cases.extend([RuleTestCase(rule=rule, **v) for k, v in y.items()]) return ids, test_cases def get_rule_from_set(code, config) -> BaseRule: """Fetch a rule from the rule set.""" for r in get_ruleset().get_rulepack(config=config).rules: if r.code == code: # pragma: no cover return r raise ValueError(f"{code!r} not in {get_ruleset()!r}") def assert_rule_fail_in_sql(code, sql, configs=None, line_numbers=None): """Assert that a given rule does fail on the given sql.""" print("# Asserting Rule Fail in SQL") # Set up the config to only use the rule we are testing. overrides = {"rules": code} if configs is None or "core" not in configs or "dialect" not in configs["core"]: overrides["dialect"] = "ansi" cfg = FluffConfig(configs=configs, overrides=overrides) # Lint it using the current config (while in fix mode) linted = Linter(config=cfg).lint_string(sql, fix=True) lerrs = linted.get_violations() print("Errors Found:") for e in lerrs: print(" " + repr(e)) if e.desc().startswith("Unexpected exception"): pytest.fail(f"Linter failed with {e.desc()}") # pragma: no cover parse_errors = list( filter(lambda v: isinstance(v, (SQLParseError, SQLTemplaterError)), lerrs) ) if parse_errors: pytest.fail(f"Found the following parse errors in test case: {parse_errors}") if not any(v.rule.code == code for v in lerrs): assert linted.tree print(f"Parsed File:\n{linted.tree.stringify()}") pytest.fail( f"No {code} failures found in query which should fail.", pytrace=False, ) if line_numbers: actual_line_numbers = [e.line_no for e in lerrs] if line_numbers != actual_line_numbers: # pragma: no cover pytest.fail( "Expected errors on lines {}, but got errors on lines {}".format( line_numbers, actual_line_numbers ) ) fixed, _ = linted.fix_string() return fixed, linted.violations def assert_rule_pass_in_sql(code, sql, configs=None, msg=None): """Assert that a given rule doesn't fail on the given sql.""" # Configs allows overrides if we want to use them. 
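# e.g. a caller wanting a non-default dialect might pass:
#   configs = {"core": {"dialect": "bigquery"}}
# in which case the "ansi" fallback below is not applied.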
print("# Asserting Rule Pass in SQL") if configs is None: configs = {} core = configs.setdefault("core", {}) core["rules"] = code overrides = {} if "dialect" not in configs["core"]: overrides["dialect"] = "ansi" cfg = FluffConfig(configs=configs, overrides=overrides) linter = Linter(config=cfg) # This section is mainly for aid in debugging. rendered = linter.render_string(sql, fname="", config=cfg, encoding="utf-8") parsed = linter.parse_rendered(rendered) if parsed.violations: if msg: print(msg) # pragma: no cover assert parsed.tree pytest.fail(parsed.violations[0].desc() + "\n" + parsed.tree.stringify()) assert parsed.tree print(f"Parsed:\n {parsed.tree.stringify()}") # Note that lint_string() runs the templater and parser again, in order to # test the whole linting pipeline in the same way that users do. In other # words, the "rendered" and "parsed" variables above are irrelevant to this # line of code. lint_result = linter.lint_string(sql, config=cfg, fname="") lerrs = lint_result.violations if any(v.rule.code == code for v in lerrs): print("Errors Found:") for e in lerrs: print(" " + repr(e)) if msg: print(msg) # pragma: no cover pytest.fail(f"Found {code} failures in query which should pass.", pytrace=False) def assert_rule_raises_violations_in_file(rule, fpath, violations, fluff_config): """Assert that a given rule raises given errors in specific positions of a file.""" lntr = Linter(config=fluff_config) lnt = lntr.lint_path(fpath) # Reformat the test data to match the format we're expecting. We use # sets because we really don't care about order and if one is missing, # we don't care about the orders of the correct ones. assert set(lnt.check_tuples()) == {(rule, v[0], v[1]) for v in violations} def prep_violations(rule, violations): """Default to test rule if code is omitted.""" for v in violations: if "code" not in v: v["code"] = rule return violations def assert_violations_before_fix(test_case, violations_before_fix): """Assert that the given violations are found in the given sql.""" print("# Asserting Violations Before Fix") violation_info = [e.get_info_dict() for e in violations_before_fix] try: assert violation_info == prep_violations(test_case.rule, test_case.violations) except AssertionError: # pragma: no cover print("Actual violations:\n" + yaml.dump(violation_info)) raise def assert_violations_after_fix(test_case): """Assert that the given violations are found in the fixed sql.""" print("# Asserting Violations After Fix") _, violations_after_fix = assert_rule_fail_in_sql( test_case.rule, test_case.fix_str, configs=test_case.configs, line_numbers=test_case.line_numbers, ) violation_info = [e.get_info_dict() for e in violations_after_fix] try: assert violation_info == prep_violations( test_case.rule, test_case.violations_after_fix ) except AssertionError: # pragma: no cover print("Actual violations_after_fix:\n" + yaml.dump(violation_info)) raise def rules__test_helper(test_case): """Test that a rule passes/fails on a set of test_cases. Optionally, also test the fixed string if provided in the test case. 
""" if test_case.skip: pytest.skip(test_case.skip) if test_case.pass_str: assert_rule_pass_in_sql( test_case.rule, test_case.pass_str, configs=test_case.configs, ) if test_case.fail_str: res, violations_before_fix = assert_rule_fail_in_sql( test_case.rule, test_case.fail_str, configs=test_case.configs, line_numbers=test_case.line_numbers, ) if test_case.violations: assert_violations_before_fix(test_case, violations_before_fix) # If a `fixed` value is provided then check it matches if test_case.fix_str: assert res == test_case.fix_str if test_case.violations_after_fix: assert_violations_after_fix(test_case) else: assert_rule_pass_in_sql( test_case.rule, test_case.fix_str, configs=test_case.configs, msg="The SQL after fix is applied still contains rule violations. " "To accept a partial fix, violations_after_fix must be set " "listing the remaining, expected, violations.", ) else: # Check that tests without a fix_str do not apply any fixes. assert res == test_case.fail_str, ( "No fix_str was provided, but the rule modified the SQL. Where a fix " "can be applied by a rule, a fix_str must be supplied in the test." ) sqlfluff-2.3.5/test/000077500000000000000000000000001451700765000143275ustar00rootroot00000000000000sqlfluff-2.3.5/test/__init__.py000066400000000000000000000000311451700765000164320ustar00rootroot00000000000000"""Init PY for tests.""" sqlfluff-2.3.5/test/api/000077500000000000000000000000001451700765000151005ustar00rootroot00000000000000sqlfluff-2.3.5/test/api/__init__.py000066400000000000000000000000401451700765000172030ustar00rootroot00000000000000"""Tests for the public api.""" sqlfluff-2.3.5/test/api/classes_test.py000066400000000000000000000023331451700765000201470ustar00rootroot00000000000000"""Tests for use cases of the public api classes.""" from sqlfluff.core import Lexer, Linter, Parser test_query = "SELECt 1" def test__api__lexer(): """Basic checking of lexing functionality.""" tokens, violations = Lexer(dialect="ansi").lex(test_query) assert violations == [] assert isinstance(tokens, tuple) # The last element is the file end marker. 
assert [elem.raw for elem in tokens] == ["SELECt", " ", "1", ""] def test__api__parser(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) assert parsed.raw == test_query def test__api__linter_lint(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) violations = Linter(dialect="ansi").lint(parsed) assert [v.rule.code for v in violations] == ["CP01", "LT12"] def test__api__linter_fix(): """Basic checking of parsing functionality.""" tokens, _ = Lexer(dialect="ansi").lex(test_query) parsed = Parser(dialect="ansi").parse(tokens) fixed, _ = Linter(dialect="ansi").fix(parsed) assert fixed.raw == "SELECT 1\n" sqlfluff-2.3.5/test/api/info_test.py000066400000000000000000000016411451700765000174460ustar00rootroot00000000000000"""Test using sqlfluff to extract elements of queries.""" import sqlfluff from sqlfluff.core.linter import RuleTuple def test__api__info_dialects(): """Basic linting of dialects.""" dialects = sqlfluff.list_dialects() assert isinstance(dialects, list) assert ("ansi", "ansi", "nothing") in dialects def test__api__info_rules(): """Basic linting of dialects.""" rules = sqlfluff.list_rules() assert isinstance(rules, list) assert ( RuleTuple( code="LT01", name="layout.spacing", description="Inappropriate Spacing.", groups=("all", "core", "layout"), aliases=( "L001", "L005", "L006", "L008", "L023", "L024", "L039", "L048", "L071", ), ) in rules ) sqlfluff-2.3.5/test/api/simple_test.py000066400000000000000000000224611451700765000200070ustar00rootroot00000000000000"""Tests for simple use cases of the public api.""" import json import pytest import sqlfluff from sqlfluff.core.errors import SQLFluffUserError my_bad_query = "SeLEct *, 1, blah as fOO from myTable" lint_result = [ { "code": "AM04", "description": "Query produces an unknown number of result columns.", "line_no": 1, "line_pos": 1, "name": "ambiguous.column_count", }, { "code": "CP01", "line_no": 1, "line_pos": 1, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", }, { "code": "LT09", "description": "Select targets should be on a new line unless there is only " "one select target.", "line_no": 1, "line_pos": 1, "name": "layout.select_targets", }, { "code": "LT01", "description": "Expected only single space before star '*'. Found ' '.", "line_no": 1, "line_pos": 7, "name": "layout.spacing", }, { "code": "AL03", "line_no": 1, "line_pos": 12, "description": "Column expression without alias. Use explicit `AS` clause.", "name": "aliasing.expression", }, { "code": "CP01", "line_no": 1, "line_pos": 20, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", }, { "code": "LT01", "description": ( "Expected only single space before naked identifier. Found ' '." ), "line_no": 1, "line_pos": 22, "name": "layout.spacing", }, { "code": "CP02", "line_no": 1, "line_pos": 24, "description": "Unquoted identifiers must be consistently lower case.", "name": "capitalisation.identifiers", }, { "code": "LT01", "description": "Expected only single space before 'from' keyword. 
Found ' '.", "line_no": 1, "line_pos": 27, "name": "layout.spacing", }, { "code": "CP01", "line_no": 1, "line_pos": 29, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", }, { "code": "CP02", "line_no": 1, "line_pos": 34, "description": "Unquoted identifiers must be consistently lower case.", "name": "capitalisation.identifiers", }, { "code": "LT12", "line_no": 1, "line_pos": 41, "description": "Files must end with a single trailing newline.", "name": "layout.end_of_file", }, ] def test__api__lint_string_without_violations(): """Check lint functionality when there is no violation.""" result = sqlfluff.lint("select column from table\n") assert result == [] def test__api__lint_string(): """Basic checking of lint functionality.""" result = sqlfluff.lint(my_bad_query) # Check return types. assert isinstance(result, list) assert all(isinstance(elem, dict) for elem in result) # Check actual result assert result == lint_result def test__api__lint_string_specific(): """Basic checking of lint functionality.""" rules = ["CP02", "LT12"] result = sqlfluff.lint(my_bad_query, rules=rules) # Check which rules are found assert all(elem["code"] in rules for elem in result) def test__api__lint_string_specific_single(): """Basic checking of lint functionality.""" rules = ["CP02"] result = sqlfluff.lint(my_bad_query, rules=rules) # Check which rules are found assert all(elem["code"] in rules for elem in result) def test__api__lint_string_specific_exclude(): """Basic checking of lint functionality.""" exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01"] result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules) # Check only AM04 is found assert len(result) == 1 assert "AM04" == result[0]["code"] def test__api__lint_string_specific_exclude_single(): """Basic checking of lint functionality.""" exclude_rules = ["LT01"] result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules) # Check only AM04 is found assert len(result) == 9 set(["LT12", "CP01", "AL03", "CP02", "LT09", "AM04"]) == set( [r["code"] for r in result] ) def test__api__lint_string_specific_exclude_all_failed_rules(): """Basic checking of lint functionality.""" exclude_rules = ["LT12", "CP01", "AL03", "CP02", "LT09", "LT01", "AM04"] result = sqlfluff.lint(my_bad_query, exclude_rules=exclude_rules) # Check it passes assert result == [] def test__api__fix_string(): """Basic checking of lint functionality.""" result = sqlfluff.fix(my_bad_query) # Check return types. assert isinstance(result, str) # Check actual result assert ( result == """SELECT *, 1, blah AS foo FROM mytable """ ) def test__api__fix_string_specific(): """Basic checking of lint functionality with a specific rule.""" result = sqlfluff.fix(my_bad_query, rules=["CP01"]) # Check actual result assert result == "SELECT *, 1, blah AS fOO FROM myTable" def test__api__fix_string_specific_exclude(): """Basic checking of lint functionality with a specific rule exclusion.""" result = sqlfluff.fix(my_bad_query, exclude_rules=["LT09"]) # Check actual result assert result == "SELECT *, 1, blah AS foo FROM mytable\n" def test__api__fix_string_unparsable(): """Test behavior with parse errors.""" bad_query = """SELECT my_col FROM my_schema.my_table where processdate ! 3""" result = sqlfluff.fix(bad_query, rules=["CP01"]) # Check fix result: should be unchanged because of the parse error. 
assert result == bad_query def test__api__fix_string_unparsable_fix_even_unparsable(): """Test behavior with parse errors.""" bad_query = """SELECT my_col FROM my_schema.my_table where processdate ! 3""" result = sqlfluff.fix(bad_query, rules=["CP01"], fix_even_unparsable=True) # Check fix result: should be fixed because we overrode fix_even_unparsable. assert ( result == """SELECT my_col FROM my_schema.my_table WHERE processdate ! 3""" ) def test__api__parse_string(): """Basic checking of parse functionality.""" parsed = sqlfluff.parse(my_bad_query) # Check a JSON object is returned. assert isinstance(parsed, dict) # Load in expected result. with open("test/fixtures/api/parse_test/parse_test.json", "r") as f: expected_parsed = json.load(f) # Compare JSON from parse to expected result. assert parsed == expected_parsed def test__api__parse_fail(): """Basic failure mode of parse functionality.""" try: sqlfluff.parse("Select (1 + 2 +++) FROM mytable as blah blah") pytest.fail("sqlfluff.parse should have raised an exception.") except Exception as err: # Check it's the right kind of exception assert isinstance(err, sqlfluff.api.APIParsingError) # Check there are two violations in there. assert len(err.violations) == 2 # Check it prints nicely. assert ( str(err) == """Found 2 issues while parsing string. Line 1, Position 15: Found unparsable section: '+++' Line 1, Position 41: Found unparsable section: 'blah'""" ) def test__api__config_path(): """Test that we can load a specified config file in the Simple API.""" # Load test SQL file. with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f: sql = f.read() # Pass a config path to the Simple API. parsed = sqlfluff.parse( sql, config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff", ) # Load in expected result. with open("test/fixtures/api/config_path_test/config_path_test.json", "r") as f: expected_parsed = json.load(f) # Compare JSON from parse to expected result. assert parsed == expected_parsed @pytest.mark.parametrize( "kwargs,expected", [ ( # No override from API, so uses .sqlfluff value {}, set(), ), ( # API overrides, so it uses that dict(exclude_rules=["RF02"]), {"RF04"}, ), ], ) def test__api__config_override(kwargs, expected, tmpdir): """Test that parameters to lint() override .sqlfluff correctly (or not).""" config_path = "test/fixtures/api/config_override/.sqlfluff" sql = "SELECT TRIM(name) AS name FROM some_table" lint_results = sqlfluff.lint(sql, config_path=config_path, **kwargs) assert expected == {"RF02", "RF04"}.intersection( {lr["code"] for lr in lint_results} ) def test__api__invalid_dialect(): """Test that SQLFluffUserError is raised for a bad dialect.""" # Load test SQL file. with open("test/fixtures/api/config_path_test/config_path_test.sql", "r") as f: sql = f.read() # Pass a fake dialect to the API and test the correct error is raised. 
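# Sketch of the expected failure mode (matching the assertion below):
#   sqlfluff.parse(sql, dialect="not_a_real_dialect")
#   -> raises SQLFluffUserError("Error: Unknown dialect 'not_a_real_dialect'")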
with pytest.raises(SQLFluffUserError) as err: sqlfluff.parse( sql, dialect="not_a_real_dialect", config_path="test/fixtures/api/config_path_test/extra_configs/.sqlfluff", ) assert str(err.value) == "Error: Unknown dialect 'not_a_real_dialect'" sqlfluff-2.3.5/test/cli/000077500000000000000000000000001451700765000150765ustar00rootroot00000000000000sqlfluff-2.3.5/test/cli/__init__.py000066400000000000000000000000361451700765000172060ustar00rootroot00000000000000"""Tests for sqlfluff.cli.""" sqlfluff-2.3.5/test/cli/autocomplete_test.py000066400000000000000000000013121451700765000212050ustar00rootroot00000000000000"""Test autocomplete commands.""" import pytest from sqlfluff.cli.autocomplete import dialect_shell_complete @pytest.mark.parametrize( "incomplete,expected", [ ["an", ["ansi"]], ["d", ["databricks", "db2", "duckdb"]], ["g", ["greenplum"]], ["s", ["snowflake", "soql", "sparksql", "sqlite"]], ["post", ["postgres"]], ], ) def test_dialect_click_type_shell_complete(incomplete, expected): """Check that autocomplete returns dialects as expected.""" completion_items = dialect_shell_complete( ctx="dummy_not_used", param="dummy_not_used", incomplete=incomplete ) actual = [c.value for c in completion_items] assert expected == actual sqlfluff-2.3.5/test/cli/commands_test.py000066400000000000000000002032131451700765000203110ustar00rootroot00000000000000"""The Test file for CLI (General).""" import configparser import json import logging import os import pathlib import re import shutil import stat import subprocess import sys import tempfile import textwrap from unittest.mock import MagicMock, patch import chardet # Testing libraries import pytest import yaml from click.testing import CliRunner # We import the library directly here to get the version import sqlfluff from sqlfluff.cli.commands import ( cli_format, dialects, fix, get_config, lint, parse, render, rules, version, ) from sqlfluff.core.parser import CommentSegment from sqlfluff.core.rules import BaseRule, LintFix, LintResult from sqlfluff.utils.testing.cli import invoke_assert_code re_ansi_escape = re.compile(r"\x1b[^m]*m") @pytest.fixture(autouse=True) def logging_cleanup(): """This gracefully handles logging issues at session teardown. Removes handlers from all loggers. Autouse applies this to all tests in this file (i.e. all the cli command tests), which should be all of the test cases where `set_logging_level` is called. https://github.com/sqlfluff/sqlfluff/issues/3702 https://github.com/pytest-dev/pytest/issues/5502#issuecomment-1190557648 """ yield # NOTE: This is a teardown function so the clearup code # comes _after_ the yield. # Get only the sqlfluff loggers (which we set in set_logging_level) loggers = [ logger for logger in logging.Logger.manager.loggerDict.values() if isinstance(logger, logging.Logger) and logger.name.startswith("sqlfluff") ] for logger in loggers: if not hasattr(logger, "handlers"): continue for handler in logger.handlers[:]: logger.removeHandler(handler) def contains_ansi_escape(s: str) -> bool: """Does the string contain ANSI escape codes (e.g. color)?""" return re_ansi_escape.search(s) is not None expected_output = """== [test/fixtures/linter/indentation_error_simple.sql] FAIL L: 2 | P: 1 | LT02 | Expected indent of 4 spaces. [layout.indent] L: 5 | P: 10 | CP01 | Keywords must be consistently upper case. 
| [capitalisation.keywords] """ def test__cli__command_directed(): """Basic checking of lint functionality.""" result = invoke_assert_code( ret_code=1, args=[ lint, [ "--disable-progress-bar", "test/fixtures/linter/indentation_error_simple.sql", ], ], ) # We should get a readout of what the error was check_a = "L: 2 | P: 1 | LT02" # NB: Skip the number at the end because it's configurable check_b = "ndentation" assert check_a in result.output assert check_b in result.output # Finally check the WHOLE output to make sure that unexpected newlines are not # added. The replace command just accounts for cross platform testing. assert result.output.replace("\\", "/").startswith(expected_output) def test__cli__command_dialect(): """Check the script raises the right exception on an unknown dialect.""" # The dialect is unknown should be a non-zero exit code invoke_assert_code( ret_code=2, args=[ lint, [ "-n", "--dialect", "faslkjh", "test/fixtures/linter/indentation_error_simple.sql", ], ], ) def test__cli__command_no_dialect(): """Check the script raises the right exception no dialect.""" # The dialect is unknown should be a non-zero exit code result = invoke_assert_code( ret_code=2, args=[ lint, ["-"], ], cli_input="SELECT 1", ) assert "User Error" in result.stdout assert "No dialect was specified" in result.stdout def test__cli__command_parse_error_dialect_explicit_warning(): """Check parsing error raises the right warning.""" # For any parsing error there should be a non-zero exit code # and a human-readable warning should be displayed. # Dialect specified as commandline option. result = invoke_assert_code( ret_code=1, args=[ parse, [ "-n", "--dialect", "postgres", "test/fixtures/cli/fail_many.sql", ], ], ) assert ( "WARNING: Parsing errors found and dialect is set to 'postgres'. " "Have you configured your dialect correctly?" in result.stdout ) def test__cli__command_parse_error_dialect_implicit_warning(): """Check parsing error raises the right warning.""" # For any parsing error there should be a non-zero exit code # and a human-readable warning should be displayed. # Dialect specified in .sqlfluff config. result = invoke_assert_code( ret_code=1, args=[ # Config sets dialect to tsql parse, [ "-n", "--config", "test/fixtures/cli/extra_configs/.sqlfluff", "test/fixtures/cli/fail_many.sql", ], ], ) assert ( "WARNING: Parsing errors found and dialect is set to 'tsql'. " "Have you configured your dialect correctly?" in result.stdout ) def test__cli__command_dialect_legacy(): """Check the script raises the right exception on a legacy dialect.""" result = invoke_assert_code( ret_code=2, args=[ lint, [ "-n", "--dialect", "exasol_fs", "test/fixtures/linter/indentation_error_simple.sql", ], ], ) assert "Please use the 'exasol' dialect instead." in result.stdout def test__cli__command_extra_config_fail(): """Check the script raises the right exception non-existent extra config path.""" result = invoke_assert_code( ret_code=2, args=[ lint, [ "--config", "test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd", "test/fixtures/cli/extra_config_tsql.sql", ], ], ) assert ( "Extra config 'test/fixtures/cli/extra_configs/.sqlfluffsdfdfdfsfd' does not " "exist." in result.stdout ) @pytest.mark.parametrize( "command", [ ( "-", "-n", ), ( "-", "-n", "-v", ), ( "-", "-n", "-vv", ), ( "-", "-vv", ), ], ) def test__cli__command_lint_stdin(command): """Check basic commands on a simple script using stdin. The subprocess command should exit without errors, as no issues should be found. 
""" with open("test/fixtures/cli/passing_a.sql") as test_file: sql = test_file.read() invoke_assert_code(args=[lint, ("--dialect=ansi",) + command], cli_input=sql) def test__cli__command_lint_empty_stdin(): """Check linting an empty file raises no exceptions. https://github.com/sqlfluff/sqlfluff/issues/4807 """ invoke_assert_code(args=[lint, ("-d", "ansi", "-")], cli_input="") def test__cli__command_render_stdin(): """Check render on a simple script using stdin.""" with open("test/fixtures/cli/passing_a.sql") as test_file: sql = test_file.read() result = invoke_assert_code(args=[render, ("--dialect=ansi", "-")], cli_input=sql) # Check we get back out the same file we input. assert result.output.startswith(sql) @pytest.mark.parametrize( "command", [ # Test basic linting ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "--exclude-rules", "AM05", ], ), # Basic render ( render, [ "test/fixtures/cli/passing_b.sql", ], ), # Original tests from test__cli__command_lint (lint, ["-n", "test/fixtures/cli/passing_a.sql"]), (lint, ["-n", "-v", "test/fixtures/cli/passing_a.sql"]), (lint, ["-n", "-vvvv", "test/fixtures/cli/passing_a.sql"]), (lint, ["-vvvv", "test/fixtures/cli/passing_a.sql"]), # Test basic linting with very high verbosity ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvvvvvvvvvv", "--exclude-rules", "AM05", ], ), # Test basic linting with specific logger. # Also test short rule exclusion. ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvv", "--logger", "parser", "-e", "AM05", ], ), # Check basic parsing ( parse, [ "-n", "test/fixtures/cli/passing_b.sql", "--exclude-rules", "AM05", ], ), # Test basic parsing with very high verbosity ( parse, [ "-n", "test/fixtures/cli/passing_b.sql", "-vvvvvvvvvvv", "-e", "AM05", ], ), # Check basic parsing, with the code only option (parse, ["-n", "test/fixtures/cli/passing_b.sql", "-c"]), # Check basic parsing, with the yaml output (parse, ["-n", "test/fixtures/cli/passing_b.sql", "-c", "-f", "yaml"]), (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "yaml"]), # Check parsing with no output (used mostly for testing) (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--format", "none"]), # Check the benching commands (parse, ["-n", "test/fixtures/cli/passing_b.sql", "--bench"]), ( lint, [ "-n", "test/fixtures/cli/passing_b.sql", "--bench", "--exclude-rules", "AM05", ], ), ( fix, [ "-n", "test/fixtures/cli/passing_b.sql", "--bench", "--exclude-rules", "AM05", ], ), # Check linting works in specifying rules ( lint, [ "-n", "--rules", "CP01", "test/fixtures/linter/operator_errors.sql", ], ), # Check ignoring linting (multiprocess) # https://github.com/sqlfluff/sqlfluff/issues/5066 ( lint, [ "-n", "--ignore", "linting", "-p", "2", "test/fixtures/linter/operator_errors.sql", "test/fixtures/linter/comma_errors.sql", ], ), # Check linting works in specifying multiple rules ( lint, [ "-n", "--rules", "CP01,LT02", "test/fixtures/linter/operator_errors.sql", ], ), # Check linting works with both included and excluded rules ( lint, [ "-n", "--rules", "CP01,LT01", "--exclude-rules", "LT01,AL07", "test/fixtures/linter/operator_errors.sql", ], ), # Check linting works with just excluded rules ( lint, [ "-n", "--exclude-rules", "LT01,LT03,AL07", "test/fixtures/linter/operator_errors.sql", ], ), # Check that ignoring works (also checks that unicode files parse). 
( lint, [ "-n", "--exclude-rules", "LT02,LT12,AL07", "--ignore", "parsing,lexing", "test/fixtures/linter/parse_lex_error.sql", ], ), # Check nofail works (lint, ["--nofail", "test/fixtures/linter/parse_lex_error.sql"]), # Check config works (sets dialect to tsql) ( lint, [ "--config", "test/fixtures/cli/extra_configs/.sqlfluff", "test/fixtures/cli/extra_config_tsql.sql", ], ), ( lint, [ "--config", "test/fixtures/cli/extra_configs/pyproject.toml", "test/fixtures/cli/extra_config_tsql.sql", ], ), # Check timing outputs doesn't raise exceptions (lint, ["test/fixtures/cli/passing_a.sql", "--persist-timing", "test.csv"]), # Check lint --help command doesn't raise exception. # NOTE: This tests the LazySequence in action. (lint, ["--help"]), ], ) def test__cli__command_lint_parse(command): """Check basic commands on a more complicated script.""" invoke_assert_code(args=command) @pytest.mark.parametrize( "command, ret_code", [ # Check the script doesn't raise an unexpected exception with badly formed # files. ( ( fix, [ "--rules", "LT01", "test/fixtures/cli/fail_many.sql", "-vvvvvvv", ], "y", ), 1, ), # Fix with a suffixs ( ( fix, [ "--rules", "LT01", "--fixed-suffix", "_fix", "test/fixtures/cli/fail_many.sql", ], "y", ), 1, ), # Fix without specifying rules ( ( fix, [ "--fixed-suffix", "_fix", "test/fixtures/cli/fail_many.sql", ], "y", ), 1, ), # Format ( ( cli_format, [ "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", ], ), 0, ), # Format with --persist-timing ( ( cli_format, [ "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", "--persist-timing", "test.csv", ], ), 0, ), # Format (specifying rules) ( ( cli_format, [ "--rules", "LT01", "--fixed-suffix", "_fix", "test/fixtures/linter/whitespace_errors.sql", ], ), 2, ), # Template syntax error in macro file ( ( lint, ["test/fixtures/cli/unknown_jinja_tag/test.sql"], ), 1, ), # Test overriding library path when it doesn't cause an issue ( ( lint, ["test/fixtures/cli/passing_a.sql", "--library-path", "none"], ), 0, ), # Test overriding library path when it DOES cause an issue # (because macros won't be found). ( ( # Render because that's the step where the issue will # occur. render, [ "test/fixtures/templater/jinja_r_library_in_macro/jinja.sql", "--library-path", "none", ], ), 1, ), # Test render fail ( ( render, ["test/fixtures/cli/fail_many.sql"], ), 1, ), # Test a longer lint fail with --bench # This tests the threshold rules clause ( ( lint, [ "test/fixtures/linter/autofix/bigquery/004_templating/before.sql", "--bench", ], ), 1, ), # Test that setting --quiet with --verbose raises an error. 
( ( fix, [ "--quiet", "--verbose", "test/fixtures/cli/fail_many.sql", ], ), 2, ), ], ) def test__cli__command_lint_parse_with_retcode(command, ret_code): """Check commands expecting a non-zero ret code.""" invoke_assert_code(ret_code=ret_code, args=command) def test__cli__command_lint_warning_explicit_file_ignored(): """Check ignoring file works when file is in an ignore directory.""" runner = CliRunner() result = runner.invoke( lint, ["test/fixtures/linter/sqlfluffignore/path_b/query_c.sql"] ) assert result.exit_code == 0 assert ( "Exact file path test/fixtures/linter/sqlfluffignore/path_b/query_c.sql " "was given but it was ignored" ) in result.output.strip() def test__cli__command_lint_skip_ignore_files(): """Check "ignore file" is skipped when --disregard-sqlfluffignores flag is set.""" runner = CliRunner() result = runner.invoke( lint, [ "test/fixtures/linter/sqlfluffignore/path_b/query_c.sql", "--disregard-sqlfluffignores", ], ) assert result.exit_code == 1 assert "LT12" in result.output.strip() def test__cli__command_lint_ignore_local_config(): """Test that --ignore-local_config ignores .sqlfluff file as expected.""" runner = CliRunner() # First we test that not including the --ignore-local-config includes # .sqlfluff file, and therefore the lint doesn't raise AL02 result = runner.invoke( lint, [ "test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql", ], ) assert result.exit_code == 0 assert "AL02" not in result.output.strip() # Then repeat the same lint but this time ignoring the .sqlfluff file. # We should see AL02 raised. result = runner.invoke( lint, [ "--ignore-local-config", "--dialect=ansi", "test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql", ], ) assert result.exit_code == 1 assert "AL02" in result.output.strip() def test__cli__command_lint_warning(): """Test that configuring warnings works. For this test the warnings are configured using inline config in the file. That's more for simplicity however the code paths should be the same if it's configured in a file. """ runner = CliRunner() result = runner.invoke( lint, [ "test/fixtures/cli/warning_a.sql", ], ) # Because we're only warning. The command should pass. assert result.exit_code == 0 # The output should still say PASS. assert "PASS" in result.output.strip() # But should also contain the warnings. # NOTE: Not including the whole description because it's too long. 
assert ( "L: 4 | P: 9 | LT01 | WARNING: Expected single whitespace" in result.output.strip() ) def test__cli__command_versioning(): """Check version command.""" # Get the package version info pkg_version = sqlfluff.__version__ # Get the version info from the config file config = configparser.ConfigParser() config.read_file(open("setup.cfg")) config_version = config["metadata"]["version"] assert pkg_version == config_version # Get the version from the cli runner = CliRunner() result = runner.invoke(version) assert result.exit_code == 0 # We need to strip to remove the newline characters assert result.output.strip() == pkg_version def test__cli__command_version(): """Just check version command for exceptions.""" # Get the package version info pkg_version = sqlfluff.__version__ runner = CliRunner() result = runner.invoke(version) assert result.exit_code == 0 assert pkg_version in result.output # Check a verbose version result = runner.invoke(version, ["-v"]) assert result.exit_code == 0 assert pkg_version in result.output def test__cli__command_rules(): """Check rules command for exceptions.""" invoke_assert_code(args=[rules]) def test__cli__command_dialects(): """Check dialects command for exceptions.""" invoke_assert_code(args=[dialects]) def generic_roundtrip_test( source_file, rulestring, final_exit_code=0, force=True, fix_input=None, fix_exit_code=0, input_file_encoding="utf-8", output_file_encoding=None, ): """A test for roundtrip testing, take a file buffer, lint, fix and lint. This is explicitly different from the linter version of this, in that it uses the command line rather than the direct api. """ filename = "testing.sql" # Lets get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) # Open the example file and write the content to it with open(filepath, mode="w", encoding=input_file_encoding) as dest_file: for line in source_file: dest_file.write(line) status = os.stat(filepath) assert stat.S_ISREG(status.st_mode) old_mode = stat.S_IMODE(status.st_mode) # Check that we first detect the issue invoke_assert_code( ret_code=1, args=[lint, ["--dialect=ansi", "--rules", rulestring, filepath]], ) # Fix the file (in force mode) if force: fix_args = ["--rules", rulestring, "-f", filepath] else: fix_args = ["--rules", rulestring, filepath] fix_args.append("--dialect=ansi") invoke_assert_code( ret_code=fix_exit_code, args=[fix, fix_args], cli_input=fix_input ) # Now lint the file and check for exceptions invoke_assert_code( ret_code=final_exit_code, args=[lint, ["--dialect=ansi", "--rules", rulestring, filepath]], ) # Check the output file has the correct encoding after fix if output_file_encoding: with open(filepath, mode="rb") as f: data = f.read() assert chardet.detect(data)["encoding"] == output_file_encoding # Also check the file mode was preserved. 
status = os.stat(filepath) assert stat.S_ISREG(status.st_mode) new_mode = stat.S_IMODE(status.st_mode) assert new_mode == old_mode shutil.rmtree(tempdir_path) @pytest.mark.parametrize( "rule,fname", [ ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("LT01", "test/fixtures/linter/whitespace_errors.sql"), ("LT01", "test/fixtures/linter/indentation_errors.sql"), # Really stretching the ability of the fixer to re-indent a file ("LT02", "test/fixtures/linter/indentation_error_hard.sql"), ], ) def test__cli__command__fix(rule, fname): """Test the round trip of detecting, fixing and then not detecting the rule.""" with open(fname) as test_file: generic_roundtrip_test(test_file, rule) @pytest.mark.parametrize( "sql,fix_args,fixed,exit_code", [ ( # - One lint error: "where" is lower case # - Not fixable because of parse error, hence error exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 """, ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"], None, 1, ), ( # - One lint error: "where" is lower case # - Not fixable because of templater error, hence error exit """ SELECT my_col FROM my_schema.my_table where processdate {{ condition }} """, # Test the short versions of the options. ["--force", "-x", "FIXED", "-r", "CP01"], None, 1, ), ( # - One lint error: "where" is lower case # - Not fixable because of parse error (even though "noqa"), hence # error exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 -- noqa: PRS """, # Test the short versions of the options. ["--force", "-x", "FIXED", "-r", "CP01"], None, 1, ), ( # - No lint errors # - Parse error not suppressed, hence error exit """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 """, ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"], None, 1, ), ( # - No lint errors # - Parse error suppressed, hence success exit """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 --noqa: PRS """, ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"], None, 0, ), ( # - One lint error: "where" is lower case # - Parse error not suppressed # - "--FIX-EVEN-UNPARSABLE", hence fix anyway & success exit """ SELECT my_col FROM my_schema.my_table where processdate ! 3 """, [ "--force", "--fixed-suffix", "FIXED", "--rules", "CP01", "--FIX-EVEN-UNPARSABLE", ], """ SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 """, 0, ), ( # Two files: # File #1: # - One lint error: "where" is lower case # - Not fixable because of parse error # File #2: # - One lint error: "where" is lower case # - No parse error, thus fixable # Should fix the second file but not the first, and exit with an # error. [ """ SELECT my_col FROM my_schema.my_table where processdate ! 
3 """, """SELECT my_col FROM my_schema.my_table where processdate != 3""", ], ["--force", "--fixed-suffix", "FIXED", "--rules", "CP01"], [ None, """SELECT my_col FROM my_schema.my_table WHERE processdate != 3""", ], 1, ), ], ids=[ "1_lint_error_1_unsuppressed_parse_error", "1_lint_error_1_unsuppressed_templating_error", "1_lint_error_1_suppressed_parse_error", "0_lint_errors_1_unsuppressed_parse_error", "0_lint_errors_1_suppressed_parse_error", "1_lint_error_1_unsuppressed_parse_error_FIX_EVEN_UNPARSABLE", "2_files_with_lint_errors_1_unsuppressed_parse_error", ], ) def test__cli__fix_error_handling_behavior(sql, fix_args, fixed, exit_code, tmpdir): """Tests how "fix" behaves wrt parse errors, exit code, etc.""" if not isinstance(sql, list): sql = [sql] if not isinstance(fixed, list): fixed = [fixed] assert len(sql) == len(fixed) tmp_path = pathlib.Path(str(tmpdir)) for idx, this_sql in enumerate(sql): filepath = tmp_path / f"testing{idx + 1}.sql" filepath.write_text(textwrap.dedent(this_sql)) with tmpdir.as_cwd(): with pytest.raises(SystemExit) as e: fix( fix_args + [ "-f", # Use the short dialect option "-d", "ansi", ] ) assert exit_code == e.value.code for idx, this_fixed in enumerate(fixed): fixed_path = tmp_path / f"testing{idx + 1}FIXED.sql" if this_fixed is not None: assert textwrap.dedent(this_fixed) == fixed_path.read_text() else: # A None value indicates "sqlfluff fix" should have skipped any # fixes for this file. To confirm this, we verify that the output # file WAS NOT EVEN CREATED. assert not fixed_path.is_file() @pytest.mark.parametrize( "method,fix_even_unparsable", [ ("command-line", False), ("command-line", True), ("config-file", False), ("config-file", True), ], ) def test_cli_fix_even_unparsable( method: str, fix_even_unparsable: bool, monkeypatch, tmpdir ): """Test the fix_even_unparsable option works from cmd line and config.""" sql_filename = "fix_even_unparsable.sql" sql_path = str(tmpdir / sql_filename) with open(sql_path, "w") as f: print( """SELECT my_col FROM my_schema.my_table where processdate ! 3 """, file=f, ) options = [ "--dialect", "ansi", "-f", "--fixed-suffix=FIXED", sql_path, ] if method == "command-line": if fix_even_unparsable: options.append("--FIX-EVEN-UNPARSABLE") else: assert method == "config-file" with open(str(tmpdir / ".sqlfluff"), "w") as f: print( f"[sqlfluff]\nfix_even_unparsable = {fix_even_unparsable}", file=f, ) # TRICKY: Switch current directory to the one with the SQL file. Otherwise, # the setting doesn't work. That's because SQLFluff reads it in # sqlfluff.cli.commands.fix(), prior to reading any file-specific settings # (down in sqlfluff.core.linter.Linter._load_raw_file_and_config()). monkeypatch.chdir(str(tmpdir)) invoke_assert_code( ret_code=0 if fix_even_unparsable else 1, args=[ fix, options, ], ) fixed_path = str(tmpdir / "fix_even_unparsableFIXED.sql") if fix_even_unparsable: with open(fixed_path, "r") as f: fixed_sql = f.read() assert ( fixed_sql == """SELECT my_col FROM my_schema.my_table WHERE processdate ! 3 """ ) else: assert not os.path.isfile(fixed_path) _old_eval = BaseRule._eval _fix_counter = 0 def _mock_eval(rule, context): # For test__cli__fix_loop_limit_behavior, we mock BaseRule.crawl(), # replacing it with this function. This function generates an infinite # sequence of fixes without ever repeating the same fix. This causes the # linter to hit the loop limit, allowing us to test that behavior. 
if context.segment.is_type("comment") and "Comment" in context.segment.raw: global _fix_counter _fix_counter += 1 fix = LintFix.replace( context.segment, [CommentSegment(f"-- Comment {_fix_counter}")] ) return LintResult(context.segment, fixes=[fix]) else: return _old_eval(rule, context) @pytest.mark.parametrize( "sql, exit_code", [ ("-- Comment A\nSELECT 1 FROM foo", 1), ("-- noqa: disable=all\n-- Comment A\nSELECT 1 FROM foo", 0), ], ) @patch("sqlfluff.rules.layout.LT01.Rule_LT01._eval", _mock_eval) def test__cli__fix_loop_limit_behavior(sql, exit_code, tmpdir): """Tests how "fix" behaves when the loop limit is exceeded.""" fix_args = ["--force", "--fixed-suffix", "FIXED", "--rules", "LT01"] tmp_path = pathlib.Path(str(tmpdir)) filepath = tmp_path / "testing.sql" filepath.write_text(textwrap.dedent(sql)) with tmpdir.as_cwd(): with pytest.raises(SystemExit) as e: fix( fix_args + [ "-f", "--dialect=ansi", ] ) assert exit_code == e.value.code # In both parametrized test cases, no output file should have been # created. # - Case #1: Hitting the loop limit is an error # - Case #2: "noqa" suppressed all lint errors, thus no fixes applied fixed_path = tmp_path / "testingFIXED.sql" assert not fixed_path.is_file() @pytest.mark.parametrize( "stdin,rules,stdout", [ ("select * from t", "LT02", "select * from t"), # no change ( " select * from t", "LT02", "select * from t", ), # fix preceding whitespace ], ) def test__cli__command_fix_stdin(stdin, rules, stdout): """Check stdin input for fix works.""" result = invoke_assert_code( args=[ fix, ("-", "--rules", rules, "--disable-progress-bar", "--dialect=ansi"), ], cli_input=stdin, ) assert result.output == stdout @pytest.mark.parametrize( "stdin,stdout", [ ("select * from t\n", "select * from t\n"), # no change ( " select * FRoM t ", "select * from t\n", ), ], ) def test__cli__command_format_stdin(stdin, stdout): """Check stdin input for fix works.""" result = invoke_assert_code( args=[ cli_format, ("-", "--disable-progress-bar", "--dialect=ansi"), ], cli_input=stdin, ) assert result.output == stdout def test__cli__command_fix_stdin_logging_to_stderr(monkeypatch): """Check that logging goes to stderr when stdin is passed to fix.""" perfect_sql = "select col from table" class MockLinter(sqlfluff.core.Linter): @classmethod def lint_fix_parsed(cls, *args, **kwargs): cls._warn_unfixable("") return super().lint_fix_parsed(*args, **kwargs) monkeypatch.setattr(sqlfluff.cli.commands, "Linter", MockLinter) result = invoke_assert_code( args=[fix, ("-", "--rules=LT02", "--dialect=ansi")], cli_input=perfect_sql, mix_stderr=False, ) assert result.stdout == perfect_sql assert "" in result.stderr def test__cli__command_fix_stdin_safety(): """Check edge cases regarding safety when fixing stdin.""" perfect_sql = "select col from table" # just prints the very same thing result = invoke_assert_code( args=[fix, ("-", "--disable-progress-bar", "--dialect=ansi")], cli_input=perfect_sql, ) assert result.output.strip() == perfect_sql @pytest.mark.parametrize( "sql,exit_code,params,output_contains", [ ( "create TABLE {{ params.dsfsdfds }}.t (a int)", 1, "-v", "Fix aborted due to unparsable template variables.", ), # template error ("create TABLE a.t (a int)", 0, "", ""), # fixable error ("create table a.t (a int)", 0, "", ""), # perfection ( "select col from a join b using (c)", 1, "-v", "Unfixable violations detected.", ), # unfixable error (using) ], ) def test__cli__command_fix_stdin_error_exit_code( sql, exit_code, params, output_contains ): """Check that the CLI fails 
nicely if fixing a templated stdin.""" if exit_code == 0: invoke_assert_code( args=[fix, ("--dialect=ansi", "-")], cli_input=sql, ) else: with pytest.raises(SystemExit) as exc_info: invoke_assert_code( args=[fix, (params, "--dialect=ansi", "-")], cli_input=sql, output_contains=output_contains, ) assert exc_info.value.args[0] == exit_code @pytest.mark.parametrize( "rule,fname,prompt,exit_code,fix_exit_code", [ ("LT01", "test/fixtures/linter/indentation_errors.sql", "y", 0, 0), ("LT01", "test/fixtures/linter/indentation_errors.sql", "n", 1, 1), ], ) def test__cli__command__fix_no_force(rule, fname, prompt, exit_code, fix_exit_code): """Round trip test, using the prompts.""" with open(fname) as test_file: generic_roundtrip_test( test_file, rule, force=False, final_exit_code=exit_code, fix_input=prompt, fix_exit_code=fix_exit_code, ) @pytest.mark.parametrize("serialize", ["yaml", "json"]) @pytest.mark.parametrize("write_file", [None, "outfile"]) def test__cli__command_parse_serialize_from_stdin(serialize, write_file, tmp_path): """Check that the parser serialized output option is working. This tests both output to stdout and output to file. Not going to test for the content of the output as that is subject to change. """ cmd_args = ("-", "--format", serialize, "--dialect=ansi") if write_file: target_file = os.path.join(tmp_path, write_file + "." + serialize) cmd_args += ("--write-output", target_file) result = invoke_assert_code( args=[parse, cmd_args], cli_input="select * from tbl", ) if write_file: with open(target_file, "r") as payload_file: result_payload = payload_file.read() else: result_payload = result.output if serialize == "json": result = json.loads(result_payload) elif serialize == "yaml": result = yaml.safe_load(result_payload) else: raise Exception result = result[0] # only one file assert result["filepath"] == "stdin" @pytest.mark.parametrize("serialize", ["yaml", "json", "none"]) @pytest.mark.parametrize( "sql,expected,exit_code", [ ("select * from tbl", [], 0), # empty list if no violations ( "SElect * from tbl", [ { "filepath": "stdin", "violations": [ { "code": "CP01", "line_no": 1, "line_pos": 1, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", }, { "code": "CP01", "line_no": 1, "line_pos": 10, "description": "Keywords must be consistently upper case.", "name": "capitalisation.keywords", }, ], } ], 1, ), ], ) def test__cli__command_lint_serialize_from_stdin(serialize, sql, expected, exit_code): """Check an explicit serialized return value for a single error.""" result = invoke_assert_code( args=[ lint, ( "-", "--rules", "CP01", "--format", serialize, "--disable-progress-bar", "--dialect=ansi", ), ], cli_input=sql, ret_code=exit_code, ) if serialize == "json": assert json.loads(result.output) == expected elif serialize == "yaml": assert yaml.safe_load(result.output) == expected elif serialize == "none": assert result.output == "" else: raise Exception @pytest.mark.parametrize( "command", [ [lint, ("this_file_does_not_exist.sql")], [fix, ("this_file_does_not_exist.sql")], ], ) def test__cli__command_fail_nice_not_found(command): """Check commands fail as expected when then don't find files.""" result = invoke_assert_code(args=command, ret_code=2) assert ( "User Error: Specified path does not exist. 
Check it/they " "exist(s): this_file_does_not_exist.sql" ) in result.output @patch("click.utils.should_strip_ansi") @patch("sys.stdout.isatty") def test__cli__command_lint_nocolor(isatty, should_strip_ansi, capsys, tmpdir): """Test the --nocolor option prevents color output.""" # Patch these two functions to make it think every output stream is a TTY. # In spite of this, the output should not contain ANSI color codes because # we specify "--nocolor" below. isatty.return_value = True should_strip_ansi.return_value = False fpath = "test/fixtures/linter/indentation_errors.sql" output_file = str(tmpdir / "result.txt") cmd_args = [ "--verbose", "--nocolor", "--dialect", "ansi", "--disable-progress-bar", fpath, "--write-output", output_file, ] with pytest.raises(SystemExit): lint(cmd_args) out = capsys.readouterr()[0] assert not contains_ansi_escape(out) with open(output_file, "r") as f: file_contents = f.read() assert not contains_ansi_escape(file_contents) @pytest.mark.parametrize( "serialize", ["human", "yaml", "json", "github-annotation", "github-annotation-native", "none"], ) @pytest.mark.parametrize("write_file", [None, "outfile"]) def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_path): """Test the output formats for multiple files. This tests runs both stdout checking and file checking. """ fpath1 = "test/fixtures/linter/indentation_errors.sql" fpath2 = "test/fixtures/linter/multiple_sql_errors.sql" cmd_args = ( fpath1, fpath2, "--format", serialize, "--disable-progress-bar", ) if write_file: ext = { "human": ".txt", "yaml": ".yaml", } target_file = os.path.join(tmp_path, write_file + ext.get(serialize, ".json")) cmd_args += ("--write-output", target_file) # note the file is in here twice. two files = two payloads. result = invoke_assert_code( args=[lint, cmd_args], ret_code=1, ) # NOTE: The "none" serializer doesn't write a file even if specified. if write_file and serialize != "none": with open(target_file, "r") as payload_file: result_payload = payload_file.read() else: result_payload = result.output # Print for debugging. payload_length = len(result_payload.split("\n")) print("=== BEGIN RESULT OUTPUT") print(result_payload) print("=== END RESULT OUTPUT") print("Result length:", payload_length) if serialize == "human": assert payload_length == 25 if write_file else 34 elif serialize == "none": assert payload_length == 1 # There will be a single newline. 
elif serialize == "json": result = json.loads(result_payload) assert len(result) == 2 elif serialize == "yaml": result = yaml.safe_load(result_payload) assert len(result) == 2 elif serialize == "github-annotation": result = json.loads(result_payload) filepaths = {r["file"] for r in result} assert len(filepaths) == 2 elif serialize == "github-annotation-native": result = result_payload.split("\n") # SQLFluff produces trailing newline if result[-1] == "": del result[-1] assert len(result) == 12 else: raise Exception def test__cli__command_lint_serialize_github_annotation(): """Test format of github-annotation output.""" fpath = "test/fixtures/linter/identifier_capitalisation.sql" result = invoke_assert_code( args=[ lint, ( fpath, "--format", "github-annotation", "--annotation-level", "warning", "--disable-progress-bar", ), ], ret_code=1, ) result = json.loads(result.output) assert result == [ { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 2, "message": "RF02: Unqualified reference 'foo' found in select with more " "than one referenced table/view.", "start_column": 5, "end_column": 5, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 3, "message": "LT02: Expected indent of 8 spaces.", "start_column": 1, "end_column": 1, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 3, "message": "AL02: Implicit/explicit aliasing of columns.", "start_column": 5, "end_column": 5, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 3, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 5, "end_column": 5, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 4, "message": "CP01: Keywords must be consistently lower case.", "start_column": 1, "end_column": 1, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 4, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 12, "end_column": 12, "title": "SQLFluff", }, { "annotation_level": "warning", # Normalise paths to control for OS variance "file": os.path.normpath( "test/fixtures/linter/identifier_capitalisation.sql" ), "line": 4, "message": "CP02: Unquoted identifiers must be consistently lower case.", "start_column": 18, "end_column": 18, "title": "SQLFluff", }, ] def test__cli__command_lint_serialize_github_annotation_native(): """Test format of github-annotation output.""" fpath = "test/fixtures/linter/identifier_capitalisation.sql" # Normalise paths to control for OS variance fpath_normalised = os.path.normpath(fpath) result = invoke_assert_code( args=[ lint, ( fpath, "--format", "github-annotation-native", "--annotation-level", "error", "--disable-progress-bar", ), ], ret_code=1, ) assert result.output == "\n".join( [ f"::error 
title=SQLFluff,file={fpath_normalised},line=2,col=5::" "RF02: Unqualified reference 'foo' found in select with more than one " "referenced table/view. [references.qualification]", f"::error title=SQLFluff,file={fpath_normalised},line=3,col=1::" "LT02: Expected indent of 8 spaces. [layout.indent]", f"::error title=SQLFluff,file={fpath_normalised},line=3,col=5::" "AL02: Implicit/explicit aliasing of columns. [aliasing.column]", f"::error title=SQLFluff,file={fpath_normalised},line=3,col=5::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]", f"::error title=SQLFluff,file={fpath_normalised},line=4,col=1::" "CP01: Keywords must be consistently lower case. " "[capitalisation.keywords]", f"::error title=SQLFluff,file={fpath_normalised},line=4,col=12::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]", f"::error title=SQLFluff,file={fpath_normalised},line=4,col=18::" "CP02: Unquoted identifiers must be consistently lower case. " "[capitalisation.identifiers]", "", # SQLFluff produces trailing newline ] ) @pytest.mark.parametrize("serialize", ["github-annotation", "github-annotation-native"]) def test__cli__command_lint_serialize_annotation_level_error_failure_equivalent( serialize, ): """Test format of github-annotation output.""" fpath = "test/fixtures/linter/identifier_capitalisation.sql" result_error = invoke_assert_code( args=[ lint, ( fpath, "--format", serialize, "--annotation-level", "error", "--disable-progress-bar", ), ], ret_code=1, ) result_failure = invoke_assert_code( args=[ lint, ( fpath, "--format", serialize, "--annotation-level", "failure", "--disable-progress-bar", ), ], ret_code=1, ) assert result_error.output == result_failure.output def test___main___help(): """Test that the CLI can be access via __main__.""" # nonzero exit is good enough subprocess.check_output( [sys.executable, "-m", "sqlfluff", "--help"], env=os.environ ) @pytest.mark.parametrize( "encoding_in,encoding_out", [ ("utf-8", "ascii"), # chardet will detect ascii as a subset of utf-8 ("utf-8-sig", "UTF-8-SIG"), ("utf-32", "UTF-32"), ], ) def test_encoding(encoding_in, encoding_out): """Check the encoding of the test file remains the same after fix is applied.""" with open("test/fixtures/linter/indentation_errors.sql", "r") as testfile: generic_roundtrip_test( testfile, "LT01", input_file_encoding=encoding_in, output_file_encoding=encoding_out, ) @pytest.mark.parametrize( "encoding,method,expect_success", [ ("utf-8", "command-line", False), ("utf-8-SIG", "command-line", True), ("utf-8", "config-file", False), ("utf-8-SIG", "config-file", True), ], ) def test_cli_encoding(encoding, method, expect_success, tmpdir): """Try loading a utf-8-SIG encoded file using the correct encoding via the cli.""" sql_path = "test/fixtures/cli/encoding_test.sql" if method == "command-line": options = [sql_path, "--encoding", encoding] else: assert method == "config-file" with open(str(tmpdir / ".sqlfluff"), "w") as f: print(f"[sqlfluff]\ndialect=ansi\nencoding = {encoding}", file=f) shutil.copy(sql_path, tmpdir) options = [str(tmpdir / "encoding_test.sql")] result = invoke_assert_code( ret_code=1, args=[ lint, options, ], ) raw_output = repr(result.output) # Incorrect encoding raises parsing and lexer errors. 
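# (A plausible mechanism, noted as an assumption rather than asserted here: reading a utf-8-SIG file as plain utf-8 leaves a stray BOM character at the start of the string, which then surfaces as lexing/parsing errors at L:1 P:1.)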
success1 = r"L: 1 | P: 1 | LXR |" not in raw_output success2 = r"L: 1 | P: 1 | PRS |" not in raw_output assert success1 == expect_success assert success2 == expect_success def test_cli_no_disable_noqa_flag(): """Test that unset --disable-noqa flag respects inline noqa comments.""" invoke_assert_code( ret_code=0, args=[ lint, ["test/fixtures/cli/disable_noqa_test.sql"], ], ) def test_cli_disable_noqa_flag(): """Test that --disable-noqa flag ignores inline noqa comments.""" result = invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--disable-noqa", ], ], ) raw_output = repr(result.output) # Linting error is raised even though it is inline ignored. assert r"L: 6 | P: 11 | CP01 |" in raw_output def test_cli_warn_unused_noqa_flag(): """Test that --warn-unused-ignores flag works.""" result = invoke_assert_code( # Return value should still be success. ret_code=0, args=[ lint, [ "test/fixtures/cli/disable_noqa_test.sql", "--warn-unused-ignores", ], ], ) raw_output = repr(result.output) # Warning shown. assert r"L: 5 | P: 18 | NOQA | WARNING: Unused noqa: 'noqa: CP01'" in raw_output def test_cli_get_default_config(): """`nocolor` and `verbose` values loaded from config if not specified via CLI.""" config = get_config( "test/fixtures/config/toml/pyproject.toml", True, nocolor=None, verbose=None, require_dialect=False, ) assert config.get("nocolor") is True assert config.get("verbose") == 2 @patch( "sqlfluff.core.linter.linter.progress_bar_configuration", disable_progress_bar=False, ) class TestProgressBars: """Progress bars test cases. The tqdm package, used for handling progress bars, is able to tell when it is used in a not tty terminal (when `disable` is set to None). In such cases, it just does not render anything. To suppress that for testing purposes, we need to set implicitly that we don't want to disable it. Probably it would be better - cleaner - just to patch `isatty` at some point, but I didn't find a way how to do that properly. 
""" def test_cli_lint_disabled_progress_bar( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is disabled, nothing should be printed into output.""" result = invoke_assert_code( args=[ lint, [ "--disable-progress-bar", "test/fixtures/linter/passing.sql", ], ], ) raw_output = repr(result.output) assert "\rpath test/fixtures/linter/passing.sql:" not in raw_output assert "\rparsing: 0it" not in raw_output assert "\r\rlint by rules:" not in raw_output def test_cli_lint_disabled_progress_bar_deprecated_option( self, mock_disable_progress_bar: MagicMock ) -> None: """Same as above but checks additionally if deprecation warning is printed.""" result = invoke_assert_code( args=[ lint, [ "--disable_progress_bar", "test/fixtures/linter/passing.sql", ], ], ) raw_output = repr(result.output) assert "\rpath test/fixtures/linter/passing.sql:" not in raw_output assert "\rparsing: 0it" not in raw_output assert "\r\rlint by rules:" not in raw_output assert ( "DeprecationWarning: The option '--disable_progress_bar' is deprecated, " "use '--disable-progress-bar'" ) in raw_output def test_cli_lint_enabled_progress_bar( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( args=[ lint, [ "test/fixtures/linter/passing.sql", ], ], ) raw_output = repr(result.output) assert r"\rlint by rules:" in raw_output assert r"\rrule LT01:" in raw_output assert r"\rrule CV05:" in raw_output def test_cli_lint_enabled_progress_bar_multiple_paths( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( ret_code=1, args=[ lint, [ "test/fixtures/linter/passing.sql", "test/fixtures/linter/indentation_errors.sql", ], ], ) raw_output = repr(result.output) sep = os.sep if sys.platform == "win32": sep *= 2 assert ( r"\rfile test/fixtures/linter/passing.sql:".replace("/", sep) in raw_output ) assert ( r"\rfile test/fixtures/linter/indentation_errors.sql:".replace("/", sep) in raw_output ) assert r"\rlint by rules:" in raw_output assert r"\rrule LT01:" in raw_output assert r"\rrule CV05:" in raw_output def test_cli_lint_enabled_progress_bar_multiple_files( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is enabled, there should be some tracks in output.""" result = invoke_assert_code( args=[ lint, [ "test/fixtures/linter/multiple_files", ], ], ) raw_output = repr(result.output) sep = os.sep if sys.platform == "win32": sep *= 2 assert ( r"\rfile test/fixtures/linter/multiple_files/passing.1.sql:".replace( "/", sep ) in raw_output ) assert ( r"\rfile test/fixtures/linter/multiple_files/passing.2.sql:".replace( "/", sep ) in raw_output ) assert ( r"\rfile test/fixtures/linter/multiple_files/passing.3.sql:".replace( "/", sep ) in raw_output ) assert r"\rlint by rules:" in raw_output assert r"\rrule LT01:" in raw_output assert r"\rrule CV05:" in raw_output def test_cli_fix_disabled_progress_bar( self, mock_disable_progress_bar: MagicMock ) -> None: """When progress bar is disabled, nothing should be printed into output.""" result = invoke_assert_code( args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/passing.sql", ], ], ) raw_output = repr(result.output) assert ( "DeprecationWarning: The option '--disable_progress_bar' is deprecated, " "use '--disable-progress-bar'" ) not in raw_output def test_cli_fix_disabled_progress_bar_deprecated_option( self, mock_disable_progress_bar: 
MagicMock ) -> None: """Same as above but checks additionally if deprecation warning is printed.""" result = invoke_assert_code( args=[ fix, [ "--disable_progress_bar", "test/fixtures/linter/passing.sql", ], ], ) raw_output = repr(result.output) assert ( "DeprecationWarning: The option '--disable_progress_bar' is deprecated, " "use '--disable-progress-bar'" ) in raw_output multiple_expected_output = """==== finding fixable violations ==== == [test/fixtures/linter/multiple_sql_errors.sql] FAIL L: 12 | P: 1 | LT02 | Expected indent of 4 spaces. [layout.indent] L: 40 | P: 10 | ST09 | Joins should list the table referenced earlier first. | [structure.join_condition_order] ==== fixing violations ==== 2 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ... Invalid input, please enter 'Y' or 'N' Aborting... [4 unfixable linting violations found] """ def test__cli__fix_multiple_errors_no_show_errors(): """Test the fix output.""" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", ], ], ) # We should get a readout of what the error was check_a = "4 unfixable linting violations found" assert check_a in result.output # Finally check the WHOLE output to make sure that unexpected newlines are not # added. The replace command just accounts for cross platform testing. assert result.output.replace("\\", "/").startswith(multiple_expected_output) def test__cli__fix_multiple_errors_quiet_force(): """Test the fix --quiet option with --force.""" result = invoke_assert_code( ret_code=0, args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", "--force", "--quiet", "-x", "_fix", ], ], ) normalised_output = result.output.replace("\\", "/") assert normalised_output.startswith( """== [test/fixtures/linter/multiple_sql_errors.sql] FIXED 2 fixable linting violations found""" ) def test__cli__fix_multiple_errors_quiet_no_force(): """Test the fix --quiet option without --force.""" result = invoke_assert_code( ret_code=0, args=[ fix, [ "--disable-progress-bar", "test/fixtures/linter/multiple_sql_errors.sql", "--quiet", "-x", "_fix", ], # Test with the confirmation step. "y", ], ) normalised_output = result.output.replace("\\", "/") assert normalised_output.startswith( """2 fixable linting violations found Are you sure you wish to attempt to fix these? [Y/n] ... == [test/fixtures/linter/multiple_sql_errors.sql] FIXED All Finished""" ) def test__cli__fix_multiple_errors_show_errors(): """Test the fix --show-lint-violations option.""" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--disable-progress-bar", "--show-lint-violations", "test/fixtures/linter/multiple_sql_errors.sql", ], ], ) # We should get a readout of what the error was check_a = "4 unfixable linting violations found" assert check_a in result.output # Finally check the WHOLE output to make sure that unexpected newlines are not # added. The replace command just accounts for cross platform testing. assert "L: 12 | P: 1 | LT02 | Expected indent of 4 spaces." 
in result.output assert ( "L: 36 | P: 9 | RF02 | Unqualified reference 'package_id' found in " "select with more than" in result.output ) assert ( "L: 45 | P: 17 | RF02 | Unqualified reference 'owner_type' found in " "select with more than" in result.output ) assert ( "L: 45 | P: 50 | RF02 | Unqualified reference 'app_key' found in " "select with more than one" in result.output ) assert ( "L: 42 | P: 45 | RF02 | Unqualified reference 'owner_id' found in " "select with more than" in result.output ) def test__cli__multiple_files__fix_multiple_errors_show_errors(): """Basic check that, when fixing multiple files, the filenames are listed.""" sql_path = "test/fixtures/linter/multiple_sql_errors.sql" indent_path = "test/fixtures/linter/indentation_errors.sql" result = invoke_assert_code( ret_code=1, args=[ fix, [ "--disable-progress-bar", "--show-lint-violations", sql_path, indent_path, ], ], ) unfixable_error_msg = "==== lint for unfixable violations ====" assert unfixable_error_msg in result.output indent_pass_msg = f"== [{os.path.normpath(indent_path)}] PASS" multi_fail_msg = f"== [{os.path.normpath(sql_path)}] FAIL" unfix_err_log = result.output[result.output.index(unfixable_error_msg) :] assert indent_pass_msg in unfix_err_log assert multi_fail_msg in unfix_err_log # Assert that they are sorted in alphabetical order assert unfix_err_log.index(indent_pass_msg) < unfix_err_log.index(multi_fail_msg) def test__cli__render_fail(): """Basic check of how render fails.""" expected_render_output = ( "L: 3 | P: 8 | TMP | Undefined jinja template " "variable: 'something'" ) result = invoke_assert_code( ret_code=1, args=[ render, [ "test/fixtures/cli/fail_many.sql", ], ], ) # Check whole output. The replace command just accounts for # cross platform testing. assert result.output.replace("\\", "/").startswith(expected_render_output) def test__cli__render_pass(): """Basic check of how render works.""" expected_render_output = "SELECT 56 FROM sch1.tbl2" result = invoke_assert_code( ret_code=0, args=[ render, [ "test/fixtures/templater/jinja_a/jinja.sql", ], ], ) # Check whole output. The replace command just accounts for # cross platform testing. assert result.output.replace("\\", "/").startswith(expected_render_output) sqlfluff-2.3.5/test/cli/formatters_test.py000066400000000000000000000076071451700765000207060ustar00rootroot00000000000000"""The Test file for CLI Formatters.""" import pathlib import re import textwrap import pytest from sqlfluff.cli.commands import fix from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.cli.outputstream import FileOutput from sqlfluff.core import FluffConfig from sqlfluff.core.enums import Color from sqlfluff.core.errors import SQLLintError from sqlfluff.core.parser import RawSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import RuleGhost from sqlfluff.core.templaters.base import TemplatedFile def escape_ansi(line): """Remove ANSI color codes for testing.""" ansi_escape = re.compile("\u001b\\[[0-9]+(;[0-9]+)?m") return ansi_escape.sub("", line) def test__cli__formatters__filename_nocol(tmpdir): """Test formatting filenames.""" formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) res = formatter.format_filename("blahblah", success=True) assert escape_ansi(res) == "== [blahblah] PASS" def test__cli__formatters__violation(tmpdir): """Test formatting violations. NB Position is 1 + start_pos.
""" s = RawSegment( "foobarbar", PositionMarker( slice(10, 19), slice(10, 19), TemplatedFile.from_string(" \n\n foobarbar"), ), ) r = RuleGhost("A", "some-name", "DESC") v = SQLLintError(description=r.description, segment=s, rule=r) formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) f = formatter.format_violation(v) # Position is 3, 3 because foobarbar is on the third # line (i.e. it has two newlines preceding it) and # it's at the third position in that line (i.e. there # are two characters between it and the preceding # newline). assert escape_ansi(f) == "L: 3 | P: 3 | A | DESC [some-name]" def test__cli__helpers__colorize(tmpdir): """Test ANSI colouring.""" formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) # Force color output for this test. formatter.plain_output = False assert formatter.colorize("foo", Color.red) == "\u001b[31mfoo\u001b[0m" def test__cli__helpers__cli_table(tmpdir): """Test making tables.""" vals = [("a", 3), ("b", "c"), ("d", 4.7654), ("e", 9)] formatter = OutputStreamFormatter( FileOutput(FluffConfig(require_dialect=False), str(tmpdir / "out.txt")), False ) txt = formatter.cli_table(vals, col_width=7, divider_char="|", label_color=None) # NB: No trailing newline assert txt == "a: 3|b: c\nd: 4.77|e: 9" @pytest.mark.parametrize( "sql,fix_args,expected", [ ( ( "CREATE TABLE IF NOT EXISTS vuln.software_name_dictionary(" "id SERIAL PRIMARY KEY" "rule VARCHAR(30)" ");" ), ["--force", "--dialect", "postgres", "--disable_progress_bar", "--nocolor"], ( "CREATE TABLE IF NOT EXISTS vuln.software_name_dictionary(" "id SERIAL PRIMARY KEY" "rule VARCHAR(30)" ");" ), ) ], ) def test__cli__fix_no_corrupt_file_contents(sql, fix_args, expected, tmpdir): """Test how the fix cli command creates files. Ensure there is no incorrect output from stderr that makes it to the file. """ tmp_path = pathlib.Path(str(tmpdir)) filepath = tmp_path / "testing.sql" filepath.write_text(textwrap.dedent(sql)) with tmpdir.as_cwd(): with pytest.raises(SystemExit): fix(fix_args) with open(tmp_path / "testing.sql", "r") as fin: actual = fin.read() # Ensure no corruption in formatted file assert actual.strip() == expected.strip() sqlfluff-2.3.5/test/cli/helpers_test.py000066400000000000000000000043531451700765000201560ustar00rootroot00000000000000"""The Test file for CLI helpers.""" import pytest from sqlfluff.cli.helpers import LazySequence, pad_line, wrap_elem, wrap_field @pytest.mark.parametrize( "in_str,length,res", [ ("abc", 5, ["abc"]), # Space wrap test ("how now brown cow", 10, ["how now", "brown cow"]), # Harder wrap test ("A hippopotamus came for tea", 10, ["A hippopot", "amus came", "for tea"]), # Harder wrap test, with a newline. 
("A hippopotamus\ncame for tea", 10, ["A hippopot", "amus came", "for tea"]), ], ) def test__cli__helpers__wrap_elem(in_str, length, res): """Test wrapping.""" str_list = wrap_elem(in_str, length) assert str_list == res def test__cli__helpers__wrap_field_a(): """Test simple wrapping.""" dct = wrap_field("abc", "How Now Brown Cow", width=40) assert dct["label_list"] == ["abc"] assert dct["val_list"] == ["How Now Brown Cow"] assert "sep_char" in dct assert dct["lines"] == 1 assert dct["label_width"] == 3 def test__cli__helpers__wrap_field_b(): """Test simple wrapping with overlap avoidance.""" dct = wrap_field("abc", "How Now Brown Cow", width=23) assert dct["label_list"] == ["abc"] assert dct["val_list"] == ["How Now Brown Cow"] assert dct["label_width"] == 3 def test__cli__helpers__wrap_field_c(): """Test simple wrapping.""" dct = wrap_field("how now brn cow", "How Now Brown Cow", width=25) assert dct["label_list"] == ["how now", "brn cow"] assert dct["label_width"] == 7 assert dct["val_list"] == ["How Now Brown", "Cow"] assert dct["lines"] == 2 def test__cli__helpers__pad_line(): """Test line padding.""" assert pad_line("abc", 5) == "abc " assert pad_line("abcdef", 10, align="right") == " abcdef" def test_cli__helpers__lazy_sequence(): """Test the LazySequence.""" getter_run = False def _get_sequence(): nonlocal getter_run getter_run = True return [1, 2, 3] seq = LazySequence(_get_sequence) # Check the sequence isn't called on instantiation. assert not getter_run # Fetch an item... assert seq[2] == 3 # .. and that now it has run. assert getter_run # Check other methods work assert len(seq) == 3 sqlfluff-2.3.5/test/cli/test_click_deprecated_option.py000066400000000000000000000040711451700765000233460ustar00rootroot00000000000000"""The Test suite for `DeprecatedOption` - extension for click options.""" from typing import List import click import pytest from sqlfluff.cli.click_deprecated_option import ( DeprecatedOption, DeprecatedOptionsCommand, ) from test.cli.commands_test import invoke_assert_code class TestClickDeprecatedOption: """Tests for custom click's option `DeprecatedOption`.""" @pytest.mark.parametrize( "option, expected_output", [ ([], "{'old_option': False}\n"), ( ["--old_option"], "DeprecationWarning: The option '--old_option' is deprecated, " "use '--new_option'.\n{'old_option': True}\n", ), (["--new_option"], "{'old_option': True}\n"), ], ) def test_cli_deprecated_option( self, option: List[str], expected_output: str ) -> None: """Prepares command with option which has deprecated version and checks it.""" @click.command(cls=DeprecatedOptionsCommand) @click.option( "--old_option", "--new_option", is_flag=True, cls=DeprecatedOption, deprecated=["--old_option"], ) def some_command(**kwargs): click.echo("{}".format(kwargs)) result = invoke_assert_code(args=[some_command, option]) raw_output = result.output assert raw_output == expected_output def test_cli_deprecated_option_should_fail_when_missing_attr( self, ) -> None: """The DeprecatedOption needs to have specified deprecated attr.""" @click.command(cls=DeprecatedOptionsCommand) @click.option( "--old_option", "--new_option", is_flag=True, cls=DeprecatedOption, ) def some_command(**kwargs): click.echo("{}".format(kwargs)) with pytest.raises(ValueError) as exc: invoke_assert_code(args=[some_command, ["--old_option"]]) assert str(exc.value) == "Expected `deprecated` value for `'old_option'`" sqlfluff-2.3.5/test/conftest.py000066400000000000000000000230051451700765000165260ustar00rootroot00000000000000"""Common Test Fixtures.""" 
import hashlib import io import os from typing import List, NamedTuple, Tuple import pytest import yaml from sqlfluff.cli.commands import quoted_presenter from sqlfluff.core import FluffConfig from sqlfluff.core.linter import Linter from sqlfluff.core.parser import Lexer, Parser from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, CodeSegment, CommentSegment, Dedent, Indent, NewlineSegment, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.rules import BaseRule from sqlfluff.core.templaters import TemplatedFile # When writing YAML files, double quotes string values needing escapes. yaml.add_representer(str, quoted_presenter) class ParseExample(NamedTuple): """A tuple representing an example SQL file to parse.""" dialect: str sqlfile: str def get_parse_fixtures( fail_on_missing_yml=False, ) -> Tuple[List[ParseExample], List[Tuple[str, str, bool, str]]]: """Search for all parsing fixtures.""" parse_success_examples = [] parse_structure_examples = [] # Generate the filenames for each dialect from the parser test directory for d in os.listdir(os.path.join("test", "fixtures", "dialects")): # Ignore documentation if d.endswith(".md"): continue # assume that d is now the name of a dialect dirlist = os.listdir(os.path.join("test", "fixtures", "dialects", d)) for f in dirlist: has_yml = False if f.endswith(".sql"): root = f[:-4] # only look for sql files parse_success_examples.append(ParseExample(d, f)) # Look for the code_only version of the structure y = root + ".yml" if y in dirlist: parse_structure_examples.append((d, f, True, y)) has_yml = True # Look for the non-code included version of the structure y = root + "_nc.yml" if y in dirlist: parse_structure_examples.append((d, f, False, y)) has_yml = True if not has_yml and fail_on_missing_yml: raise ( Exception( f"Missing .yml file for {os.path.join(d, f)}. Run the " "test/generate_parse_fixture_yml.py script!" 
) ) return parse_success_examples, parse_structure_examples def make_dialect_path(dialect, fname): """Work out how to find paths given a dialect and a file name.""" return os.path.join("test", "fixtures", "dialects", dialect, fname) def load_file(dialect, fname): """Load a file.""" with open(make_dialect_path(dialect, fname)) as f: raw = f.read() return raw def process_struct(obj): """Process a nested dict or dict-like into a check tuple.""" if isinstance(obj, dict): return tuple((k, process_struct(obj[k])) for k in obj) elif isinstance(obj, list): # If empty list, return empty tuple if not len(obj): return tuple() # We'll assume that it's a list of dicts if isinstance(obj[0], dict): buff = [process_struct(elem) for elem in obj] if any(len(elem) > 1 for elem in buff): raise ValueError(f"Not sure how to deal with multi key dict: {buff!r}") return tuple(elem[0] for elem in buff) else: raise TypeError(f"Did not expect a list of {type(obj[0])}: {obj[0]!r}") elif isinstance(obj, (str, int, float)): return str(obj) elif obj is None: return None else: raise TypeError(f"Not sure how to deal with type {type(obj)}: {obj!r}") def parse_example_file(dialect: str, sqlfile: str): """Parse example SQL file, return parse tree.""" config = FluffConfig(overrides=dict(dialect=dialect)) # Load the SQL raw = load_file(dialect, sqlfile) # Lex and parse the file tokens, _ = Lexer(config=config).lex(raw) tree = Parser(config=config).parse(tokens, fname=dialect + "/" + sqlfile) return tree def compute_parse_tree_hash(tree): """Given a parse tree, compute a consistent hash value for it.""" if tree: r = tree.as_record(code_only=True, show_raw=True) if r: r_io = io.StringIO() yaml.dump(r, r_io, sort_keys=False) result = hashlib.blake2s(r_io.getvalue().encode("utf-8")).hexdigest() return result return None def load_yaml(fpath): """Load a yaml structure and process it into a tuple.""" # Load raw file with open(fpath) as f: raw = f.read() # Parse the yaml obj = yaml.safe_load(raw) # Return the parsed and structured object _hash = None if obj: _hash = obj.pop("_hash", None) processed = process_struct(obj) if processed: return _hash, process_struct(obj)[0] else: return None, None @pytest.fixture() def yaml_loader(): """Return a yaml loading function.""" # Return a function return load_yaml def _generate_test_segments_func(elems): """Roughly generate test segments. This function isn't totally robust, but good enough for testing. Use with caution. 
""" buff = [] raw_file = "".join(elems) templated_file = TemplatedFile.from_string(raw_file) idx = 0 for elem in elems: if elem == "": buff.append( Indent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue elif elem == "": buff.append( Dedent(pos_marker=PositionMarker.from_point(idx, idx, templated_file)) ) continue seg_kwargs = {} if set(elem) <= {" ", "\t"}: SegClass = WhitespaceSegment elif set(elem) <= {"\n"}: SegClass = NewlineSegment elif elem == "(": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_bracket",)} elif elem == ")": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_bracket",)} elif elem == "[": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("start_square_bracket",)} elif elem == "]": SegClass = SymbolSegment seg_kwargs = {"instance_types": ("end_square_bracket",)} elif elem.startswith("--"): SegClass = CommentSegment seg_kwargs = {"instance_types": ("inline_comment",)} elif elem.startswith('"'): SegClass = CodeSegment seg_kwargs = {"instance_types": ("double_quote",)} elif elem.startswith("'"): SegClass = CodeSegment seg_kwargs = {"instance_types": ("single_quote",)} else: SegClass = CodeSegment # Set a none position marker which we'll realign at the end. buff.append( SegClass( raw=elem, pos_marker=PositionMarker( slice(idx, idx + len(elem)), slice(idx, idx + len(elem)), templated_file, ), **seg_kwargs, ) ) idx += len(elem) return tuple(buff) @pytest.fixture(scope="module") def generate_test_segments(): """Roughly generate test segments. This is a factory function so that it works as a fixture, but when actually used, this will return the inner function which is what you actually need. """ return _generate_test_segments_func @pytest.fixture def raise_critical_errors_after_fix(monkeypatch): """Raises errors that break the Fix process. These errors are otherwise swallowed to allow the lint messages to reach the end user. """ @staticmethod def _log_critical_errors(error: Exception): raise error monkeypatch.setattr(BaseRule, "_log_critical_errors", _log_critical_errors) @pytest.fixture(autouse=True) def fail_on_parse_error_after_fix(monkeypatch): """Cause tests to fail if a lint fix introduces a parse error. In production, we have a couple of functions that, upon detecting a bug in a lint rule, just log a warning. To catch bugs in new or modified rules, we want to be more strict during dev and CI/CD testing. Here, we patch in different functions which raise runtime errors, causing tests to fail if this happens. """ @staticmethod def raise_error_apply_fixes_check_issue(message, *args): # pragma: no cover raise ValueError(message % args) @staticmethod def raise_error_conflicting_fixes_same_anchor(message: str): # pragma: no cover raise ValueError(message) monkeypatch.setattr( BaseSegment, "_log_apply_fixes_check_issue", raise_error_apply_fixes_check_issue ) monkeypatch.setattr( Linter, "_report_conflicting_fixes_same_anchor", raise_error_conflicting_fixes_same_anchor, ) @pytest.fixture(autouse=True) def test_verbosity_level(request): """Report the verbosity level for a given pytest run. 
For example: $ pytest -vv Has a verbosity level of 2 While: $ pytest Has a verbosity level of 0 """ return request.config.getoption("verbose") def pytest_addoption(parser): """Allow to run test/rules/yaml_test_cases_test.py for a specific rule_id.""" parser.addoption("--rule_id", action="store", default="*", help="Rule id to run") sqlfluff-2.3.5/test/core/000077500000000000000000000000001451700765000152575ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/__init__.py000066400000000000000000000000421451700765000173640ustar00rootroot00000000000000"""Tests for the core library.""" sqlfluff-2.3.5/test/core/config_test.py000066400000000000000000000472371451700765000201520ustar00rootroot00000000000000"""Tests for the configuration routines.""" import logging import os import sys from pathlib import Path from unittest.mock import call, patch import appdirs import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.config import ( REMOVED_CONFIGS, ConfigLoader, ) from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.templaters import ( JinjaTemplater, PlaceholderTemplater, PythonTemplater, RawTemplater, ) from sqlfluff.utils.testing.logging import fluff_log_catcher config_a = { "core": {"testing_val": "foobar", "testing_int": 4, "dialect": "mysql"}, "bar": {"foo": "barbar"}, } config_b = { "core": {"rules": "LT03", "dialect": "ansi"}, "layout": { "type": {"comma": {"line_position": "trailing", "spacing_before": "touch"}} }, } config_c = { "core": {"rules": "LT03", "dialect": "ansi"}, # NOTE: # - NOT_A_RULE doesn't match anything. # - L001 is an alias, but no longer a rule. # - layout is a group and but doesn't match any individual rule. "rules": { "NOT_A_RULE": {"foo": "bar"}, "L001": {"foo": "bar"}, "layout": {"foo": "bar"}, }, } @pytest.fixture def mock_xdg_home(monkeypatch): """Sets the XDG_CONFIG_HOME variable.""" monkeypatch.setenv("XDG_CONFIG_HOME", "~/.config/my/special/path") def test__config__load_file_dir(): """Test loading config from a directory path.""" c = ConfigLoader() cfg = c.load_config_at_path( os.path.join("test", "fixtures", "config", "inheritance_a") ) assert cfg == config_a def test__config__load_from_string(): """Test loading config from a string.""" c = ConfigLoader() # Load a string with open( os.path.join("test", "fixtures", "config", "inheritance_a", ".sqlfluff") ) as f: config_string = f.read() cfg = c.load_config_string(config_string) assert cfg == config_a def test__config__from_strings(): """Test loading config from multiple strings.""" strings = [ "[sqlfluff]\ndialect=mysql\ntesting_val=foobar", "[sqlfluff]\ndialect=postgres\ntesting_val2=bar", "[sqlfluff]\ndialect=mysql\ntesting_val=foo", ] cfg = FluffConfig.from_strings(*strings) assert cfg.get("dialect") == "mysql" assert cfg.get("testing_val2") == "bar" assert cfg.get("testing_val") == "foo" def test__config__load_file_f(): """Test loading config from a file path.""" c = ConfigLoader() cfg = c.load_config_at_path( os.path.join("test", "fixtures", "config", "inheritance_a", "testing.sql") ) assert cfg == config_a def test__config__load_nested(): """Test nested overwrite and order of precedence of config files.""" c = ConfigLoader() cfg = c.load_config_up_to_path( os.path.join( "test", "fixtures", "config", "inheritance_a", "nested", "blah.sql" ) ) assert cfg == { "core": { "dialect": "mysql", "testing_val": "foobar", "testing_int": 1, "testing_bar": 7.698, }, "bar": {"foo": "foobar"}, "fnarr": {"fnarr": {"foo": "foobar"}}, } def test__config__iter_config_elems_from_dict(): 
"""Test nested overwrite and order of precedence of config files.""" c = ConfigLoader._iter_config_elems_from_dict( {"a": {"b": {"c": 123, "d": 456}, "f": 6}} ) assert list(c) == [ (("a", "b", "c"), 123), (("a", "b", "d"), 456), (("a", "f"), 6), ] def test__config__load_toml(): """Test loading config from a pyproject.toml file.""" c = ConfigLoader() cfg = c.load_config_file( os.path.join("test", "fixtures", "config", "toml"), "pyproject.toml", ) assert cfg == { "core": { "nocolor": True, "verbose": 2, "testing_int": 5, "testing_bar": 7.698, "testing_bool": False, "testing_arr": ["a", "b", "c"], "rules": ["LT03", "LT09"], "testing_inline_table": {"x": 1}, }, "bar": {"foo": "foobar"}, "fnarr": {"fnarr": {"foo": "foobar"}}, "rules": {"capitalisation.keywords": {"capitalisation_policy": "upper"}}, } def test__config__load_placeholder_cfg(): """Test loading a sqlfluff configuration file for placeholder templater.""" c = ConfigLoader() cfg = c.load_config_file( os.path.join("test", "fixtures", "config", "placeholder"), ".sqlfluff-placeholder", ) assert cfg == { "core": { "testing_val": "foobar", "testing_int": 4, }, "bar": {"foo": "barbar"}, "templater": { "placeholder": { "param_style": "flyway_var", "flyway:database": "test_db", } }, } def test__config__iter_config_paths_right_order(): """Test that config paths are fetched ordered by priority.""" c = ConfigLoader() cfg_paths = c.iter_config_locations_up_to_path( os.path.join( "test", "fixtures", "config", "inheritance_a", "nested", "blah.sql" ), working_path="test/fixtures", ) assert list(cfg_paths) == [ str(Path(p).resolve()) for p in [ "test/fixtures", "test/fixtures/config", "test/fixtures/config/inheritance_a", "test/fixtures/config/inheritance_a/nested", ] ] def test__config__find_sqlfluffignore_in_same_directory(): """Test find ignore file in the same directory as sql file.""" ignore_files = ConfigLoader.find_ignore_config_files( path="test/fixtures/linter/sqlfluffignore/path_b/query_b.sql", working_path="test/fixtures/linter/sqlfluffignore/", ) assert ignore_files == { os.path.abspath("test/fixtures/linter/sqlfluffignore/path_b/.sqlfluffignore"), os.path.abspath("test/fixtures/linter/sqlfluffignore/.sqlfluffignore"), } def test__config__nested_config_tests(): """Test linting with overridden config in nested paths. This looks like a linter test but it's actually a config test. """ lntr = Linter( # Exclude CP02 in overrides (similar to cli --exclude-rules) config=FluffConfig(overrides=dict(exclude_rules="CP02", dialect="ansi")) ) lnt = lntr.lint_path("test/fixtures/config/inheritance_b") violations = lnt.check_tuples(by_path=True) for k in violations: if k.endswith("nested\\example.sql"): # CP01 is enabled in the .sqlfluff file and not excluded. assert ("CP01", 1, 4) in violations[k] # LT02 is enabled in the .sqlfluff file and not excluded. assert ("LT02", 1, 1) in violations[k] # CP02 is enabled in the .sqlfluff file but excluded by the # override above. assert "CP02" not in [c[0] for c in violations[k]] elif k.endswith("inheritance_b\\example.sql"): # CP01 is enabled because while disabled in the tox.ini file, # the exclude-rules option is overridden by the override above # which effectively sets the exclude to CP02 and in effect # re-enables CP01. # This may seem counter-intuitive but is in line with current # documentation on how to use `rules` and `exclude-rules`. 
# https://docs.sqlfluff.com/en/latest/configuration.html#enabling-and-disabling-rules assert ("CP01", 1, 4) in violations[k] # CP02 is disabled because of the override above. assert "CP02" not in [c[0] for c in violations[k]] # LT02 is disabled because it is not in the `rules` of tox.ini assert "LT02" not in [c[0] for c in violations[k]] @patch("os.path.exists") @patch("os.listdir") @pytest.mark.skipif(sys.platform == "win32", reason="Not applicable on Windows") def test__config__load_user_appdir_config( mock_listdir, mock_path_exists, mock_xdg_home ): """Test loading config from user appdir.""" xdg_config_path = os.environ.get("XDG_CONFIG_HOME") + "/sqlfluff" def path_exists(x): if x == os.path.expanduser("~/.config/sqlfluff"): return False if x == xdg_config_path: return False else: return True mock_path_exists.side_effect = path_exists c = ConfigLoader() with patch.object(appdirs, attribute="system", new="darwin"): resolved_path = c._get_user_config_dir_path() c.load_user_appdir_config() assert resolved_path == os.path.expanduser("~/Library/Application Support/sqlfluff") mock_path_exists.assert_has_calls( [ call(xdg_config_path), call(os.path.expanduser("~/Library/Application Support/sqlfluff")), ] ) def test__config__templater_selection(): """Test template selection by name.""" cfg = FluffConfig(overrides={"dialect": "ansi"}) assert cfg.get_templater().__class__ is JinjaTemplater assert cfg.get_templater("raw").__class__ is RawTemplater assert cfg.get_templater("python").__class__ is PythonTemplater assert cfg.get_templater("jinja").__class__ is JinjaTemplater assert cfg.get_templater("placeholder").__class__ is PlaceholderTemplater with pytest.raises(ValueError): cfg.get_templater("afefhlsakufe") def test__config__glob_exclude_config_tests(): """Test linting with a glob pattern in exclude_rules. This looks like a linter test but it's actually a config test. """ lntr = Linter(config=FluffConfig.from_path("test/fixtures/config/glob_exclude")) lnt = lntr.lint_path("test/fixtures/config/glob_exclude/test.sql") violations = lnt.check_tuples(by_path=True) for k in violations: assert ("AM04", 12, 1) in violations[k] assert "RF02" not in [c[0] for c in violations[k]] assert "LT13" not in [c[0] for c in violations[k]] assert "AM05" not in [c[0] for c in violations[k]] assert "CV06" not in [c[0] for c in violations[k]] def test__config__glob_include_config_tests(): """Test linting with a glob pattern in rules. This looks like a linter test but it's actually a config test. """ lntr = Linter(config=FluffConfig.from_path("test/fixtures/config/glob_include")) lnt = lntr.lint_path("test/fixtures/config/glob_include/test.sql") violations = lnt.check_tuples(by_path=True) for k in violations: assert ("LT13", 1, 1) in violations[k] assert ("AM05", 14, 1) in violations[k] assert ("CV06", 14, 9) in violations[k] assert ("RF02", 12, 8) in violations[k] assert "AM04" not in [c[0] for c in violations[k]] def test__config__rules_set_to_none(): """Test linting when rules are set to 'None'. Ensure that all rules are still run. 
""" lntr = Linter( config=FluffConfig.from_path("test/fixtures/config/rules_set_to_none") ) lnt = lntr.lint_path("test/fixtures/config/rules_set_to_none/test.sql") violations = lnt.check_tuples(by_path=True) for k in violations: assert ("LT13", 1, 1) in violations[k] assert ("AM04", 12, 1) in violations[k] assert ("CP01", 12, 10) in violations[k] def test__config__rules_group_with_exclude(): """Test linting when a rules group is selected and rules are excluded.""" lntr = Linter( config=FluffConfig.from_path("test/fixtures/config/rules_group_with_exclude") ) lnt = lntr.lint_path("test/fixtures/config/rules_group_with_exclude/test.sql") violations = lnt.check_tuples(by_path=True) for k in violations: assert ("CP01", 15, 1) in violations[k] assert "LT04" not in [c[0] for c in violations[k]] def test__config__get_section(): """Test FluffConfig.get_section method.""" cfg = FluffConfig(config_b) assert cfg.get_section("core").get("rules", None) == "LT03" assert cfg.get_section(["layout", "type", "comma"]) == { "line_position": "trailing", "spacing_before": "touch", } assert cfg.get_section("non_existent") is None def test__config__get(): """Test FluffConfig.get method.""" cfg = FluffConfig(config_b) assert cfg.get("rules") == "LT03" assert cfg.get("rulez") is None assert cfg.get("rulez", section="core", default=123) == 123 assert ( cfg.get("line_position", section=["layout", "type", "comma"], default=None) == "trailing" ) assert ( cfg.get("line_position", section=["layout", "type", "ASDFSDG007"], default=None) is None ) def test__config__from_kwargs(): """Test from_kwargs method of FluffConfig.""" # Instantiate config object. cfg = FluffConfig.from_kwargs( dialect="snowflake", rules=["LT01", "LT02"], exclude_rules=["CP01", "AL01"], ) # Verify we can later retrieve the config values. assert cfg.get("dialect") == "snowflake" assert cfg.get("rules") == "LT01,LT02" assert cfg.get("exclude_rules") == "CP01,AL01" def test__config__from_string(): """Test from_string method of FluffConfig.""" with open( os.path.join("test", "fixtures", "config", "inheritance_a", ".sqlfluff") ) as f: config_string = f.read() cfg = FluffConfig.from_string(config_string) # Verify we can later retrieve the config values. assert cfg.get("testing_val") == "foobar" assert cfg.get("dialect") == "mysql" def test__config_missing_dialect(): """Verify an exception is thrown if no dialect was specified.""" with pytest.raises(SQLFluffUserError) as e: FluffConfig.from_kwargs() assert "must configure a dialect" in str(e.value) def test__config__validate_configs_direct(): """Test _validate_configs method of ConfigLoader directly.""" # Make sure there _are_ removed configs. assert REMOVED_CONFIGS # Make sure all raise an error if validated for k in REMOVED_CONFIGS: print(k) if k.translation_func and k.new_path: res = ConfigLoader._validate_configs([(k.old_path, "foo")], "") print(res) # Check that it's reassigned. assert not any(elem[0] == k.old_path for elem in res) assert any(elem[0] == k.new_path for elem in res) # Really we should check that it's output here, but logging config # seems to make that hard. else: with pytest.raises(SQLFluffUserError) as excinfo: ConfigLoader._validate_configs([(k.old_path, "foo")], "") assert "set an outdated config" in str(excinfo.value) assert k.warning in str(excinfo.value) def test__config__validate_configs_indirect(): """Test _validate_configs method of FluffConfig indirectly.""" # Instantiate config object. 
with pytest.raises(SQLFluffUserError): FluffConfig( configs={ "core": {"dialect": "ansi"}, # This is a known removed value. "rules": {"L003": {"lint_templated_tokens": True}}, } ) @pytest.mark.parametrize( "raw_sql", [ ( # "types" not "type" "-- sqlfluff:layout:types:comma:line_position:leading\n" "SELECT 1" ), ( # Unsupported layout config length "-- sqlfluff:layout:foo\n" "SELECT 1" ), ( # Unsupported layout config length "-- sqlfluff:layout:type:comma:bar\n" "SELECT 1" ), ( # Unsupported layout config key ("foo") "-- sqlfluff:layout:type:comma:foo:bar\n" "SELECT 1" ), ( # Unsupported layout config key ("foo") [no space] "--sqlfluff:layout:type:comma:foo:bar\n" "SELECT 1" ), ], ) def test__config__validate_configs_inline_layout(raw_sql): """Test _validate_configs method of FluffConfig when used on a file. This test covers both the validation of inline config directives but also the validation of layout configs. """ # Instantiate config object. cfg = FluffConfig(configs={"core": {"dialect": "ansi"}}) # Try to process an invalid inline config. Make sure we get an error. with pytest.raises(SQLFluffUserError): cfg.process_raw_file_for_config(raw_sql, "test.sql") def test__config__validate_configs_precedence_same_file(): """Test _validate_configs method of FluffConfig where there's a conflict.""" # Check with a known conflicted value old_key = ("rules", "LT03", "operator_new_lines") new_key = ("layout", "type", "binary_operator", "line_position") # Check it's still conflicted. assert any( k.old_path == old_key and k.new_path == new_key for k in REMOVED_CONFIGS ), ( "This test depends on this key still being removed. Update the test to " "one that is if this one isn't." ) # Test config test_config = [(new_key, "foo"), (old_key, "foo")] assert len(test_config) == 2 res = ConfigLoader._validate_configs(test_config, "") assert len(res) == 1 # Check that the old key isn't there. assert not any(k == old_key for k, _ in res) def test__config__toml_list_config(): """Test Parsing TOML list of values.""" c = ConfigLoader() loaded_config = c.load_config_file( os.path.join("test", "fixtures", "config", "toml"), "pyproject.toml", ) loaded_config["core"]["dialect"] = "ansi" cfg = FluffConfig(loaded_config) # Verify we can later retrieve the config values. assert cfg.get("dialect") == "ansi" assert cfg.get("rules") == ["LT03", "LT09"] def test__config__warn_unknown_rule(): """Test warnings when rules are unknown.""" lntr = Linter(config=FluffConfig(config_c)) with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog: lntr.get_rulepack() # Check we get a warning on the unrecognised rule. assert ( "Rule configuration contain a section for unexpected rule 'NOT_A_RULE'." ) in caplog.text # Check we get a warning for the deprecated rule. assert ( "Rule configuration contain a section for unexpected rule 'L001'." ) in caplog.text # Check we get a hint for the matched rule. assert "match for rule LT01 with name 'layout.spacing'" in caplog.text # Check we get a warning for the group name. assert ( "Rule configuration contain a section for unexpected rule 'layout'." ) in caplog.text # Check we get a hint for the matched rule group. # NOTE: We don't check the set explicitly because we can't assume ordering. 
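# Instead, assert on stable substrings of the warning message.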
assert ("The reference was found as a match for multiple rules: {") in caplog.text assert ("LT01") in caplog.text assert ("LT02") in caplog.text def test__process_inline_config(): """Test the processing of inline in-file configuration directives.""" cfg = FluffConfig(config_b) assert cfg.get("rules") == "LT03" cfg.process_inline_config("-- sqlfluff:rules:LT02", "test.sql") assert cfg.get("rules") == "LT02" assert cfg.get("tab_space_size", section="indentation") == 4 cfg.process_inline_config("-- sqlfluff:indentation:tab_space_size:20", "test.sql") assert cfg.get("tab_space_size", section="indentation") == 20 assert cfg.get("dialect") == "ansi" assert cfg.get("dialect_obj").name == "ansi" cfg.process_inline_config("-- sqlfluff:dialect:postgres", "test.sql") assert cfg.get("dialect") == "postgres" assert cfg.get("dialect_obj").name == "postgres" assert cfg.get("rulez") is None cfg.process_inline_config("-- sqlfluff:rulez:LT06", "test.sql") assert cfg.get("rulez") == "LT06" # Check that Windows paths don't get mangled cfg.process_inline_config("-- sqlfluff:jinja:my_path:c:\\foo", "test.sql") assert cfg.get("my_path", section="jinja") == "c:\\foo" sqlfluff-2.3.5/test/core/errors_test.py000066400000000000000000000043541451700765000202120ustar00rootroot00000000000000"""Tests pickling and unpickling of errors.""" import copy import pickle import pytest from sqlfluff.core.errors import SQLBaseError, SQLLexError, SQLLintError, SQLParseError from sqlfluff.core.parser import PositionMarker, RawSegment from sqlfluff.core.rules import BaseRule from sqlfluff.core.templaters import TemplatedFile class Rule_T078(BaseRule): """A dummy rule.""" groups = ("all",) def _eval(self, context): pass def assert_pickle_robust(err: SQLBaseError): """Test that the class remains the same through copying and pickling.""" # First try copying (and make sure they still compare equal) err_copy = copy.copy(err) assert err_copy == err # Then try picking (and make sure they also still compare equal) pickled = pickle.dumps(err) pickle_copy = pickle.loads(pickled) assert pickle_copy == err @pytest.mark.parametrize( "ignore", [True, False], ) def test__lex_error_pickle(ignore): """Test lexing error pickling.""" template = TemplatedFile.from_string("foobar") err = SQLLexError("Foo", pos=PositionMarker(slice(0, 6), slice(0, 6), template)) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err) @pytest.mark.parametrize( "ignore", [True, False], ) def test__parse_error_pickle(ignore): """Test parse error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLParseError("Foo", segment=segment) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. err.ignore = ignore assert_pickle_robust(err) @pytest.mark.parametrize( "ignore", [True, False], ) def test__lint_error_pickle(ignore): """Test lint error pickling.""" template = TemplatedFile.from_string("foobar") segment = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) err = SQLLintError("Foo", segment=segment, rule=Rule_T078) # Set ignore to true if configured. # NOTE: This not copying was one of the reasons for this test. 
err.ignore = ignore assert_pickle_robust(err) sqlfluff-2.3.5/test/core/file_helpers_test.py000066400000000000000000000024501451700765000213320ustar00rootroot00000000000000"""Test the helpers.""" import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.file_helpers import get_encoding @pytest.mark.parametrize( "fname,config_encoding,result", [ ( "test/fixtures/linter/encoding-utf-8.sql", "autodetect", "ascii", # ascii is a subset of utf-8, this is valid ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "autodetect", "UTF-8-SIG", ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8", "utf-8", ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8", "utf-8", ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8-sig", "utf-8-sig", ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8-sig", "utf-8-sig", ), ], ) def test__parser__helper_get_encoding(fname, config_encoding, result): """Test get_encoding.""" assert ( get_encoding( fname=fname, config=FluffConfig( overrides={"encoding": config_encoding, "dialect": "ansi"} ), ) == result ) sqlfluff-2.3.5/test/core/helpers/000077500000000000000000000000001451700765000167215ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/helpers/dict_test.py000066400000000000000000000015341451700765000212600ustar00rootroot00000000000000"""Tests for dict helpers.""" from sqlfluff.core.helpers.dict import dict_diff, nested_combine def test__helpers_dict__nested_combine(): """Test combination of two config dicts.""" a = {"a": {"b": {"c": 123, "d": 456}}} b = {"b": {"b": {"c": 123, "d": 456}}} c = {"a": {"b": {"c": 234, "e": 456}}} r = nested_combine(a, b, c) assert r == { "a": {"b": {"c": 234, "e": 456, "d": 456}}, "b": {"b": {"c": 123, "d": 456}}, } def test__helpers_dict__dict_diff(): """Test diffs between two config dicts.""" a = {"a": {"b": {"c": 123, "d": 456, "f": 6}}} b = {"b": {"b": {"c": 123, "d": 456}}} c = {"a": {"b": {"c": 234, "e": 456, "f": 6}}} assert dict_diff(a, b) == a assert dict_diff(a, c) == {"a": {"b": {"c": 123, "d": 456}}} assert dict_diff(c, a) == {"a": {"b": {"c": 234, "e": 456}}} sqlfluff-2.3.5/test/core/helpers/slice_test.py000066400000000000000000000032701451700765000214330ustar00rootroot00000000000000"""Test the slice helpers.""" import pytest from sqlfluff.core.helpers.slice import slice_overlaps @pytest.mark.parametrize( "s1,s2,result", [ # Identity case (slice(0, 1), slice(0, 1), True), # Adjoining zero length slices aren't overlaps (slice(1, 1), slice(0, 1), False), (slice(0, 0), slice(0, 1), False), (slice(0, 1), slice(1, 1), False), (slice(0, 1), slice(0, 0), False), # Contained slices are overlaps (slice(0, 3), slice(1, 2), True), (slice(1, 2), slice(0, 3), True), # ...even if they're zero length (slice(0, 3), slice(1, 1), True), (slice(1, 1), slice(0, 3), True), # Easy cases of non-overlaps (slice(1, 2), slice(3, 4), False), (slice(3, 4), slice(1, 2), False), (slice(1, 2), slice(2, 3), False), (slice(2, 3), slice(1, 2), False), # Partial overlaps are overlaps (slice(1, 3), slice(2, 4), True), (slice(2, 4), slice(1, 3), True), ], ) def test__parser__slice_overlaps_result(s1, s2, result): """Test _findall.""" assert slice_overlaps(s1, s2) == result @pytest.mark.parametrize( "s1,s2", [ # Check None situations (slice(None, 1), slice(0, 1)), (slice(0, None), slice(0, 1)), (slice(0, 1), slice(None, 1)), (slice(0, 1), slice(0, None)), (slice(None, None), slice(None, None)), # Check positivity (slice(1, 0), slice(0, 1)), (slice(0, 1), slice(1, 0)), ], ) def test__parser__slice_overlaps_error(s1, s2): """Test 
assertions of slice_overlaps.""" with pytest.raises(AssertionError): slice_overlaps(s1, s2) sqlfluff-2.3.5/test/core/helpers/string_test.py000066400000000000000000000016241451700765000216430ustar00rootroot00000000000000"""Test the string helpers.""" import pytest from sqlfluff.core.helpers.string import findall, split_comma_separated_string @pytest.mark.parametrize( "mainstr,substr,positions", [ ("", "", []), ("a", "a", [0]), ("foobar", "o", [1, 2]), ("bar bar bar bar", "bar", [0, 4, 8, 12]), ], ) def test__helpers_string__findall(mainstr, substr, positions): """Test _findall.""" assert list(findall(substr, mainstr)) == positions @pytest.mark.parametrize( "raw_str, expected", [ ("AL01,LT08,AL07", ["AL01", "LT08", "AL07"]), ("\nAL01,\nLT08,\nAL07,", ["AL01", "LT08", "AL07"]), (["AL01", "LT08", "AL07"], ["AL01", "LT08", "AL07"]), ], ) def test__helpers_string__split_comma_separated_string(raw_str, expected): """Tests that strings and lists are output correctly.""" assert split_comma_separated_string(raw_str) == expected sqlfluff-2.3.5/test/core/linter/000077500000000000000000000000001451700765000165545ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/linter/__init__.py000066400000000000000000000000461451700765000206650ustar00rootroot00000000000000"""Tests for sqlfluff.core.linter.""" sqlfluff-2.3.5/test/core/linter/linted_file_test.py000066400000000000000000000345611451700765000224510ustar00rootroot00000000000000"""Tests covering the LintedFile class and its methods.""" import logging import pytest from sqlfluff.core.linter import LintedFile from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.parser.segments import ( BaseSegment, RawSegment, TemplateSegment, ) from sqlfluff.core.parser.segments.raw import SourceFix from sqlfluff.core.rules.fix import FixPatch from sqlfluff.core.templaters import RawFileSlice, TemplatedFile from sqlfluff.core.templaters.base import TemplatedFileSlice @pytest.mark.parametrize( "source_slices,source_patches,raw_source_string,expected_result", # NOTE: For all of these examples we're not setting the patch_category # of the fix patches. They're not used at this step so irrelevant for # testing. [ # Trivial example ([slice(0, 1)], [], "a", "a"), # Simple replacement ( [slice(0, 1), slice(1, 2), slice(2, 3)], [FixPatch(slice(1, 2), "d", "", slice(1, 2), "b", "b")], "abc", "adc", ), # Simple insertion ( [slice(0, 1), slice(1, 1), slice(1, 2)], [FixPatch(slice(1, 1), "b", "", slice(1, 1), "", "")], "ac", "abc", ), # Simple deletion ( [slice(0, 1), slice(1, 2), slice(2, 3)], [FixPatch(slice(1, 2), "", "", slice(1, 2), "b", "b")], "abc", "ac", ), # Illustrative templated example (although practically at # this step, the routine shouldn't care if it's templated). ( [slice(0, 2), slice(2, 7), slice(7, 9)], [FixPatch(slice(2, 3), "{{ b }}", "", slice(2, 7), "b", "{{b}}")], "a {{b}} c", "a {{ b }} c", ), ], ) def test__linted_file__build_up_fixed_source_string( source_slices, source_patches, raw_source_string, expected_result, caplog ): """Test _build_up_fixed_source_string. This is part of fix_string(). """ with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = LintedFile._build_up_fixed_source_string( source_slices, source_patches, raw_source_string ) assert result == expected_result @pytest.mark.parametrize( "source_patches,source_only_slices,raw_source_string,expected_result", # NOTE: For all of these examples we're not setting the patch_category # of the fix patches. They're not used at this step so irrelevant for # testing.
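# Each case below supplies: the fix patches, any source-only slices, the raw
# source string, and the expected list of source slices.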
[ # Trivial example. # No edits in a single character file. Slice should be one # character long. ([], [], "a", [slice(0, 1)]), # Simple replacement. # We've yielded a patch to change a single character. This means # we should get only slices for that character, and for the # unchanged file around it. ( [FixPatch(slice(1, 2), "d", "", slice(1, 2), "b", "b")], [], "abc", [slice(0, 1), slice(1, 2), slice(2, 3)], ), # Templated no fixes. # A templated file, but with no fixes, so no subdivision of the # file is required and we should just get a single slice. ( [], [], "a {{ b }} c", [slice(0, 11)], ), # Templated example with a source-only slice. # A templated file, but with no fixes, so no subdivision of the # file is required and we should just get a single slice. While # there is handling for "source only" slices like template # comments, in this case no additional slicing is required # because no edits have been made. ( [], [RawFileSlice("{# b #}", "comment", 2)], "a {# b #} c", [slice(0, 11)], ), # Templated fix example with a source-only slice. # We're making an edit adjacent to a source only slice. Edits # _before_ source only slices currently don't trigger additional # slicing. This is fine. ( [FixPatch(slice(0, 1), "a ", "", slice(0, 1), "a", "a")], [RawFileSlice("{# b #}", "comment", 1)], "a{# b #}c", [slice(0, 1), slice(1, 9)], ), # Templated fix example with a source-only slice. # We've made an edit directly _after_ a source only slice # which should trigger the logic to ensure that the source # only slice isn't included in the source mapping of the # edit. # TODO: given that the logic is based on the _type_ # of the slice (e.g. comment), would we handle a # template tag which returns an empty string correctly? ( [FixPatch(slice(1, 2), " c", "", slice(8, 9), "c", "c")], [RawFileSlice("{# b #}", "comment", 1)], "a{# b #}cc", [slice(0, 1), slice(1, 8), slice(8, 9), slice(9, 10)], ), # Templated example with a source-only slice. # Here we're making the fix to the templated slice. This # checks that we don't duplicate or fumble the slice # generation when we're explicitly trying to edit the source. # TODO: Should we be using the fix type (e.g. "source") # to somehow determine whether the fix is "safe"? ( [FixPatch(slice(2, 2), "{# fixed #}", "", slice(2, 9), "", "")], [RawFileSlice("{# b #}", "comment", 2)], "a {# b #} c", [slice(0, 2), slice(2, 9), slice(9, 11)], ), # Illustrate potential templating bug (case from JJ01). # In this case we have fixes for all our templated sections # and they are all close to each other and so may be either # skipped or duplicated if the logic is not precise.
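# The expected slices should tile the source string exactly: one slice per
# templated section, plus one for each literal gap between them.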
( [ FixPatch( templated_slice=slice(14, 14), fixed_raw="{%+ if true -%}", patch_category="source", source_slice=slice(14, 27), templated_str="", source_str="{%+if true-%}", ), FixPatch( templated_slice=slice(14, 14), fixed_raw="{{ ref('foo') }}", patch_category="source", source_slice=slice(28, 42), templated_str="", source_str="{{ref('foo')}}", ), FixPatch( templated_slice=slice(17, 17), fixed_raw="{%- endif %}", patch_category="source", source_slice=slice(43, 53), templated_str="", source_str="{%-endif%}", ), ], [ RawFileSlice( raw="{%+if true-%}", slice_type="block_start", source_idx=14, block_idx=0, ), RawFileSlice( raw="{%-endif%}", slice_type="block_end", source_idx=43, block_idx=1, ), ], "SELECT 1 from {%+if true-%} {{ref('foo')}} {%-endif%}", [ slice(0, 14), slice(14, 27), slice(27, 28), slice(28, 42), slice(42, 43), slice(43, 53), ], ), ], ) def test__linted_file__slice_source_file_using_patches( source_patches, source_only_slices, raw_source_string, expected_result, caplog ): """Test _slice_source_file_using_patches. This is part of fix_string(). """ with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = LintedFile._slice_source_file_using_patches( source_patches, source_only_slices, raw_source_string ) assert result == expected_result templated_file_1 = TemplatedFile.from_string("abc") templated_file_2 = TemplatedFile( "{# blah #}{{ foo }}bc", "", "abc", [ TemplatedFileSlice("comment", slice(0, 10), slice(0, 0)), TemplatedFileSlice("templated", slice(10, 19), slice(0, 1)), TemplatedFileSlice("literal", slice(19, 21), slice(1, 3)), ], [ RawFileSlice("{# blah #}", "comment", 0), RawFileSlice("{{ foo }}", "templated", 10), RawFileSlice("bc", "literal", 19), ], ) @pytest.mark.parametrize( "tree,templated_file,expected_result", [ # Trivial example ( RawSegment( "abc", PositionMarker(slice(0, 3), slice(0, 3), templated_file_1), "code", ), templated_file_1, [], ), # Simple literal edit example ( RawSegment( "abz", PositionMarker(slice(0, 3), slice(0, 3), templated_file_1), "code", ), templated_file_1, [FixPatch(slice(0, 3), "abz", "literal", slice(0, 3), "abc", "abc")], ), # Nested literal edit example ( BaseSegment( [ RawSegment( "a", PositionMarker(slice(0, 1), slice(0, 1), templated_file_1), "code", ), RawSegment( "b", PositionMarker(slice(1, 2), slice(1, 2), templated_file_1), "code", ), RawSegment( "z", PositionMarker(slice(2, 3), slice(2, 3), templated_file_1), "code", ), ] ), templated_file_1, [FixPatch(slice(0, 3), "abz", "literal", slice(0, 3), "abc", "abc")], ), # More complicated templating example ( BaseSegment( [ TemplateSegment( PositionMarker(slice(0, 10), slice(0, 0), templated_file_2), "{# blah #}", "comment", ), RawSegment( "a", PositionMarker(slice(10, 20), slice(0, 1), templated_file_2), "code", ), RawSegment( "b", PositionMarker(slice(19, 20), slice(1, 2), templated_file_2), "code", ), RawSegment( "z", PositionMarker(slice(20, 21), slice(2, 3), templated_file_2), "code", ), ] ), templated_file_2, [FixPatch(slice(2, 3), "z", "literal", slice(20, 21), "c", "c")], ), # Templating example with fixes ( BaseSegment( [ TemplateSegment( PositionMarker(slice(0, 10), slice(0, 0), templated_file_2), "{# blah #}", "comment", source_fixes=[ SourceFix("{# fixed #}", slice(0, 10), slice(0, 0)) ], ), RawSegment( "a", PositionMarker(slice(10, 19), slice(0, 1), templated_file_2), "code", source_fixes=[ SourceFix("{{ bar }}", slice(10, 19), slice(0, 1)) ], ), RawSegment( "b", PositionMarker(slice(19, 20), slice(1, 2), templated_file_2), "code", ), RawSegment( "z", 
PositionMarker(slice(20, 21), slice(2, 3), templated_file_2), "code", ), ] ), templated_file_2, [ FixPatch( slice(0, 0), "{# fixed #}", "source", slice(0, 10), "", "{# blah #}" ), FixPatch( slice(0, 1), "{{ bar }}", "source", slice(10, 19), "a", "{{ foo }}" ), FixPatch(slice(2, 3), "z", "literal", slice(20, 21), "c", "c"), ], ), ], ) def test__linted_file__generate_source_patches( tree, templated_file, expected_result, caplog ): """Test _generate_source_patches. This is part of fix_string(). """ with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): result = LintedFile._generate_source_patches(tree, templated_file) assert result == expected_result @pytest.mark.parametrize( "case", [ dict( name="utf8_create", fname="test.sql", encoding="utf-8", existing=None, update="def", expected="def", ), dict( name="utf8_update", fname="test.sql", encoding="utf-8", existing="abc", update="def", expected="def", ), dict( name="utf8_special_char", fname="test.sql", encoding="utf-8", existing="abc", update="→", # Special utf-8 character expected="→", ), dict( name="incorrect_encoding", fname="test.sql", encoding="Windows-1252", existing="abc", update="→", # Not valid in Windows-1252 expected="abc", # File should be unchanged ), ], ids=lambda case: case["name"], ) def test_safe_create_replace_file(case, tmp_path): """Test creating or updating .sql files, with various content and encodings.""" p = tmp_path / case["fname"] if case["existing"]: p.write_text(case["existing"]) try: LintedFile._safe_create_replace_file( str(p), str(p), case["update"], case["encoding"] ) except: # noqa: E722 pass actual = p.read_text(encoding=case["encoding"]) assert case["expected"] == actual sqlfluff-2.3.5/test/core/linter/linter_test.py000066400000000000000000000423671451700765000214740ustar00rootroot00000000000000"""Tests for the Linter class and LintingResult class.""" import logging import os from unittest.mock import patch import pytest from sqlfluff.cli.formatters import OutputStreamFormatter from sqlfluff.cli.outputstream import make_output_stream from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import ( SQLBaseError, SQLFluffSkipFile, SQLFluffUserError, SQLLexError, SQLLintError, ) from sqlfluff.core.linter import LintingResult, runner from sqlfluff.core.linter.runner import get_runner from sqlfluff.utils.testing.logging import fluff_log_catcher class DummyLintError(SQLBaseError): """Fake lint error used by tests, similar to SQLLintError.""" def __init__(self, line_no: int, code: str = "LT01"): self._code = code super().__init__(line_no=line_no) def normalise_paths(paths): """Normalise paths so that they can be compared. NB Paths on different platforms might look different, so this makes them comparable.
""" return {pth.replace("/", ".").replace("\\", ".") for pth in paths} def test__linter__path_from_paths__dir(): """Test extracting paths from directories.""" lntr = Linter() paths = lntr.paths_from_path("test/fixtures/lexer") assert normalise_paths(paths) == { "test.fixtures.lexer.block_comment.sql", "test.fixtures.lexer.inline_comment.sql", "test.fixtures.lexer.basic.sql", } def test__linter__path_from_paths__default(): """Test .sql files are found by default.""" lntr = Linter() paths = normalise_paths(lntr.paths_from_path("test/fixtures/linter")) assert "test.fixtures.linter.passing.sql" in paths assert "test.fixtures.linter.passing_cap_extension.SQL" in paths assert "test.fixtures.linter.discovery_file.txt" not in paths def test__linter__path_from_paths__exts(): """Test configuration of file discovery.""" lntr = Linter( config=FluffConfig(overrides={"sql_file_exts": ".txt", "dialect": "ansi"}) ) paths = normalise_paths(lntr.paths_from_path("test/fixtures/linter")) assert "test.fixtures.linter.passing.sql" not in paths assert "test.fixtures.linter.passing_cap_extension.SQL" not in paths assert "test.fixtures.linter.discovery_file.txt" in paths def test__linter__path_from_paths__file(): """Test extracting paths from a file path.""" lntr = Linter() paths = lntr.paths_from_path("test/fixtures/linter/indentation_errors.sql") assert normalise_paths(paths) == {"test.fixtures.linter.indentation_errors.sql"} @pytest.mark.parametrize("filesize,raises_skip", [(0, False), (5, True), (2000, False)]) def test__linter__skip_large_bytes(filesize, raises_skip): """Test extracting paths from a file path.""" config = FluffConfig( overrides={"large_file_skip_byte_limit": filesize, "dialect": "ansi"} ) # First check the function directly if raises_skip: with pytest.raises(SQLFluffSkipFile) as excinfo: Linter.load_raw_file_and_config( "test/fixtures/linter/indentation_errors.sql", config ) assert "Skipping" in str(excinfo.value) assert f"over the limit of {filesize}" in str(excinfo.value) # If NOT raises, then we'll catch the raise an error and the test will fail. # Then check that it either is or isn't linted appropriately via lint_paths. lntr = Linter(config) result = lntr.lint_paths( ("test/fixtures/linter/indentation_errors.sql",), ) if raises_skip: assert not result.get_violations() else: assert result.get_violations() # Same again via parse_path, which is the other entry point. 
result = list( lntr.parse_path( "test/fixtures/linter/indentation_errors.sql", ) ) if raises_skip: assert not result else: assert result def test__linter__path_from_paths__not_exist(): """Test that the right errors are raised when a file doesn't exist.""" lntr = Linter() with pytest.raises(SQLFluffUserError): lntr.paths_from_path("asflekjfhsakuefhse") def test__linter__path_from_paths__not_exist_ignore(): """Test that non-existent paths are skipped when configured to be ignored.""" lntr = Linter() paths = lntr.paths_from_path("asflekjfhsakuefhse", ignore_non_existent_files=True) assert len(paths) == 0 def test__linter__path_from_paths__explicit_ignore(): """Test ignoring files that were passed explicitly.""" lntr = Linter() paths = lntr.paths_from_path( "test/fixtures/linter/sqlfluffignore/path_a/query_a.sql", ignore_non_existent_files=True, ignore_files=True, working_path="test/fixtures/linter/sqlfluffignore/", ) assert len(paths) == 0 def test__linter__path_from_paths__sqlfluffignore_current_directory(): """Test that .sqlfluffignore in the current directory is read when a dir is given.""" oldcwd = os.getcwd() try: os.chdir("test/fixtures/linter/sqlfluffignore") lntr = Linter() paths = lntr.paths_from_path( "path_a/", ignore_non_existent_files=True, ignore_files=True, working_path="test/fixtures/linter/sqlfluffignore/", ) assert len(paths) == 0 finally: os.chdir(oldcwd) def test__linter__path_from_paths__dot(): """Test extracting paths from a dot.""" lntr = Linter() paths = lntr.paths_from_path(".") # Use set theory to check that we get AT LEAST these files assert normalise_paths(paths) >= { "test.fixtures.lexer.block_comment.sql", "test.fixtures.lexer.inline_comment.sql", "test.fixtures.lexer.basic.sql", } @pytest.mark.parametrize( "path", [ "test/fixtures/linter/sqlfluffignore", "test/fixtures/linter/sqlfluffignore/", "test/fixtures/linter/sqlfluffignore/.", ], ) def test__linter__path_from_paths__ignore(path): """Test that .sqlfluffignore files are respected when extracting paths.""" lntr = Linter() paths = lntr.paths_from_path(path) # We should only get query_b, because of the sqlfluffignore files.
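# (query_a is excluded because path_a is covered by the ignore files, as
# exercised by the explicit-ignore test above.)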
assert normalise_paths(paths) == { "test.fixtures.linter.sqlfluffignore.path_b.query_b.sql" } @pytest.mark.parametrize( "path", [ "test/fixtures/linter/indentation_errors.sql", "test/fixtures/linter/whitespace_errors.sql", ], ) def test__linter__lint_string_vs_file(path): """Test the linter finds the same things on strings and files.""" with open(path) as f: sql_str = f.read() lntr = Linter(dialect="ansi") assert ( lntr.lint_string(sql_str).check_tuples() == lntr.lint_path(path).check_tuples() ) @pytest.mark.parametrize( "rules,num_violations", [(None, 6), ("CP01", 2), (("LT01", "LT12"), 1)] ) def test__linter__get_violations_filter_rules(rules, num_violations): """Test filtering violations by which rules were violated.""" lntr = Linter(dialect="ansi") lint_result = lntr.lint_string("select a, b FROM tbl c order BY d") assert len(lint_result.get_violations(rules=rules)) == num_violations def test__linter__linting_result__sum_dicts(): """Test the summing of dictionaries in the linter.""" lr = LintingResult() i = {} a = dict(a=3, b=123, f=876.321) b = dict(a=19, b=321.0, g=23478) r = dict(a=22, b=444.0, f=876.321, g=23478) assert lr.sum_dicts(a, b) == r # Check the identity too assert lr.sum_dicts(r, i) == r def test__linter__linting_result__combine_dicts(): """Test the combination of dictionaries in the linter.""" lr = LintingResult() a = dict(a=3, b=123, f=876.321) b = dict(h=19, i=321.0, j=23478) r = dict(z=22) assert lr.combine_dicts(a, b, r) == dict( a=3, b=123, f=876.321, h=19, i=321.0, j=23478, z=22 ) @pytest.mark.parametrize("by_path,result_type", [(False, list), (True, dict)]) def test__linter__linting_result_check_tuples_by_path(by_path, result_type): """Test that a LintingResult can partition violations by the source files.""" lntr = Linter() result = lntr.lint_paths( [ "test/fixtures/linter/comma_errors.sql", "test/fixtures/linter/whitespace_errors.sql", ] ) check_tuples = result.check_tuples(by_path=by_path) assert isinstance(check_tuples, result_type) @pytest.mark.parametrize("processes", [1, 2]) def test__linter__linting_result_get_violations(processes): """Test that we can get violations from a LintingResult.""" lntr = Linter() result = lntr.lint_paths( ( "test/fixtures/linter/comma_errors.sql", "test/fixtures/linter/whitespace_errors.sql", ), processes=processes, ) assert all(isinstance(v, SQLLintError) for v in result.get_violations()) @pytest.mark.parametrize("force_error", [False, True]) def test__linter__linting_parallel_thread(force_error, monkeypatch): """Run linter in parallel mode using threads. Similar to test__linter__linting_result_get_violations but uses a thread pool of 1 worker to test parallel mode without subprocesses. This lets the tests capture code coverage information for the backend parts of parallel execution without having to jump through hoops.
""" if not force_error: monkeypatch.setattr(Linter, "allow_process_parallelism", False) else: def _create_pool(*args, **kwargs): class ErrorPool: def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def imap_unordered(self, *args, **kwargs): yield runner.DelayedException(ValueError()) return ErrorPool() monkeypatch.setattr(runner.MultiProcessRunner, "_create_pool", _create_pool) config = FluffConfig(overrides={"dialect": "ansi"}) output_stream = make_output_stream(config, None, os.devnull) lntr = Linter( formatter=OutputStreamFormatter(output_stream, False, verbosity=0), dialect="ansi", ) result = lntr.lint_paths( # NOTE: Lint more than one file to make sure we enabled the multithreaded # code path. ( "test/fixtures/linter/comma_errors.sql", "test/fixtures/linter/whitespace_errors.sql", ), processes=2, ) all([isinstance(v, SQLLintError) for v in result.get_violations()]) @patch("sqlfluff.core.linter.Linter.lint_rendered") def test_lint_path_parallel_wrapper_exception(patched_lint): """Tests the error catching behavior of _lint_path_parallel_wrapper(). Test on MultiThread runner because otherwise we have pickling issues. """ patched_lint.side_effect = ValueError("Something unexpected happened") for result in runner.MultiThreadRunner( Linter(), FluffConfig(overrides={"dialect": "ansi"}), processes=1 ).run( ["test/fixtures/linter/passing.sql"], fix=False, ): assert isinstance(result, runner.DelayedException) with pytest.raises(ValueError): result.reraise() @pytest.mark.parametrize( "mock_cpu,in_processes,exp_processes", [ # Make the mocked cpu count a really high value which is # unlikely to collide with the real value. We can then # test all the different combos. (512, 1, 1), (512, 0, 512), (512, -12, 500), (512, 5, 5), # Check that we can't go lower than 1 in a 1 cpu case (1, -1, 1), ], ) @patch("multiprocessing.cpu_count") def test__linter__get_runner_processes( patched_cpu_count, mock_cpu, in_processes, exp_processes ): """Test that get_runner handles processes correctly.""" # Make the mocked cpu count a really high value which is # unlikely to collide with the real value. patched_cpu_count.return_value = mock_cpu _, return_processes = get_runner( linter=Linter(), config=FluffConfig(overrides={"dialect": "ansi"}), processes=in_processes, ) assert return_processes == exp_processes @patch("sqlfluff.core.linter.runner.linter_logger") @patch("sqlfluff.core.linter.Linter.lint_rendered") def test__linter__linting_unexpected_error_handled_gracefully( patched_lint, patched_logger ): """Test that an unexpected internal error returns the issue-surfacing file.""" patched_lint.side_effect = Exception("Something unexpected happened") lntr = Linter() lntr.lint_paths(("test/fixtures/linter/passing.sql",)) assert ( "Unable to lint test/fixtures/linter/passing.sql due to an internal error." # NB: Replace is to handle windows-style paths. in patched_logger.warning.call_args[0][0].replace("\\", "/") and "Exception: Something unexpected happened" in patched_logger.warning.call_args[0][0] ) def test__linter__empty_file(): """Test linter behaves nicely with an empty string.""" lntr = Linter(dialect="ansi") # Make sure no exceptions raised and no violations found in empty file. parsed = lntr.parse_string("") assert not parsed.violations @pytest.mark.parametrize( "ignore_templated_areas,check_tuples", [ (True, [("LT01", 3, 39), ("LT01", 3, 40)]), ( False, [ # there are still two of each because LT01 checks # for both *before* and *after* the operator. 
# The deduplication filter makes sure there aren't 4. ("LT01", 3, 16), ("LT01", 3, 16), ("LT01", 3, 39), ("LT01", 3, 40), ], ), ], ) def test__linter__mask_templated_violations(ignore_templated_areas, check_tuples): """Test linter masks files properly around templated content. NOTE: this also tests deduplication of fixes which have the same source position. i.e. `LintedFile.deduplicate_in_source_space()`. """ lntr = Linter( config=FluffConfig( overrides={ "rules": "L006", "ignore_templated_areas": ignore_templated_areas, "dialect": "ansi", } ) ) linted = lntr.lint_path(path="test/fixtures/templater/jinja_h_macros/jinja.sql") assert linted.check_tuples() == check_tuples @pytest.mark.parametrize( "fname,config_encoding,lexerror", [ ( "test/fixtures/linter/encoding-utf-8.sql", "autodetect", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "autodetect", False, ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8", True, ), ( "test/fixtures/linter/encoding-utf-8.sql", "utf-8-sig", False, ), ( "test/fixtures/linter/encoding-utf-8-sig.sql", "utf-8-sig", False, ), ], ) def test__linter__encoding(fname, config_encoding, lexerror): """Test linter deals with files with different encoding.""" lntr = Linter( config=FluffConfig( overrides={ "rules": "LT01", "encoding": config_encoding, "dialect": "ansi", } ) ) result = lntr.lint_paths([fname]) assert lexerror == (SQLLexError in [type(v) for v in result.get_violations()]) def test_delayed_exception(): """Test that DelayedException stores and reraises a stored exception.""" ve = ValueError() de = runner.DelayedException(ve) with pytest.raises(ValueError): de.reraise() def test__attempt_to_change_templater_warning(): """Test warning when changing templater in .sqlfluff file in subdirectory.""" initial_config = FluffConfig( configs={"core": {"templater": "jinja", "dialect": "ansi"}} ) lntr = Linter(config=initial_config) updated_config = FluffConfig( configs={"core": {"templater": "python", "dialect": "ansi"}} ) with fluff_log_catcher(logging.WARNING, "sqlfluff.linter") as caplog: lntr.render_string( in_str="select * from table", fname="test.sql", config=updated_config, encoding="utf-8", ) assert "Attempt to set templater to " in caplog.text def test_advanced_api_methods(): """Test advanced API methods on segments.""" # These aren't used by the simple API, which returns # a simple JSON representation of the parse tree, but # are available for advanced API usage and within rules. 
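# A sketch of the pattern exercised below (using this test's own objects):
#   parsed = Linter(dialect="ansi").parse_string(sql)
#   parsed.tree.get_table_references()  # -> {"tab_a", "tab_b"}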
sql = """ WITH cte AS ( SELECT * FROM tab_a ) SELECT cte.col_a, tab_b.col_b FROM cte INNER JOIN tab_b; """ linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) # CTEDefinitionSegment.get_identifier cte_segment = next(parsed.tree.recursive_crawl("common_table_expression")) assert cte_segment.get_identifier().raw == "cte" # BaseFileSegment.get_table_references & StatementSegment.get_table_references assert parsed.tree.get_table_references() == {"tab_a", "tab_b"} def test_normalise_newlines(): """Test normalising newlines to unix-style line endings.""" in_str = "SELECT\r\n foo\n FROM \r \n\r bar;" out_str = "SELECT\n foo\n FROM \n \n\n bar;" assert out_str == Linter._normalise_newlines(in_str) sqlfluff-2.3.5/test/core/parser/000077500000000000000000000000001451700765000165535ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/parser/__init__.py000066400000000000000000000000461451700765000206640ustar00rootroot00000000000000"""Tests for sqlfluff.core.parser.""" sqlfluff-2.3.5/test/core/parser/conftest.py000066400000000000000000000013611451700765000207530ustar00rootroot00000000000000"""Test fixtures for parser tests.""" import pytest from sqlfluff.core.dialects import dialect_selector from sqlfluff.core.parser.segments import TemplateSegment @pytest.fixture(scope="function") def fresh_ansi_dialect(): """Expand the ansi dialect for use.""" return dialect_selector("ansi") @pytest.fixture(scope="function") def test_segments(generate_test_segments): """A preset list of segments for testing. Includes a templated segment for completeness. """ main_list = generate_test_segments(["bar", " \t ", "foo", "baar", " \t "]) ts = TemplateSegment( pos_marker=main_list[-1].get_end_point_marker(), source_str="{# comment #}", block_type="comment", ) return main_list + (ts,) sqlfluff-2.3.5/test/core/parser/grammar/000077500000000000000000000000001451700765000202015ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/parser/grammar/__init__.py000066400000000000000000000000711451700765000223100ustar00rootroot00000000000000"""Tests for the sqlfluff.core.parser.grammar module.""" sqlfluff-2.3.5/test/core/parser/grammar/conftest.py000066400000000000000000000041761451700765000224100ustar00rootroot00000000000000"""Common test fixtures for grammar testing.""" from typing import Any, Dict, List, Tuple, Type import pytest from sqlfluff.core.parser import KeywordSegment, StringParser from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar.base import BaseGrammar from sqlfluff.core.parser.types import ParseMode @pytest.fixture(scope="function") def structural_parse_mode_test(generate_test_segments, fresh_ansi_dialect): """Test the structural function of a grammar in various parse modes. This helper fixture is designed to modularise grammar tests. """ def _structural_parse_mode_test( test_segment_seeds: List[str], grammar_class: Type[BaseGrammar], grammar_argument_seeds: List[str], grammar_terminator_seeds: List[str], grammar_kwargs: Dict[str, Any], parse_mode: ParseMode, input_slice: slice, output_tuple: Tuple[Any, ...], ): segments = generate_test_segments(test_segment_seeds) # Dialect is required here only to have access to bracket segments. ctx = ParseContext(dialect=fresh_ansi_dialect) # NOTE: We pass terminators using kwargs rather than directly because some # classes don't support it (e.g. Bracketed). 
if grammar_terminator_seeds: grammar_kwargs["terminators"] = [ StringParser(e, KeywordSegment) for e in grammar_terminator_seeds ] _seq = grammar_class( *(StringParser(e, KeywordSegment) for e in grammar_argument_seeds), parse_mode=parse_mode, **grammar_kwargs, ) _start = input_slice.start or 0 _stop = input_slice.stop or len(segments) _match = _seq.match(segments[:_stop], _start, ctx) # If we're expecting an output tuple, assert the match is truthy. if output_tuple: assert _match _result = tuple( e.to_tuple(show_raw=True, code_only=False, include_meta=True) for e in _match.apply(segments) ) assert _result == output_tuple # Return the function return _structural_parse_mode_test sqlfluff-2.3.5/test/core/parser/grammar/grammar_anyof_test.py000066400000000000000000000201221451700765000244310ustar00rootroot00000000000000"""Tests for the OneOf, AnyOf & AnySetOf grammars. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import pytest from sqlfluff.core.parser import ( KeywordSegment, ParseMode, RawSegment, RegexParser, StringParser, ) from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import OneOf, Sequence from sqlfluff.core.parser.grammar.anyof import AnyNumberOf, AnySetOf from sqlfluff.core.parser.match_result import MatchResult class Example1Segment(RawSegment): """A minimal example segment for testing.""" type = "example1" class Example2Segment(RawSegment): """Another minimal example segment for testing.""" type = "example2" def test__parser__grammar__oneof__copy(): """Test grammar copying.""" bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g1 = OneOf(fs, bs) # Check copy g2 = g1.copy() assert g1 == g2 assert g1 is not g2 # Check copy insert (start) g3 = g1.copy(insert=[bs], at=0) assert g3 == OneOf(bs, fs, bs) # Check copy insert (mid) g4 = g1.copy(insert=[bs], at=1) assert g4 == OneOf(fs, bs, bs) # Check copy insert (end) g5 = g1.copy(insert=[bs], at=-1) assert g5 == OneOf(fs, bs, bs) @pytest.mark.parametrize("allow_gaps", [True, False]) def test__parser__grammar_oneof(test_segments, allow_gaps): """Test the OneOf grammar. NOTE: Should behave the same regardless of allow_gaps. """ bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(fs, bs, allow_gaps=allow_gaps) ctx = ParseContext(dialect=None) # Check directly assert g.match(test_segments, 0, parse_context=ctx) == MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ) # Check with a bit of whitespace assert not g.match(test_segments, 1, parse_context=ctx) def test__parser__grammar_oneof_templated(test_segments): """Test the OneOf grammar. NB: Should behave the same regardless of code_only. """ bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(fs, bs) ctx = ParseContext(dialect=None) # This shouldn't match, but it *ALSO* shouldn't raise an exception. 
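    # (Index 5 in the fixture is the templated segment, so this exercises
    # matching directly against templated content.)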
# https://github.com/sqlfluff/sqlfluff/issues/780 assert not g.match(test_segments, 5, parse_context=ctx) def test__parser__grammar_oneof_exclude(test_segments): """Test the OneOf grammar exclude option.""" bs = StringParser("bar", KeywordSegment) fs = StringParser("foo", KeywordSegment) g = OneOf(bs, exclude=Sequence(bs, fs)) ctx = ParseContext(dialect=None) # Just against the first alone assert g.match(test_segments[:1], 0, parse_context=ctx) # Now with the bit to exclude included assert not g.match(test_segments, 0, parse_context=ctx) def test__parser__grammar_oneof_take_longest_match(test_segments): """Test that the OneOf grammar takes the longest match.""" fooRegex = RegexParser(r"fo{2}", KeywordSegment) baar = StringParser("baar", KeywordSegment) foo = StringParser("foo", KeywordSegment) fooBaar = Sequence( foo, baar, ) ctx = ParseContext(dialect=None) assert fooRegex.match(test_segments, 2, parse_context=ctx).matched_slice == slice( 2, 3 ) # Even if fooRegex comes first, fooBaar # is a longer match and should be taken assert OneOf(fooRegex, fooBaar).match( test_segments, 2, parse_context=ctx ).matched_slice == slice(2, 4) def test__parser__grammar_oneof_take_first(test_segments): """Test that the OneOf grammar takes first match in case they are of same length.""" foo1 = StringParser("foo", Example1Segment) foo2 = StringParser("foo", Example2Segment) ctx = ParseContext(dialect=None) # Both segments would match "foo" # so we test that order matters g1 = OneOf(foo1, foo2) result1 = g1.match(test_segments, 2, ctx) # 2 is the index of "foo" # in g1, the Example1Segment is first. assert result1.matched_class is Example1Segment g2 = OneOf(foo2, foo1) result2 = g2.match(test_segments, 2, ctx) # 2 is the index of "foo" # in g2, the Example2Segment is first. assert result2.matched_class is Example2Segment @pytest.mark.parametrize( "mode,options,terminators,input_slice,kwargs,output_tuple", [ # ##### # Strict matches # ##### # 1. Match once (ParseMode.STRICT, ["a"], [], slice(None, None), {}, (("keyword", "a"),)), # 2. Match none (ParseMode.STRICT, ["b"], [], slice(None, None), {}, ()), # 3. Match twice ( ParseMode.STRICT, ["b", "a"], [], slice(None, None), {}, ( ("keyword", "a"), ("whitespace", " "), ("keyword", "b"), ), ), # 4. Limited match ( ParseMode.STRICT, ["b", "a"], [], slice(None, None), {"max_times": 1}, (("keyword", "a"),), ), # ##### # Greedy matches # ##### # 1. Terminated match ( ParseMode.GREEDY, ["b", "a"], ["b"], slice(None, None), {}, (("keyword", "a"),), ), # 2. Terminated, but not matching the first element. ( ParseMode.GREEDY, ["b"], ["b"], slice(None, None), {}, (("unparsable", (("raw", "a"),)),), ), # 3. Terminated, but only a partial match. ( ParseMode.GREEDY, ["a"], ["c"], slice(None, None), {}, ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ), ), # Test exhaustion before hitting min_times. # This is tricky to otherwise get coverage for because it's a # fairly unusual occurrence, but nonetheless a path in the logic # which needs coverage. It would normally only occur if a relatively # high value is set for min_times. ( ParseMode.STRICT, ["d"], [], slice(5, None), {"min_times": 3}, (), ), ], ) def test__parser__grammar_anyof_modes( mode, options, terminators, input_slice, kwargs, output_tuple, structural_parse_mode_test, ): """Test the AnyNumberOf grammar with various parse modes. In particular here we're testing the treatment of unparsable sections. 
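
    As a rough illustration (drawing on the parametrized cases above): in
    GREEDY mode, matching keyword "a" against "a b" with terminator "c"
    matches the "a" and wraps the unmatched "b" in an `unparsable` section,
    rather than failing the whole match.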
""" structural_parse_mode_test( ["a", " ", "b", " ", "c", "d", " ", "d"], AnyNumberOf, options, terminators, kwargs, mode, input_slice, output_tuple, ) def test__parser__grammar_anysetof(generate_test_segments): """Test the AnySetOf grammar.""" token_list = ["bar", " \t ", "foo", " \t ", "bar"] segments = generate_test_segments(token_list) bar = StringParser("bar", KeywordSegment) foo = StringParser("foo", KeywordSegment) g = AnySetOf(foo, bar) ctx = ParseContext(dialect=None) # Check it doesn't match if the start is whitespace. assert not g.match(segments, 1, ctx) # Check structure if we start with a match. result = g.match(segments, 0, ctx) assert result == MatchResult( matched_slice=slice(0, 3), child_matches=( MatchResult( slice(0, 1), KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( slice(2, 3), KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), # NOTE: The second "bar" isn't included because this # is any *set* of and we've already have "bar" once. ), ) sqlfluff-2.3.5/test/core/parser/grammar/grammar_other_test.py000066400000000000000000000200771451700765000244470ustar00rootroot00000000000000"""Tests for any other grammars. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import logging import pytest from sqlfluff.core.parser import KeywordSegment, StringParser, SymbolSegment from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Anything, Delimited, Nothing from sqlfluff.core.parser.grammar.noncode import NonCodeMatcher from sqlfluff.core.parser.types import ParseMode @pytest.mark.parametrize( "token_list,min_delimiters,allow_gaps,allow_trailing,match_len", [ # Basic testing (note diff to v1, no trailing whitespace.) (["bar", " \t ", ".", " ", "bar"], 0, True, False, 5), (["bar", " \t ", ".", " ", "bar", " "], 0, True, False, 5), # Testing allow_trailing (["bar", " \t ", ".", " "], 0, True, False, 1), # NOTE: Diff to v1 (["bar", " \t ", ".", " "], 0, True, True, 3), # NOTE: Diff to v1 # Testing the implications of allow_gaps (["bar", " \t ", ".", " ", "bar"], 0, True, False, 5), (["bar", " \t ", ".", " ", "bar"], 0, False, False, 1), (["bar", " \t ", ".", " ", "bar"], 1, True, False, 5), (["bar", " \t ", ".", " ", "bar"], 1, False, False, 0), (["bar", ".", "bar"], 0, True, False, 3), (["bar", ".", "bar"], 0, False, False, 3), (["bar", ".", "bar"], 1, True, False, 3), (["bar", ".", "bar"], 1, False, False, 3), # Check we still succeed with something trailing right on the end. (["bar", ".", "bar", "foo"], 1, False, False, 3), # Check min_delimiters. There's a delimiter here, but not enough to match. 
(["bar", ".", "bar", "foo"], 2, True, False, 0), ], ) def test__parser__grammar_delimited( min_delimiters, allow_gaps, allow_trailing, token_list, match_len, caplog, generate_test_segments, fresh_ansi_dialect, ): """Test the Delimited grammar when not code_only.""" test_segments = generate_test_segments(token_list) g = Delimited( StringParser("bar", KeywordSegment), delimiter=StringParser(".", SymbolSegment), allow_gaps=allow_gaps, allow_trailing=allow_trailing, min_delimiters=min_delimiters, ) ctx = ParseContext(dialect=fresh_ansi_dialect) with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"): # Matching with whitespace shouldn't match if we need at least one delimiter m = g.match(test_segments, 0, ctx) assert len(m) == match_len @pytest.mark.parametrize( "input_tokens, terminators, output_tuple", [ # No terminators (or non matching terminators), full match. ( ["a", " ", "b"], [], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ), ), ( ["a", " ", "b"], ["c"], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ), ), # Terminate after some matched content. ( ["a", " ", "b"], ["b"], (("raw", "a"),), ), # Terminate immediately. ( ["a", " ", "b"], ["a"], (), ), # NOTE: the the "c" terminator won't match because "c" is # a keyword and therefore is required to have whitespace # before it. # See `greedy_match()` for details. ( ["a", " ", "b", "c", " ", "d"], ["c"], ( ("raw", "a"), ("whitespace", " "), ("raw", "b"), ("raw", "c"), ("whitespace", " "), ("raw", "d"), ), ), # These next two tests check the handling of brackets in the # Anything match. Unlike other greedy matches, this grammar # assumes we're not going to re-parse these brackets and so # _does_ infer their structure and creates bracketed elements # for them. ( ["(", "foo", " ", ")", " ", "foo"], ["foo"], ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("raw", "foo"), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), # No trailing whitespace. ), ), ( ["(", " ", "foo", "(", "foo", ")", ")", " ", "foo"], ["foo"], ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("raw", "foo"), ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("raw", "foo"), ("dedent", ""), ("end_bracket", ")"), ), ), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ], ) def test__parser__grammar_anything_structure( input_tokens, terminators, output_tuple, structural_parse_mode_test ): """Structure tests for the Anything grammar. NOTE: For most greedy semantics we don't instantiate inner brackets, but in the Anything grammar, the assumption is that we're not coming back to these segments later so we take the time to instantiate any bracketed sections. This is to maintain some backward compatibility with previous parsing behaviour. """ structural_parse_mode_test( input_tokens, Anything, [], terminators, {}, ParseMode.STRICT, slice(None, None), output_tuple, ) @pytest.mark.parametrize( "terminators,match_length", [ # No terminators, full match. ([], 6), # If terminate with foo - match length 1. (["foo"], 1), # If terminate with foof - unterminated. Match everything (["foof"], 6), # Greedy matching until the first item should return none (["bar"], 0), # NOTE: the greedy until "baar" won't match because baar is # a keyword and therefore is required to have whitespace # before it. In the test sequence "baar" does not. # See `greedy_match()` for details. 
(["baar"], 6), ], ) def test__parser__grammar_anything_match( terminators, match_length, test_segments, fresh_ansi_dialect ): """Test the Anything grammar. NOTE: Anything combined with terminators implements the semantics which used to be implemented by `GreedyUntil`. """ ctx = ParseContext(dialect=fresh_ansi_dialect) terms = [StringParser(kw, KeywordSegment) for kw in terminators] result = Anything(terminators=terms).match(test_segments, 0, parse_context=ctx) assert result.matched_slice == slice(0, match_length) assert result.matched_class is None # We shouldn't have set a class def test__parser__grammar_nothing_match(test_segments, fresh_ansi_dialect): """Test the Nothing grammar.""" ctx = ParseContext(dialect=fresh_ansi_dialect) assert not Nothing().match(test_segments, 0, ctx) def test__parser__grammar_noncode_match(test_segments, fresh_ansi_dialect): """Test the NonCodeMatcher.""" ctx = ParseContext(dialect=fresh_ansi_dialect) # NonCode Matcher doesn't work with simple assert NonCodeMatcher().simple(ctx) is None # We should match one and only one segment match = NonCodeMatcher().match(test_segments, 1, parse_context=ctx) assert match assert match.matched_slice == slice(1, 2) sqlfluff-2.3.5/test/core/parser/grammar/grammar_ref_test.py000066400000000000000000000052501451700765000240760ustar00rootroot00000000000000"""Test the Ref grammar. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. """ import pytest from sqlfluff.core.dialects import Dialect from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.grammar import Ref from sqlfluff.core.parser.lexer import RegexLexer from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.parsers import StringParser from sqlfluff.core.parser.segments import CodeSegment, WhitespaceSegment @pytest.fixture(scope="function") def test_dialect(): """A stripped back test dialect for testing.""" test_dialect = Dialect("test", root_segment_name="FileSegment") test_dialect.set_lexer_matchers( [ RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"} ), ] ) test_dialect.add(FooSegment=StringParser("foo", CodeSegment, type="foo")) # Return the expanded copy. 
    return test_dialect.expand()


def test__parser__grammar__ref_eq():
    """Test equality of Ref Grammars."""
    r1 = Ref("foo")
    r2 = Ref("foo")
    assert r1 is not r2
    assert r1 == r2
    check_list = [1, 2, r2, 3]
    # Check we can find it in lists
    assert r1 in check_list
    # Check we can get its position
    assert check_list.index(r1) == 2
    # Check we can remove it from a list
    check_list.remove(r1)
    assert r1 not in check_list


def test__parser__grammar__ref_repr():
    """Test the __repr__ method of Ref."""
    assert repr(Ref("foo")) == "<Ref: 'foo'>"
    assert repr(Ref("bar", optional=True)) == "<Ref: 'bar' [opt]>"


def test__parser__grammar_ref_match(generate_test_segments, test_dialect):
    """Test the Ref grammar match method."""
    foo_ref = Ref("FooSegment")
    test_segments = generate_test_segments(["bar", "foo", "bar"])
    ctx = ParseContext(dialect=test_dialect)

    match = foo_ref.match(test_segments, 1, ctx)
    assert match == MatchResult(
        matched_slice=slice(1, 2),
        matched_class=CodeSegment,
        segment_kwargs={"instance_types": ("foo",)},
    )


def test__parser__grammar_ref_exclude(generate_test_segments, fresh_ansi_dialect):
    """Test the Ref grammar exclude option with the match method."""
    identifier = Ref("NakedIdentifierSegment", exclude=Ref.keyword("ABS"))
    test_segments = generate_test_segments(["ABS", "ABSOLUTE"])
    ctx = ParseContext(dialect=fresh_ansi_dialect)
    # Assert ABS does not match, due to the exclude
    assert not identifier.match(test_segments, 0, ctx)
    # Assert ABSOLUTE does match
    assert identifier.match(test_segments, 1, ctx)
sqlfluff-2.3.5/test/core/parser/grammar/grammar_sequence_test.py000066400000000000000000000422121451700765000251310ustar00rootroot00000000000000"""Tests for the Sequence grammar.

NOTE: All of these tests depend somewhat on the KeywordSegment working as planned.
"""

import logging

import pytest

from sqlfluff.core.errors import SQLParseError
from sqlfluff.core.parser import Dedent, Indent, KeywordSegment, StringParser
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.grammar import Bracketed, Conditional, Sequence
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.types import ParseMode


def test__parser__grammar_sequence_repr():
    """Test the Sequence grammar __repr__ method."""
    bar = StringParser("bar", KeywordSegment)
    assert repr(bar) == "<StringParser: 'BAR'>"
    foo = StringParser("foo", KeywordSegment)
    sequence = Sequence(bar, foo)
    assert (
        repr(sequence)
        == "<Sequence: [<StringParser: 'BAR'>, <StringParser: 'FOO'>]>"
    )


def test__parser__grammar_sequence_nested_match(test_segments, caplog):
    """Test the Sequence grammar when nested."""
    bar = StringParser("bar", KeywordSegment)
    foo = StringParser("foo", KeywordSegment)
    baar = StringParser("baar", KeywordSegment)
    g = Sequence(Sequence(bar, foo), baar)

    ctx = ParseContext(dialect=None)
    # Confirm the structure of the test segments:
    assert [s.raw for s in test_segments] == ["bar", " \t ", "foo", "baar", " \t ", ""]
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"):
        # Matching just the start of the list shouldn't work.
        result1 = g.match(test_segments[:3], 0, ctx)
    assert not result1  # Check it returns falsy
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"):
        # Matching the whole list should.
        result2 = g.match(test_segments, 0, ctx)
    assert result2  # Check it returns truthy
    assert result2 == MatchResult(
        matched_slice=slice(0, 4),  # NOTE: One of these is space.
child_matches=( MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(2, 3), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(3, 4), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), ), ) @pytest.mark.parametrize( "mode,sequence,terminators,input_slice,output_tuple", [ # ##### # Test matches where we should get something, and that's # the whole sequence. # NOTE: Include a little whitespace in the slice (i.e. the first _two_ # segments) to check that it isn't included in the match. (ParseMode.STRICT, ["a"], [], slice(None, 2), (("keyword", "a"),)), (ParseMode.GREEDY, ["a"], [], slice(None, 2), (("keyword", "a"),)), (ParseMode.GREEDY_ONCE_STARTED, ["a"], [], slice(None, 2), (("keyword", "a"),)), # ##### # Test matching on sequences where we run out of segments before matching # the whole sequence. # STRICT returns no match. (ParseMode.STRICT, ["a", "b"], [], slice(None, 2), ()), # GREEDY & GREEDY_ONCE_STARTED returns the content as unparsable, and # still don't include the trailing whitespace. The return value does # however have the matched "a" as a keyword and not a raw. ( ParseMode.GREEDY, ["a", "b"], [], slice(None, 2), (("unparsable", (("keyword", "a"),)),), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a", "b"], [], slice(None, 2), (("unparsable", (("keyword", "a"),)),), ), # ##### # Test matching on sequences where we fail to match the first element. # STRICT & GREEDY_ONCE_STARTED return no match. (ParseMode.STRICT, ["b"], [], slice(None, 2), ()), (ParseMode.GREEDY_ONCE_STARTED, ["b"], [], slice(None, 2), ()), # GREEDY claims the remaining elements (unmutated) as unparsable, but # does not claim any trailing whitespace. ( ParseMode.GREEDY, ["b"], [], slice(None, 2), (("unparsable", (("raw", "a"),)),), ), # ##### # Test matches where we should match the sequence fully, but there's more # to match. # First without terminators... # STRICT ignores the rest. (ParseMode.STRICT, ["a"], [], slice(None, 5), (("keyword", "a"),)), # The GREEDY modes claim the rest as unparsable. # NOTE: the whitespace in between is _not_ unparsable. ( ParseMode.GREEDY, ["a"], [], slice(None, 5), ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"), ("whitespace", " "), ("raw", "c"))), ), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a"], [], slice(None, 5), ( ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"), ("whitespace", " "), ("raw", "c"))), ), ), # Second *with* terminators. # NOTE: The whitespace before the terminator is not included. (ParseMode.STRICT, ["a"], ["c"], slice(None, 5), (("keyword", "a"),)), ( ParseMode.GREEDY, ["a"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), # ##### # Test matches where we match the first element of a sequence but not the # second (with terminators) (ParseMode.STRICT, ["a", "x"], ["c"], slice(None, 5), ()), # NOTE: For GREEDY modes, the matched portion is not included as an "unparsable" # only the portion which failed to match. The terminator is not included and # the matched portion is still mutated correctly. 
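        # (Note that "a" still comes back as ("keyword", "a") rather than
        # ("raw", "a") in the expected outputs below.)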
( ParseMode.GREEDY, ["a", "x"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), ( ParseMode.GREEDY_ONCE_STARTED, ["a", "x"], ["c"], slice(None, 5), (("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),))), ), # ##### # Test competition between sequence elements and terminators. # In GREEDY_ONCE_STARTED, the first element is matched before any terminators. ( ParseMode.GREEDY_ONCE_STARTED, ["a"], ["a"], slice(None, 2), (("keyword", "a"),), ), # In GREEDY, the terminator is matched first and so takes precedence. ( ParseMode.GREEDY, ["a"], ["a"], slice(None, 2), (), ), # NOTE: In these last two cases, the "b" isn't included because it acted as # a terminator before being considered in the sequence. ( ParseMode.GREEDY_ONCE_STARTED, ["a", "b"], ["b"], slice(None, 3), (("unparsable", (("keyword", "a"),)),), ), ( ParseMode.GREEDY, ["a", "b"], ["b"], slice(None, 3), (("unparsable", (("keyword", "a"),)),), ), ], ) def test__parser__grammar_sequence_modes( mode, sequence, terminators, input_slice, output_tuple, structural_parse_mode_test, ): """Test the Sequence grammar with various parse modes. In particular here we're testing the treatment of unparsable sections. """ structural_parse_mode_test( ["a", " ", "b", " ", "c", "d", " ", "d"], Sequence, sequence, terminators, {}, mode, input_slice, output_tuple, ) @pytest.mark.parametrize( "input_seed,mode,sequence,kwargs,output_tuple", [ # A sequence that isn't bracketed shouldn't match. # Regardless of mode. (["a"], ParseMode.STRICT, ["a"], {}, ()), (["a"], ParseMode.GREEDY, ["a"], {}, ()), # Test potential empty brackets (no whitespace) ( ["(", ")"], ParseMode.STRICT, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", ")"], ParseMode.GREEDY, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Test potential empty brackets (with whitespace) ( ["(", " ", ")"], ParseMode.STRICT, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", " ", ")"], ParseMode.GREEDY, [], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("whitespace", " "), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), ( ["(", " ", ")"], ParseMode.STRICT, [], # Strict matching, without allowing gaps, shouldn't match. {"allow_gaps": False}, (), ), ( ["(", " ", ")"], ParseMode.GREEDY, [], # Greedy matching, without allowing gaps, should return unparsable. # NOTE: This functionality doesn't get used much. {"allow_gaps": False}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("unparsable", (("whitespace", " "),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Happy path content match. 
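        # (The keyword lands inside a single "bracketed" element, together
        # with the implicit indent/dedent pair.)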
( ["(", "a", ")"], ParseMode.STRICT, ["a"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("keyword", "a"), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Content match fails ( ["(", "a", ")"], ParseMode.STRICT, ["b"], {}, (), ), ( ["(", "a", ")"], ParseMode.GREEDY, ["b"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("unparsable", (("raw", "a"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Partial matches (not whole grammar matched) ( ["(", "a", ")"], ParseMode.STRICT, ["a", "b"], {}, (), ), ( ["(", "a", ")"], ParseMode.GREEDY, ["a", "b"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("unparsable", (("keyword", "a"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Partial matches (not whole sequence matched) ( ["(", "a", " ", "b", ")"], ParseMode.STRICT, ["a"], {}, (), ), ( ["(", "a", " ", "b", ")"], ParseMode.GREEDY, ["a"], {}, ( ( "bracketed", ( ("start_bracket", "("), ("indent", ""), ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ("dedent", ""), ("end_bracket", ")"), ), ), ), ), # Test an unwrapped path (with square brackets) ( ["[", "a", " ", "b", "]"], ParseMode.GREEDY, ["a"], {"bracket_type": "square"}, ( ("start_square_bracket", "["), ("indent", ""), ("keyword", "a"), ("whitespace", " "), ("unparsable", (("raw", "b"),)), ("dedent", ""), ("end_square_bracket", "]"), ), ), ], ) def test__parser__grammar_bracketed_modes( input_seed, mode, sequence, kwargs, output_tuple, structural_parse_mode_test, ): """Test the Bracketed grammar with various parse modes.""" structural_parse_mode_test( input_seed, Bracketed, sequence, [], kwargs, mode, slice(None, None), output_tuple, ) @pytest.mark.parametrize( "input_seed,mode,sequence", [ # Unclosed brackets always raise errors. (["(", "a"], ParseMode.STRICT, ["a"]), (["(", "a"], ParseMode.GREEDY, ["a"]), ], ) def test__parser__grammar_bracketed_error_modes( input_seed, mode, sequence, structural_parse_mode_test, ): """Test the Bracketed grammar with various parse modes.""" with pytest.raises(SQLParseError): structural_parse_mode_test( input_seed, Bracketed, sequence, [], {}, mode, slice(None, None), (), ) def test__parser__grammar_sequence_indent_conditional_match(test_segments, caplog): """Test the Sequence grammar with indents.""" bar = StringParser("bar", KeywordSegment) foo = StringParser("foo", KeywordSegment) # We will assume the default config has indented_joins = False. # We're testing without explicitly setting the `config_type` because # that's the assumed way of using the grammar in practice. g = Sequence( Dedent, Conditional(Indent, indented_joins=False), bar, Conditional(Indent, indented_joins=True), foo, Dedent, ) ctx = ParseContext(dialect=None) with caplog.at_level(logging.DEBUG, logger="sqlfluff.parser"): m = g.match(test_segments, 0, parse_context=ctx) assert m == MatchResult( matched_slice=slice(0, 3), # NOTE: One of these is space. child_matches=( # The two child keywords MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), MatchResult( matched_slice=slice(2, 3), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("keyword",)}, ), ), insert_segments=( (0, Dedent), # The starting, unconditional dedent. (0, Indent), # The conditional (activated) Indent. # NOTE: There *isn't* the other Indent. (3, Dedent), # The closing unconditional dedent. # NOTE: This last one is still included even though it's # after the last matched segment. 
), ) sqlfluff-2.3.5/test/core/parser/helpers_test.py000066400000000000000000000017571451700765000216400ustar00rootroot00000000000000"""Test the helpers.""" import pytest from sqlfluff.core.parser.helpers import trim_non_code_segments @pytest.mark.parametrize( "token_list,pre_len,mid_len,post_len", [ (["bar", ".", "bar"], 0, 3, 0), (("bar", ".", "bar"), 0, 3, 0), ([], 0, 0, 0), ([" ", "\n", "\t", "bar", ".", "bar", " ", "\n", "\t"], 3, 3, 3), ], ) def test__parser__helper_trim_non_code_segments( token_list, pre_len, mid_len, post_len, generate_test_segments, ): """Test trim_non_code_segments.""" segments = generate_test_segments(token_list) pre, mid, post = trim_non_code_segments(segments) # Assert lengths assert (len(pre), len(mid), len(post)) == (pre_len, mid_len, post_len) # Assert content assert [elem.raw for elem in pre] == list(token_list[:pre_len]) assert [elem.raw for elem in mid] == list(token_list[pre_len : pre_len + mid_len]) assert [elem.raw for elem in post] == list(token_list[len(segments) - post_len :]) sqlfluff-2.3.5/test/core/parser/lexer_test.py000066400000000000000000000353231451700765000213110ustar00rootroot00000000000000"""The Test file for The New Parser (Lexing steps).""" import logging from typing import Any, Dict, List, NamedTuple, Tuple, Union import pytest from sqlfluff.core import FluffConfig, SQLLexError from sqlfluff.core.parser import CodeSegment, Lexer, NewlineSegment from sqlfluff.core.parser.lexer import LexMatch, RegexLexer, StringLexer from sqlfluff.core.parser.segments.meta import TemplateSegment from sqlfluff.core.templaters import JinjaTemplater, RawFileSlice, TemplatedFile from sqlfluff.core.templaters.base import TemplatedFileSlice def assert_matches(instring, matcher, matchstring): """Assert that a matcher does or doesn't work on a string. The optional `matchstring` argument, which can optionally be None, allows to either test positive matching of a particular string or negative matching (that it explicitly) doesn't match. """ res = matcher.match(instring) # Check we've got the right type assert isinstance(res, LexMatch) if matchstring is None: assert res.forward_string == instring assert res.elements == [] else: assert res.forward_string == instring[len(matchstring) :] assert len(res.elements) == 1 assert res.elements[0].raw == matchstring @pytest.mark.parametrize( "raw,res", [ # NOTE: The final empty string is the end of file marker ("a b", ["a", " ", "b", ""]), ("b.c", ["b", ".", "c", ""]), ( "abc \n \t def ;blah", ["abc", " ", "\n", " \t ", "def", " ", ";", "blah", ""], ), # Test Quotes ('abc\'\n "\t\' "de`f"', ["abc", "'\n \"\t'", " ", '"de`f"', ""]), # Test Comments ("abc -- comment \nblah", ["abc", " ", "-- comment ", "\n", "blah", ""]), ("abc # comment \nblah", ["abc", " ", "# comment ", "\n", "blah", ""]), # Note the more complicated parsing of block comments. 
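        # Block comments are subdivided on their internal newlines, which is
        # why "\n" appears as a separate element in the expected results.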
        # This tests subdivision and trimming (incl the empty case)
        (
            "abc /* comment \nblah*/",
            ["abc", " ", "/* comment", " ", "\n", "blah*/", ""],
        ),
        ("abc /*\n\t\n*/", ["abc", " ", "/*", "\n", "\t", "\n", "*/", ""]),
        # Test strings
        ("*-+bd/", ["*", "-", "+", "bd", "/", ""]),
        # Test Negatives and Minus
        ("2+4 -5", ["2", "+", "4", " ", "-", "5", ""]),
        ("when 'Spec\\'s 23' like", ["when", " ", "'Spec\\'s 23'", " ", "like", ""]),
        ('when "Spec\\"s 23" like', ["when", " ", '"Spec\\"s 23"', " ", "like", ""]),
    ],
)
def test__parser__lexer_obj(raw, res, caplog):
    """Test the lexer splits as expected in a selection of cases."""
    lex = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
    with caplog.at_level(logging.DEBUG):
        lexing_segments, _ = lex.lex(raw)
        assert [seg.raw for seg in lexing_segments] == res


@pytest.mark.parametrize(
    "raw,res",
    [
        (".fsaljk", "."),
        ("fsaljk", None),
    ],
)
def test__parser__lexer_string(raw, res):
    """Test the StringLexer."""
    matcher = StringLexer("dot", ".", CodeSegment)
    assert_matches(raw, matcher, res)


@pytest.mark.parametrize(
    "raw,reg,res",
    [
        ("fsaljk", "f", "f"),
        ("fsaljk", r"f", "f"),
        ("fsaljk", r"[fas]*", "fsa"),
        # Matching whitespace segments
        (" \t fsaljk", r"[^\S\r\n]*", " \t "),
        # Matching whitespace segments (with a newline)
        (" \t \n fsaljk", r"[^\S\r\n]*", " \t "),
        # Matching quotes containing stuff
        ("'something boring' \t \n fsaljk", r"'[^']*'", "'something boring'"),
        (
            "' something exciting \t\n ' \t \n fsaljk",
            r"'[^']*'",
            "' something exciting \t\n '",
        ),
    ],
)
def test__parser__lexer_regex(raw, reg, res, caplog):
    """Test the RegexLexer."""
    matcher = RegexLexer("test", reg, CodeSegment)
    with caplog.at_level(logging.DEBUG):
        assert_matches(raw, matcher, res)


def test__parser__lexer_lex_match(caplog):
    """Test the RepeatedMultiMatcher."""
    matchers = [
        StringLexer("dot", ".", CodeSegment),
        RegexLexer("test", r"#[^#]*#", CodeSegment),
    ]
    with caplog.at_level(logging.DEBUG):
        res = Lexer.lex_match("..#..#..#", matchers)
        assert res.forward_string == "#"  # Should match right up to the final element
        assert len(res.elements) == 5
        assert res.elements[2].raw == "#..#"


def test__parser__lexer_fail():
    """Test how the lexer fails and reports errors."""
    lex = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))

    _, vs = lex.lex("Select \u0394")

    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.line_pos == 8


def test__parser__lexer_fail_via_parse():
    """Test how the parser fails and reports errors while lexing."""
    lexer = Lexer(config=FluffConfig(overrides={"dialect": "ansi"}))
    _, vs = lexer.lex("Select \u0394")
    assert vs
    assert len(vs) == 1
    err = vs[0]
    assert isinstance(err, SQLLexError)
    assert err.line_pos == 8


def test__parser__lexer_trim_post_subdivide(caplog):
    """Test a RegexLexer with a trim_post_subdivide function."""
    matcher = [
        RegexLexer(
            "function_script_terminator",
            r";\s+(?!\*)\/(?!\*)|\s+(?!\*)\/(?!\*)",
            CodeSegment,
            segment_kwargs={"type": "function_script_terminator"},
            subdivider=StringLexer(
                "semicolon", ";", CodeSegment, segment_kwargs={"type": "semicolon"}
            ),
            trim_post_subdivide=RegexLexer(
                "newline",
                r"(\n|\r\n)+",
                NewlineSegment,
            ),
        )
    ]
    with caplog.at_level(logging.DEBUG):
        res = Lexer.lex_match(";\n/\n", matcher)
        assert res.elements[0].raw == ";"
        assert res.elements[1].raw == "\n"
        assert res.elements[2].raw == "/"
        assert len(res.elements) == 3


class _LexerSlicingCase(NamedTuple):
    name: str
    in_str: str
    context: Dict[str, Any]
    # (
    #     raw,
    #     source_str (if TemplateSegment),
    #     block_type (if TemplateSegment),
    #     segment_type
# ) expected_segments: List[Tuple[str, Union[str, None], Union[str, None], str]] def _statement(*args, **kwargs): return "" def _load_result(*args, **kwargs): return ["foo", "bar"] @pytest.mark.parametrize( "case", [ _LexerSlicingCase( name="call macro and function overrides", in_str="{% call statement('unique_keys', fetch_result=true) %}\n" " select 1 as test\n" "{% endcall %}\n" "{% set unique_keys = load_result('unique_keys') %}\n" "select 2\n", context={"statement": _statement, "load_result": _load_result}, expected_segments=[ ( "", "{% call statement('unique_keys', fetch_result=true) %}", "block_start", "placeholder", ), ("", None, None, "indent"), ("", "\n select 1 as test\n", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endcall %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ( "", "{% set unique_keys = load_result('unique_keys') %}", "templated", "placeholder", ), ("\n", None, None, "newline"), ("select", None, None, "word"), (" ", None, None, "whitespace"), ("2", None, None, "literal"), ("\n", None, None, "newline"), ("", None, None, "end_of_file"), ], ), _LexerSlicingCase( name="call an existing macro", in_str="{% macro render_name(title) %}\n" " '{{ title }}. foo' as {{ caller() }}\n" "{% endmacro %}\n" "SELECT\n" " {% call render_name('Sir') %}\n" " bar\n" " {% endcall %}\n" "FROM baz\n", context={}, expected_segments=[ ("", "{% macro render_name(title) %}", "block_start", "placeholder"), ("", None, None, "indent"), ("", "\n '", "literal", "placeholder"), ("", "{{ title }}", "templated", "placeholder"), ("", ". foo' as ", "literal", "placeholder"), ("", "{{ caller() }}", "templated", "placeholder"), ("", "\n", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endmacro %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ("SELECT", None, None, "word"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("'Sir. foo'", None, None, "raw"), (" ", None, None, "whitespace"), ("as", None, None, "word"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("bar", None, None, "word"), ("\n", None, None, "newline"), (" ", None, None, "whitespace"), ("\n", None, None, "newline"), ("", "\n bar\n ", "literal", "placeholder"), ("", None, None, "dedent"), ("", "{% endcall %}", "block_end", "placeholder"), ("\n", None, None, "newline"), ("FROM", None, None, "word"), (" ", None, None, "whitespace"), ("baz", None, None, "word"), ("\n", None, None, "newline"), ("", None, None, "end_of_file"), ], ), ], ids=lambda case: case.name, ) def test__parser__lexer_slicing_calls(case: _LexerSlicingCase): """Test slicing of call blocks. 
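
    A `{% call %}` block renders its body and substitutes the result of
    `caller()`, so the lexer has to map the templated output back onto
    source slices. See: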
https://github.com/sqlfluff/sqlfluff/issues/4013 """ config = FluffConfig(overrides={"dialect": "ansi"}) templater = JinjaTemplater(override_context=case.context) templated_file, templater_violations = templater.process( in_str=case.in_str, fname="test.sql", config=config, formatter=None ) assert ( not templater_violations ), f"Found templater violations: {templater_violations}" lexer = Lexer(config=config) lexing_segments, lexing_violations = lexer.lex(templated_file) assert not lexing_violations, f"Found templater violations: {lexing_violations}" assert case.expected_segments == [ ( seg.raw, seg.source_str if isinstance(seg, TemplateSegment) else None, seg.block_type if isinstance(seg, TemplateSegment) else None, seg.type, ) for seg in lexing_segments ] class _LexerSlicingTemplateFileCase(NamedTuple): name: str # easy way to build inputs here is to call templater.process in # test__parser__lexer_slicing_calls and adjust the output how you like: file: TemplatedFile # ( # raw, # source_str (if TemplateSegment), # block_type (if TemplateSegment), # segment_type # ) expected_segments: List[Tuple[str, Union[str, None], Union[str, None], str]] @pytest.mark.parametrize( "case", [ _LexerSlicingTemplateFileCase( name="very simple test case", file=TemplatedFile( source_str="SELECT {# comment #}1;", templated_str="SELECT 1;", fname="test.sql", sliced_file=[ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), TemplatedFileSlice( "comment", slice(7, 20, None), slice(7, 7, None) ), TemplatedFileSlice( "literal", slice(20, 22, None), slice(7, 9, None) ), ], raw_sliced=[ RawFileSlice("SELECT ", "literal", 0, None, 0), RawFileSlice("{# comment #}", "comment", 7, None, 0), RawFileSlice("1;", "literal", 20, None, 0), ], ), expected_segments=[ ("SELECT", None, None, "word"), (" ", None, None, "whitespace"), ("", "{# comment #}", "comment", "placeholder"), ("1", None, None, "literal"), (";", None, None, "raw"), ("", None, None, "end_of_file"), ], ), _LexerSlicingTemplateFileCase( name="special zero length slice type is kept", file=TemplatedFile( source_str="SELECT 1;", templated_str="SELECT 1;", fname="test.sql", sliced_file=[ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), # this is a special marker that the templater wants to show up # as a meta segment: TemplatedFileSlice( "special_type", slice(7, 7, None), slice(7, 7, None) ), TemplatedFileSlice("literal", slice(7, 9, None), slice(7, 9, None)), ], raw_sliced=[ RawFileSlice("SELECT 1;", "literal", 0, None, 0), ], ), expected_segments=[ ("SELECT", None, None, "word"), (" ", None, None, "whitespace"), ("", "", "special_type", "placeholder"), ("1", None, None, "literal"), (";", None, None, "raw"), ("", None, None, "end_of_file"), ], ), ], ids=lambda case: case.name, ) def test__parser__lexer_slicing_from_template_file(case: _LexerSlicingTemplateFileCase): """Test slicing using a provided TemplateFile. Useful for testing special inputs without having to find a templater to trick and yield the input you want to test. 
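
    As a minimal sketch (hypothetical input, using only helpers already
    imported in this module), such a file can be built and lexed directly:

        file = TemplatedFile.from_string("SELECT 1;")
        segments, violations = Lexer(
            config=FluffConfig(overrides={"dialect": "ansi"})
        ).lex(file)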
""" config = FluffConfig(overrides={"dialect": "ansi"}) lexer = Lexer(config=config) lexing_segments, lexing_violations = lexer.lex(case.file) assert not lexing_violations, f"Found templater violations: {lexing_violations}" assert case.expected_segments == [ ( seg.raw, seg.source_str if isinstance(seg, TemplateSegment) else None, seg.block_type if isinstance(seg, TemplateSegment) else None, seg.type, ) for seg in lexing_segments ] sqlfluff-2.3.5/test/core/parser/markers_test.py000066400000000000000000000051701451700765000216330ustar00rootroot00000000000000"""Tests for PositionMarker.""" import pytest from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.templaters import TemplatedFile @pytest.mark.parametrize( "raw,start_pos,end_pos", [ ("fsaljk", (0, 0), (0, 6)), ("", (2, 2), (2, 2)), # NB: 1 indexed, not 0 indexed. ("\n", (2, 2), (3, 1)), ("boo\n", (2, 2), (3, 1)), ("boo\nfoo", (2, 2), (3, 4)), ("\nfoo", (2, 2), (3, 4)), ], ) def test_markers__infer_next_position(raw, start_pos, end_pos): """Test that we can correctly infer positions from strings.""" assert end_pos == PositionMarker.infer_next_position(raw, *start_pos) def test_markers__setting_position_raw(): """Test that we can correctly infer positions from strings & locations.""" templ = TemplatedFile.from_string("foobar") # Check inference in the template assert templ.get_line_pos_of_char_pos(2, source=True) == (1, 3) assert templ.get_line_pos_of_char_pos(2, source=False) == (1, 3) # Now check it passes through pos = PositionMarker(slice(2, 5), slice(2, 5), templ) # Can we infer positions correctly? assert pos.working_loc == (1, 3) # Check other marker properties work too (i.e. source properties) assert pos.line_no == 1 assert pos.line_pos == 3 # i.e. 2 + 1 (for 1-indexed) def test_markers__setting_position_working(): """Test that we can correctly set positions manually.""" templ = TemplatedFile.from_string("foobar") pos = PositionMarker(slice(2, 5), slice(2, 5), templ, 4, 4) # Can we don't infer when we're explicitly told. assert pos.working_loc == (4, 4) def test_markers__comparison(): """Test that we can correctly compare markers.""" templ = TemplatedFile.from_string("abc") # Make position markers for each of a, b & c # NOTE: We're not explicitly setting the working location, we # rely here on the marker inferring that correctly itself. a_pos = PositionMarker(slice(0, 1), slice(0, 1), templ) b_pos = PositionMarker(slice(1, 2), slice(1, 2), templ) c_pos = PositionMarker(slice(2, 3), slice(2, 3), templ) all_pos = (a_pos, b_pos, c_pos) # Check equality assert all(p == p for p in all_pos) # Check inequality assert a_pos != b_pos and a_pos != c_pos and b_pos != c_pos # Check less than assert a_pos < b_pos and b_pos < c_pos assert not c_pos < a_pos # Check greater than assert c_pos > a_pos and c_pos > b_pos assert not a_pos > c_pos # Check less than or equal assert all(a_pos <= p for p in all_pos) # Check greater than or equal assert all(c_pos >= p for p in all_pos) sqlfluff-2.3.5/test/core/parser/match_algorithms_test.py000066400000000000000000000202241451700765000235110ustar00rootroot00000000000000"""Tests for the BaseGrammar and it's methods. NOTE: All of these tests depend somewhat on the KeywordSegment working as planned. 
""" import pytest from sqlfluff.core.dialects.base import Dialect from sqlfluff.core.errors import SQLParseError from sqlfluff.core.parser import ( CodeSegment, KeywordSegment, StringParser, SymbolSegment, WhitespaceSegment, ) from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.lexer import RegexLexer from sqlfluff.core.parser.match_algorithms import ( greedy_match, next_ex_bracket_match, next_match, resolve_bracket, trim_to_terminator, ) # NB: All of these tests depend somewhat on the KeywordSegment working as planned @pytest.fixture(scope="function") def test_dialect(): """A stripped back test dialect for testing brackets.""" test_dialect = Dialect("test", root_segment_name="FileSegment") test_dialect.bracket_sets("bracket_pairs").update( [("round", "StartBracketSegment", "EndBracketSegment", True)] ) test_dialect.set_lexer_matchers( [ RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment), RegexLexer( "code", r"[0-9a-zA-Z_]+", CodeSegment, segment_kwargs={"type": "code"} ), ] ) test_dialect.add( StartBracketSegment=StringParser("(", SymbolSegment, type="start_bracket"), EndBracketSegment=StringParser(")", SymbolSegment, type="end_bracket"), ) # Return the expanded copy. return test_dialect.expand() def make_result_tuple(result_slice, matcher_keywords, test_segments): """Make a comparison tuple for test matching.""" # No result slice means no match. if not result_slice: return () return tuple( KeywordSegment(elem.raw, pos_marker=elem.pos_marker) if elem.raw in matcher_keywords else elem for elem in test_segments[result_slice] ) @pytest.mark.parametrize( "matcher_keywords,result_slice,winning_matcher", [ # Basic version, we should find bar first (["bar", "foo"], slice(0, 1), "bar"), # Look ahead for foo (["foo"], slice(2, 3), "foo"), # Duplicate matchers (["foo", "foo"], slice(2, 3), "foo"), (["sadkjfhas", "asefaslf"], slice(0, 0), None), ], ) def test__parser__algorithms__next_match( matcher_keywords, result_slice, winning_matcher, test_segments, ): """Test the `next_match()` method.""" # Make the string parsers for testing. matchers = [StringParser(keyword, KeywordSegment) for keyword in matcher_keywords] # Fetch the matching keyword from above (because it will have the same position) if winning_matcher: winning_matcher = matchers[matcher_keywords.index(winning_matcher)] ctx = ParseContext(dialect=None) match, matcher = next_match( test_segments, 0, matchers, ctx, ) # Check the right matcher was successful. if winning_matcher: assert matcher is winning_matcher else: # If no designated winning matcher, assert that it wasn't successful. assert matcher is None assert not match assert match.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,result_slice,error", [ (["(", "a", ")", " ", "foo"], slice(0, 3), None), (["(", "a", "(", "b", ")", "(", "c", ")", "d", ")", "e"], slice(0, 10), None), # This should error because we try to close a square bracket # inside a round one. (["(", "a", "]", "b", ")", "e"], None, SQLParseError), # This should error because we never find the end. 
(["(", "a", " ", "b", " ", "e"], None, SQLParseError), ], ) def test__parser__algorithms__resolve_bracket( raw_segments, result_slice, error, generate_test_segments ): """Test the `resolve_bracket()` method.""" test_segments = generate_test_segments(raw_segments) start_bracket = StringParser("(", SymbolSegment, type="start_bracket") end_bracket = StringParser(")", SymbolSegment, type="end_bracket") start_sq_bracket = StringParser("[", SymbolSegment, type="start_square_bracket") end_sq_bracket = StringParser("]", SymbolSegment, type="end_square_bracket") ctx = ParseContext(dialect=None) # For this test case we assert that the first segment is the initial match. first_match = start_bracket.match(test_segments, 0, ctx) assert first_match args = (test_segments,) kwargs = dict( opening_match=first_match, opening_matcher=start_bracket, start_brackets=[start_bracket, start_sq_bracket], end_brackets=[end_bracket, end_sq_bracket], bracket_persists=[True, False], parse_context=ctx, ) # If an error is defined, check that it is raised. if error: with pytest.raises(error): resolve_bracket(*args, **kwargs) else: result = resolve_bracket(*args, **kwargs) assert result assert result.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_word,result_slice", [ ([], "foo", slice(0, 0)), (["(", "foo", ")", " ", "foo"], "foo", slice(4, 5)), (["a", " ", "foo", " ", "foo"], "foo", slice(2, 3)), (["foo", " ", "foo", " ", "foo"], "foo", slice(0, 1)), # Error case, unexpected closing bracket. # NOTE: This should never normally happen, but we should # be prepared in case it does so that we return appropriately. (["a", " ", ")", " ", "foo"], "foo", slice(0, 0)), ], ) def test__parser__algorithms__next_ex_bracket_match( raw_segments, target_word, result_slice, generate_test_segments, test_dialect ): """Test the `next_ex_bracket_match()` method.""" test_segments = generate_test_segments(raw_segments) target = StringParser(target_word, KeywordSegment) ctx = ParseContext(dialect=test_dialect) result, _, _ = next_ex_bracket_match( test_segments, 0, matchers=[target], parse_context=ctx, ) assert result.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_words,inc_term,result_slice", [ (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], False, slice(0, 2)), (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], True, slice(0, 4)), # NOTE: Because "b" is_alpha, it needs whitespace before it to match. (["a", "b", " ", "b"], ["b"], True, slice(0, 4)), (["a", "b", " ", "b"], ["b"], False, slice(0, 2)), (["a", "b", "c", " ", "b"], ["b"], False, slice(0, 3)), ], ) def test__parser__algorithms__greedy_match( raw_segments, target_words, inc_term, result_slice, generate_test_segments, test_dialect, ): """Test the `greedy_match()` method.""" test_segments = generate_test_segments(raw_segments) matchers = [StringParser(word, KeywordSegment) for word in target_words] ctx = ParseContext(dialect=test_dialect) match = greedy_match( segments=test_segments, idx=0, parse_context=ctx, matchers=matchers, include_terminator=inc_term, ) assert match assert match.matched_slice == result_slice @pytest.mark.parametrize( "raw_segments,target_words,expected_result", [ # Terminators mid sequence. (["a", "b", " ", "c", "d", " ", "e"], ["e", "c"], 2), # Initial terminators. (["a", "b", " ", "c", "d", " ", "e"], ["a", "e"], 0), # No terminators. (["a", "b", " ", "c", "d", " ", "e"], ["x", "y"], 7), # No sequence. 
([], ["x", "y"], 0), ], ) def test__parser__algorithms__trim_to_terminator( raw_segments, target_words, expected_result, generate_test_segments, test_dialect, ): """Test the `trim_to_terminator()` method.""" test_segments = generate_test_segments(raw_segments) matchers = [StringParser(word, KeywordSegment) for word in target_words] ctx = ParseContext(dialect=test_dialect) assert ( trim_to_terminator( segments=test_segments, idx=0, parse_context=ctx, terminators=matchers, ) == expected_result ) sqlfluff-2.3.5/test/core/parser/match_result_test.py000066400000000000000000000053041451700765000226600ustar00rootroot00000000000000"""Tests for the MatchResult class. NOTE: This is all experimental for now. """ import pytest from sqlfluff.core.parser.match_result import MatchResult from sqlfluff.core.parser.segments import BaseSegment, Dedent, Indent class ExampleSegment(BaseSegment): """A minimal example segment for testing.""" type = "example" def _recursive_assert_pos(segment): assert segment.pos_marker for seg in segment.segments: _recursive_assert_pos(seg) @pytest.mark.parametrize( "segment_seed,match_result,match_len,serialised_result", [ ( ["a", "b", "c", "d", "e"], MatchResult( matched_slice=slice(1, 4), insert_segments=((3, Indent),), child_matches=( MatchResult( matched_slice=slice(2, 3), matched_class=ExampleSegment, insert_segments=((2, Indent),), ), ), ), 3, ( ("raw", "b"), ("example", (("indent", ""), ("raw", "c"))), ("indent", ""), ("raw", "d"), ), ), ( ["a", "b", "c", "d", "e"], MatchResult( matched_slice=slice(1, 4), insert_segments=((2, Dedent),), ), 3, ( ("raw", "b"), ("dedent", ""), ("raw", "c"), ("raw", "d"), ), ), ( ["a"], MatchResult( # An example with only inserts. matched_slice=slice(0, 0), insert_segments=((0, Dedent),), ), 0, (("dedent", ""),), ), ], ) def test__parser__matchresult2_apply( segment_seed, match_result, match_len, serialised_result, generate_test_segments ): """Test MatchResult.apply(). This includes testing instantiating the MatchResult and whether setting some attributes and not others works as expected. """ input_segments = generate_test_segments(segment_seed) # Test the length attribute. # NOTE: It's not the number of segments we'll return, but the span # of the match in the original sequence. assert len(match_result) == match_len out_segments = match_result.apply(input_segments) serialised = tuple( seg.to_tuple(show_raw=True, include_meta=True) for seg in out_segments ) assert serialised == serialised_result # Test that _every_ segment (including metas) has a position marker already. for seg in out_segments: _recursive_assert_pos(seg) sqlfluff-2.3.5/test/core/parser/parse_test.py000066400000000000000000000033531451700765000213020ustar00rootroot00000000000000"""The Test file for The New Parser (Grammar Classes).""" from sqlfluff.core.errors import SQLParseError from sqlfluff.core.linter.linter import Linter from sqlfluff.core.parser import Anything, BaseSegment, KeywordSegment, StringParser from sqlfluff.core.parser.context import ParseContext BarKeyword = StringParser("bar", KeywordSegment) class BasicSegment(BaseSegment): """A basic segment for testing parse and expand.""" type = "basic" match_grammar = Anything() def test__parser__parse_match(test_segments): """Test match method on a real segment.""" ctx = ParseContext(dialect=None) # This should match and have consumed everything, which should # now be part of a BasicSegment. 
    match = BasicSegment.match(test_segments, 0, parse_context=ctx)
    assert match
    matched = match.apply(test_segments)
    assert len(matched) == 1
    assert isinstance(matched[0], BasicSegment)
    assert matched[0].segments[0].type == "raw"


def test__parser__parse_error():
    """Test that SQLParseError is raised for unparsable section."""
    in_str = "SELECT ;"
    lnt = Linter(dialect="ansi")
    parsed = lnt.parse_string(in_str)

    assert len(parsed.violations) == 1
    violation = parsed.violations[0]
    assert isinstance(violation, SQLParseError)
    assert violation.desc() == "Line 1, Position 1: Found unparsable section: 'SELECT'"

    # Check that the expected labels work for logging.
    # TODO: This is more specific than in previous iterations, but we could
    # definitely make this easier to read.
    assert (
        'Expected: "<Delimited: ['
        "<Ref: 'SelectClauseElementSegment'>]> "
        "after <KeywordSegment: ([L:  1, P:  1]) 'SELECT'>. "
        "Found nothing."
    ) in parsed.tree.stringify()
sqlfluff-2.3.5/test/core/parser/parser_test.py000066400000000000000000000155071451700765000214670ustar00rootroot00000000000000"""The Test file for Parsers (Matchable Classes)."""

import pytest

from sqlfluff.core.parser import (
    KeywordSegment,
    MultiStringParser,
    RawSegment,
    RegexParser,
    StringParser,
    TypedParser,
)
from sqlfluff.core.parser.context import ParseContext


def test__parser__repr():
    """Test the __repr__ method of the parsers."""
    # For the string parser note the uppercase template.
    assert repr(StringParser("foo", KeywordSegment)) == "<StringParser: 'FOO'>"
    # NOTE: For MultiStringParser we only test with one element here
    # because for more than one, the order is unpredictable.
    assert (
        repr(MultiStringParser(["a"], KeywordSegment)) == "<MultiStringParser: {'A'}>"
    )
    # For the typed & regex parser it's case sensitive (although lowercase
    # by convention).
    assert repr(TypedParser("foo", KeywordSegment)) == "<TypedParser: 'foo'>"
    assert repr(RegexParser(r"fo|o", KeywordSegment)) == "<RegexParser: 'fo|o'>"


class ExampleSegment(RawSegment):
    """A minimal example segment for testing."""

    type = "example"


def test__parser__typedparser__match(generate_test_segments):
    """Test the match method of TypedParser."""
    parser = TypedParser("single_quote", ExampleSegment)
    ctx = ParseContext(dialect=None)
    # NOTE: The second element of the sequence has single quotes
    # and the test fixture will set the type accordingly.
segments = generate_test_segments(["foo", "'bar'"]) result1 = parser.match(segments, 0, ctx) assert not result1 result2 = parser.match(segments, 1, ctx) assert result2 assert result2.matched_slice == slice(1, 2) assert result2.matched_class is ExampleSegment def test__parser__typedparser__simple(): """Test the simple method of TypedParser.""" parser = TypedParser("single_quote", ExampleSegment) ctx = ParseContext(dialect=None) assert parser.simple(ctx) == (frozenset(), frozenset(["single_quote"])) def test__parser__stringparser__match(generate_test_segments): """Test the match method of StringParser.""" parser = StringParser("foo", ExampleSegment, type="test") ctx = ParseContext(dialect=None) segments = generate_test_segments(["foo", "bar", "foo"]) result1 = parser.match(segments, 0, ctx) assert result1 assert result1.matched_slice == slice(0, 1) assert result1.matched_class is ExampleSegment assert result1.segment_kwargs == {"instance_types": ("test",)} result2 = parser.match(segments, 1, ctx) assert not result2 result3 = parser.match(segments, 2, ctx) assert result3 assert result3.matched_slice == slice(2, 3) assert result3.matched_class is ExampleSegment assert result3.segment_kwargs == {"instance_types": ("test",)} def test__parser__stringparser__simple(): """Test the simple method of StringParser.""" parser = StringParser("foo", ExampleSegment) ctx = ParseContext(dialect=None) assert parser.simple(ctx) == (frozenset(["FOO"]), frozenset()) def test__parser__regexparser__match(generate_test_segments): """Test the match method of RegexParser.""" parser = RegexParser(r"b.r", ExampleSegment) ctx = ParseContext(dialect=None) segments = generate_test_segments(["foo", "bar", "boo"]) assert not parser.match(segments, 0, ctx) assert not parser.match(segments, 2, ctx) result = parser.match(segments, 1, ctx) assert result assert result.matched_slice == slice(1, 2) assert result.matched_class is ExampleSegment def test__parser__regexparser__simple(): """Test the simple method of RegexParser.""" parser = RegexParser(r"b.r", ExampleSegment) ctx = ParseContext(dialect=None) assert parser.simple(ctx) is None def test__parser__multistringparser__match(generate_test_segments): """Test the match method of MultiStringParser.""" parser = MultiStringParser(["foo", "bar"], ExampleSegment) ctx = ParseContext(dialect=None) segments = generate_test_segments(["foo", "fo", "bar", "boo"]) assert not parser.match(segments, 1, ctx) assert not parser.match(segments, 3, ctx) result1 = parser.match(segments, 0, ctx) assert result1 assert result1.matched_slice == slice(0, 1) assert result1.matched_class is ExampleSegment result2 = parser.match(segments, 2, ctx) assert result2 assert result2.matched_slice == slice(2, 3) assert result2.matched_class is ExampleSegment def test__parser__multistringparser__simple(): """Test the MultiStringParser matchable.""" parser = MultiStringParser(["foo", "bar"], KeywordSegment) ctx = ParseContext(dialect=None) assert parser.simple(ctx) == (frozenset(["FOO", "BAR"]), frozenset()) @pytest.mark.parametrize( "new_type", [None, "bar"], ) def test__parser__typedparser_rematch(new_type, generate_test_segments): """Test that TypedParser allows rematching. Because the TypedParser looks for types and then changes the type as a result, there is a risk of preventing rematching. This is a problem because we use it when checking that fix edits haven't broken the parse tree. 
In this example the TypedParser is looking for a "single_quote" type segment, but is due to mutate to an Example segment, which inherits directly from `RawSegment`. Unless the TypedParser steps in, this would apparently present a rematching issue. """ pre_match_types = { "single_quote", "raw", "base", } post_match_types = { # Make sure we got the "example" class "example", # But we *also* get the "single_quote" class. # On the second pass this is the main crux of the test. "single_quote", "raw", "base", } kwargs = {} expected_type = "example" if new_type: post_match_types.add(new_type) kwargs = {"type": new_type} expected_type = new_type segments = generate_test_segments(["'foo'"]) # Check types pre-match assert segments[0].class_types == pre_match_types parser = TypedParser("single_quote", ExampleSegment, **kwargs) # Just check that our assumptions about inheritance are right. assert not ExampleSegment.class_is_type("single_quote") ctx = ParseContext(dialect=None) match1 = parser.match(segments, 0, ctx) assert match1 segments1 = match1.apply(segments) # Check types post-match 1 assert segments1[0].class_types == post_match_types assert segments1[0].get_type() == expected_type assert segments1[0].to_tuple(show_raw=True) == (expected_type, "'foo'") # Do a rematch to check it works. match = parser.match(segments1, 0, ctx) assert match # Check types post-match 2 segments2 = match.apply(segments1) assert segments2[0].class_types == post_match_types assert segments2[0].get_type() == expected_type assert segments2[0].to_tuple(show_raw=True) == (expected_type, "'foo'") sqlfluff-2.3.5/test/core/parser/segments/000077500000000000000000000000001451700765000204005ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/parser/segments/__init__.py000066400000000000000000000000451451700765000225100ustar00rootroot00000000000000"""Tests for the segments module.""" sqlfluff-2.3.5/test/core/parser/segments/conftest.py000066400000000000000000000016651451700765000226070ustar00rootroot00000000000000"""Common fixtures for segment tests.""" import pytest from sqlfluff.core.parser import BaseSegment @pytest.fixture(scope="module") def raw_segments(generate_test_segments): """Construct a list of raw segments as a fixture.""" return generate_test_segments(["foobar", ".barfoo"]) @pytest.fixture(scope="module") def raw_seg(raw_segments): """Construct a raw segment as a fixture.""" return raw_segments[0] @pytest.fixture(scope="session") def DummySegment(): """Construct a dummy segment class as a fixture.""" class DummySegment(BaseSegment): """A dummy segment for testing with no grammar.""" type = "dummy" return DummySegment @pytest.fixture(scope="session") def DummyAuxSegment(): """Construct a second dummy segment class as a fixture.""" class DummyAuxSegment(BaseSegment): """A different dummy segment for testing with no grammar.""" type = "dummy_aux" return DummyAuxSegment sqlfluff-2.3.5/test/core/parser/segments/segments_base_test.py000066400000000000000000000264341451700765000246410ustar00rootroot00000000000000"""Test the BaseSegment class.""" import pickle import pytest from sqlfluff.core.parser import BaseSegment, PositionMarker, RawSegment from sqlfluff.core.parser.segments.base import PathStep from sqlfluff.core.rules.base import LintFix from sqlfluff.core.templaters import TemplatedFile def test__parser__base_segments_type(DummySegment): """Test the .is_type() method.""" assert BaseSegment.class_is_type("base") assert not BaseSegment.class_is_type("foo") assert not BaseSegment.class_is_type("foo", "bar") assert
DummySegment.class_is_type("dummy") assert DummySegment.class_is_type("base") assert DummySegment.class_is_type("base", "foo", "bar") def test__parser__base_segments_class_types(DummySegment): """Test the metaclass ._class_types attribute.""" assert DummySegment._class_types == {"dummy", "base"} def test__parser__base_segments_descendant_type_set( raw_segments, DummySegment, DummyAuxSegment ): """Test the .descendant_type_set() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.descendant_type_set == {"raw", "base", "dummy_aux"} def test__parser__base_segments_direct_descendant_type_set( raw_segments, DummySegment, DummyAuxSegment ): """Test the .direct_descendant_type_set() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.direct_descendant_type_set == {"base", "dummy_aux"} def test__parser__base_segments_to_tuple_a(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.to_tuple() == ( "dummy", (("dummy_aux", (("raw", ()), ("raw", ()))),), ) def test__parser__base_segments_to_tuple_b(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method.""" test_seg = DummySegment( [DummyAuxSegment(raw_segments + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.to_tuple() == ( "dummy", (("dummy_aux", (("raw", ()), ("raw", ()), ("dummy_aux", (("raw", ()),)))),), ) def test__parser__base_segments_to_tuple_c(raw_segments, DummySegment, DummyAuxSegment): """Test the .to_tuple() method with show_raw=True.""" test_seg = DummySegment( [DummyAuxSegment(raw_segments + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.to_tuple(show_raw=True) == ( "dummy", ( ( "dummy_aux", ( ("raw", "foobar"), ("raw", ".barfoo"), ("dummy_aux", (("raw", "foobar"),)), ), ), ), ) def test__parser__base_segments_as_record_a( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method. NOTE: In this test, note that there are lists, as some segment types are duplicated within their parent segment. """ test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.as_record() == { "dummy": {"dummy_aux": [{"raw": None}, {"raw": None}]} } def test__parser__base_segments_as_record_b( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method. NOTE: In this test, note that there are no lists, every segment type is unique within its parent segment, and so there is no need. """ test_seg = DummySegment( [DummyAuxSegment(raw_segments[:1] + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.as_record() == { "dummy": {"dummy_aux": {"raw": None, "dummy_aux": {"raw": None}}} } def test__parser__base_segments_as_record_c( raw_segments, DummySegment, DummyAuxSegment ): """Test the .as_record() method with show_raw=True. NOTE: In this test, note that there are no lists, every segment type is unique within its parent segment, and so there is no need.
""" test_seg = DummySegment( [DummyAuxSegment(raw_segments[:1] + (DummyAuxSegment(raw_segments[:1]),))] ) assert test_seg.as_record(show_raw=True) == { "dummy": {"dummy_aux": {"raw": "foobar", "dummy_aux": {"raw": "foobar"}}} } def test__parser__base_segments_count_segments( raw_segments, DummySegment, DummyAuxSegment ): """Test the .count_segments() method.""" test_seg = DummySegment([DummyAuxSegment(raw_segments)]) assert test_seg.count_segments() == 4 assert test_seg.count_segments(raw_only=True) == 2 @pytest.mark.parametrize( "list_in, result", [ (["foo"], False), (["foo", " "], True), ([" ", "foo", " "], True), ([" ", "foo"], True), ([" "], True), (["foo", " ", "foo"], False), ], ) def test__parser_base_segments_validate_non_code_ends( generate_test_segments, DummySegment, list_in, result ): """Test BaseSegment.validate_non_code_ends().""" if result: # Assert that it _does_ raise an exception. with pytest.raises(AssertionError): # Validation happens on instantiation. seg = DummySegment(segments=generate_test_segments(list_in)) else: # Check that it _doesn't_ raise an exception... seg = DummySegment(segments=generate_test_segments(list_in)) # ...even when explicitly validating. seg.validate_non_code_ends() def test__parser__base_segments_path_to(raw_segments, DummySegment, DummyAuxSegment): """Test the .path_to() method.""" test_seg_a = DummyAuxSegment(raw_segments) test_seg_b = DummySegment([test_seg_a]) # With a direct parent/child relationship we only get # one element of path. # NOTE: All the dummy segments return True for .is_code() # so that means the do appear in code_idxs. assert test_seg_b.path_to(test_seg_a) == [PathStep(test_seg_b, 0, 1, (0,))] # With a three segment chain - we get two path elements. assert test_seg_b.path_to(raw_segments[0]) == [ PathStep(test_seg_b, 0, 1, (0,)), PathStep(test_seg_a, 0, 2, (0, 1)), ] assert test_seg_b.path_to(raw_segments[1]) == [ PathStep(test_seg_b, 0, 1, (0,)), PathStep(test_seg_a, 1, 2, (0, 1)), ] def test__parser__base_segments_stubs(): """Test stub methods that have no implementation in base class.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) base_segment = BaseSegment(segments=[rs1]) with pytest.raises(NotImplementedError): base_segment.edit("foo") def test__parser__base_segments_raw(raw_seg): """Test raw segments behave as expected.""" # Check Segment Return assert raw_seg.segments == () assert raw_seg.raw == "foobar" # Check Formatting and Stringification assert str(raw_seg) == repr(raw_seg) == "" assert ( raw_seg.stringify(ident=1, tabsize=2) == "[L: 1, P: 1] | raw: " " 'foobar'\n" ) # Check tuple assert raw_seg.to_tuple() == ("raw", ()) # Check tuple assert raw_seg.to_tuple(show_raw=True) == ("raw", "foobar") def test__parser__base_segments_base(raw_segments, fresh_ansi_dialect, DummySegment): """Test base segments behave as expected.""" base_seg = DummySegment(raw_segments) # Check we assume the position correctly assert ( base_seg.pos_marker.start_point_marker() == raw_segments[0].pos_marker.start_point_marker() ) assert ( base_seg.pos_marker.end_point_marker() == raw_segments[-1].pos_marker.end_point_marker() ) # Check that we correctly reconstruct the raw assert base_seg.raw == "foobar.barfoo" # Check tuple assert base_seg.to_tuple() == ( "dummy", (raw_segments[0].to_tuple(), raw_segments[1].to_tuple()), ) # Check Formatting and Stringification assert str(base_seg) == repr(base_seg) == "" assert base_seg.stringify(ident=1, tabsize=2) == ( "[L: 
1, P: 1] | dummy:\n" "[L: 1, P: 1] | raw: " " 'foobar'\n" "[L: 1, P: 7] | raw: " " '.barfoo'\n" ) def test__parser__base_segments_raw_compare(): """Test comparison of raw segments.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) rs2 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) assert rs1 == rs2 def test__parser__base_segments_base_compare(DummySegment, DummyAuxSegment): """Test comparison of base segments.""" template = TemplatedFile.from_string("foobar") rs1 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) rs2 = RawSegment("foobar", PositionMarker(slice(0, 6), slice(0, 6), template)) ds1 = DummySegment([rs1]) ds2 = DummySegment([rs2]) dsa2 = DummyAuxSegment([rs2]) # Check for equality assert ds1 == ds2 # Check that a different match on the same details is not the same assert ds1 != dsa2 def test__parser__base_segments_pickle_safe(raw_segments): """Test pickling and unpickling of BaseSegment.""" test_seg = BaseSegment([BaseSegment(raw_segments)]) test_seg.set_as_parent() pickled = pickle.dumps(test_seg) result_seg = pickle.loads(pickled) assert test_seg == result_seg # Check specifically the treatment of the parent position. assert result_seg.segments[0].get_parent()[0] is result_seg def test__parser__base_segments_copy_isolation(DummySegment, raw_segments): """Test copy isolation in BaseSegment. First on one of the raws and then on the dummy segment. """ # On a raw a_seg = raw_segments[0] a_copy = a_seg.copy() assert a_seg is not a_copy assert a_seg == a_copy assert a_seg.pos_marker is a_copy.pos_marker a_copy.pos_marker = None assert a_copy.pos_marker is None assert a_seg.pos_marker is not None # On a base b_seg = DummySegment(segments=raw_segments) b_copy = b_seg.copy() assert b_seg is not b_copy assert b_seg == b_copy assert b_seg.pos_marker is b_copy.pos_marker b_copy.pos_marker = None assert b_copy.pos_marker is None assert b_seg.pos_marker is not None # On addition to a lint Fix fix = LintFix("replace", a_seg, [b_seg]) for s in fix.edit: assert not s.pos_marker assert b_seg.pos_marker def test__parser__base_segments_parent_ref(DummySegment, raw_segments): """Test getting and setting parents on BaseSegment.""" # Check initially no parent (because not set) assert not raw_segments[0].get_parent() # Add it to a segment (which also sets the parent value) seg = DummySegment(segments=raw_segments) # The DummySegment shouldn't have a parent. assert seg.get_parent() is None assert seg.segments[0].get_parent()[0] is seg assert seg.segments[1].get_parent()[0] is seg # Remove segment from parent, but don't unset. # Should still check and return None. seg_0 = seg.segments[0] seg.segments = seg.segments[1:] assert seg_0 not in seg.segments assert not seg_0.get_parent() # Check the other still works.
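# (i.e. the child which is still attached should resolve its parent to `seg`.)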
assert seg.segments[0].get_parent()[0] sqlfluff-2.3.5/test/core/parser/segments/segments_common_test.py000066400000000000000000000022011451700765000252010ustar00rootroot00000000000000"""Test the KeywordSegment class.""" from sqlfluff.core.parser import KeywordSegment, StringParser from sqlfluff.core.parser.context import ParseContext from sqlfluff.core.parser.match_result import MatchResult def test__parser__core_keyword(raw_segments): """Test the Mystical KeywordSegment.""" # First make a keyword FooKeyword = StringParser("foobar", KeywordSegment, type="bar") # Check it looks as expected assert FooKeyword.template.upper() == "FOOBAR" ctx = ParseContext(dialect=None) # Match it against a list and check it doesn't match assert not FooKeyword.match(raw_segments, 1, parse_context=ctx) # Match it against the first element (returns a MatchResult) m = FooKeyword.match(raw_segments, 0, parse_context=ctx) assert m assert m == MatchResult( matched_slice=slice(0, 1), matched_class=KeywordSegment, segment_kwargs={"instance_types": ("bar",)}, ) segments = m.apply(raw_segments) assert len(segments) == 1 segment = segments[0] assert segment.class_types == { "base", "word", "keyword", "raw", "bar", } sqlfluff-2.3.5/test/core/parser/segments/segments_file_test.py000066400000000000000000000006611451700765000246400ustar00rootroot00000000000000"""Test the BaseFileSegment class.""" from sqlfluff.core.parser import BaseFileSegment def test__parser__base_segments_file(raw_segments): """Test that BaseFileSegment behaves as expected.""" base_seg = BaseFileSegment(raw_segments, fname="/some/dir/file.sql") assert base_seg.type == "file" assert base_seg.file_path == "/some/dir/file.sql" assert base_seg.can_start_end_non_code assert base_seg.allow_empty sqlfluff-2.3.5/test/core/parser/segments/segments_raw_test.py000066400000000000000000000017061451700765000245130ustar00rootroot00000000000000"""Test the RawSegment class.""" from sqlfluff.core.parser.segments.base import PathStep def test__parser__raw_get_raw_segments(raw_segments): """Test niche case of calling get_raw_segments on a raw segment.""" for s in raw_segments: assert s.get_raw_segments() == [s] def test__parser__raw_segments_with_ancestors( raw_segments, DummySegment, DummyAuxSegment ): """Test raw_segments_with_ancestors. This is used in the reflow module to assess parse depth. """ test_seg = DummySegment([DummyAuxSegment(raw_segments[:1]), raw_segments[1]]) # Result should be the same raw segment, but with appropriate parents assert test_seg.raw_segments_with_ancestors == [ ( raw_segments[0], [ PathStep(test_seg, 0, 2, (0, 1)), PathStep(test_seg.segments[0], 0, 1, (0,)), ], ), (raw_segments[1], [PathStep(test_seg, 1, 2, (0, 1))]), ] sqlfluff-2.3.5/test/core/plugin_test.py000066400000000000000000000062531451700765000201740ustar00rootroot00000000000000"""Plugin related tests.""" import logging import sys import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.core.plugin.host import get_plugin_manager, purge_plugin_manager from sqlfluff.utils.testing.logging import fluff_log_catcher def test__plugin_manager_registers_example_plugin(): """Test that the example plugin is registered. This test also tests that warnings are raised on the import of plugins which have their imports in the wrong place (e.g. the example plugin). That means we need to make sure the plugin is definitely reimported at the start of this test, so we can see any warnings raised on imports.
To do this we clear the plugin manager cache and also forcibly unload the example plugin modules if they are already loaded. This ensures that we can capture any warnings raised by importing the module. """ purge_plugin_manager() # We still do a try/except here, even though it's only run within # the context of a test because the module may or may not already # be imported depending on the order that the tests run in. try: del sys.modules["sqlfluff_plugin_example"] except KeyError: pass try: del sys.modules["sqlfluff_plugin_example.rules"] except KeyError: pass with fluff_log_catcher(logging.WARNING, "sqlfluff.rules") as caplog: plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic. # Use sets in case the dbt plugin (or other plugins) are # already installed too. installed_plugins = set( plugin_module.__name__ for plugin_module in plugin_manager.get_plugins() ) print(f"Installed plugins: {installed_plugins}") assert installed_plugins.issuperset( { "sqlfluff_plugin_example", "sqlfluff.core.plugin.lib", } ) # At this stage we should also check that the example plugin # also raises a warning for its import location. assert ( "Rule 'Rule_Example_L001' has been imported before all plugins " "have been fully loaded" ) in caplog.text @pytest.mark.parametrize( "rule_ref", # Check the V1 plugin ["Rule_Example_L001"], ) def test__plugin_example_rules_returned(rule_ref): """Test that the example rules from the plugin are returned.""" plugin_manager = get_plugin_manager() # The plugin import order is non-deterministic rule_names = [ rule.__name__ for rules in plugin_manager.hook.get_rules() for rule in rules ] print(f"Rule names: {rule_names}") assert rule_ref in rule_names @pytest.mark.parametrize( "rule_ref,config_option", # Check both V1 and V2 rule plugins. [("Example_L001", "forbidden_columns")], ) def test__plugin_default_config_read(rule_ref, config_option): """Test that the example plugin default config is merged into FluffConfig.""" fluff_config = FluffConfig(overrides={"dialect": "ansi"}) # The plugin import order is non-deterministic print(f"Detected config sections: {fluff_config._configs['rules'].keys()}") # Check V1 assert config_option in fluff_config._configs["rules"][rule_ref] sqlfluff-2.3.5/test/core/rules/000077500000000000000000000000001451700765000164115ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/rules/__init__.py000066400000000000000000000003141451700765000205200ustar00rootroot00000000000000"""Tests for the rules module. Where possible, tests should be contained within the yaml test cases. For python based tests where this is not possible, the rule code should be in the test file name.
""" sqlfluff-2.3.5/test/core/rules/crawlers_test.py000066400000000000000000000026761451700765000216570ustar00rootroot00000000000000"""Tests for crawlers.""" import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.core.linter.linter import Linter from sqlfluff.core.rules.context import RuleContext from sqlfluff.core.rules.crawlers import ( ParentOfSegmentCrawler, RootOnlyCrawler, SegmentSeekerCrawler, ) from sqlfluff.core.templaters.base import TemplatedFile @pytest.mark.parametrize( "CrawlerType,crawler_kwargs,raw_sql_in,target_raws_out", [ (RootOnlyCrawler, {}, "SELECT 1 + 2", ["SELECT 1 + 2"]), ( SegmentSeekerCrawler, {"types": {"numeric_literal"}}, "SELECT 1 + 2", ["1", "2"], ), ( ParentOfSegmentCrawler, {"types": {"numeric_literal"}}, "SELECT 1 + 2", ["1 + 2"], ), ], ) def test_rules_crawlers(CrawlerType, crawler_kwargs, raw_sql_in, target_raws_out): """Test Crawlers.""" cfg = FluffConfig(overrides={"dialect": "ansi"}) linter = Linter(config=cfg) root = linter.parse_string(raw_sql_in).tree root_context = RuleContext( dialect=cfg.get("dialect_obj"), fix=True, templated_file=TemplatedFile(raw_sql_in, ""), path=None, segment=root, config=cfg, ) crawler = CrawlerType(**crawler_kwargs) result_raws = [context.segment.raw for context in crawler.crawl(root_context)] assert result_raws == target_raws_out sqlfluff-2.3.5/test/core/rules/docstring_test.py000066400000000000000000000036071451700765000220240ustar00rootroot00000000000000"""Test rules docstring.""" import re import pytest from sqlfluff import lint from sqlfluff.core.plugin.host import get_plugin_manager KEYWORD_ANTI = re.compile(r" \*\*Anti-pattern\*\*") KEYWORD_BEST = re.compile(r" \*\*Best practice\*\*") KEYWORD_CODE_BLOCK = re.compile(r"\n \.\. code-block:: (sql|jinja)\n") @pytest.mark.parametrize( "content,min_count", [ (KEYWORD_ANTI, 1), (KEYWORD_BEST, 1), (KEYWORD_CODE_BLOCK, 2), ], ) def test_content_count(content, min_count): """Test docstring have specific content.""" for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: if rule._check_docstring is True: assert len(content.findall(rule.__doc__)) >= min_count, ( f"{rule.__name__} content {content} does not occur at least " f"{min_count} times" ) def test_keyword_anti_before_best(): """Test docstring anti pattern before best pattern.""" for plugin_rules in get_plugin_manager().hook.get_rules(): for rule in plugin_rules: if rule._check_docstring is True: best_pos = KEYWORD_BEST.search(rule.__doc__).start() anti_pos = KEYWORD_ANTI.search(rule.__doc__).start() assert anti_pos < best_pos, ( f"{rule.__name__} keyword {KEYWORD_BEST} appears before " f"{KEYWORD_ANTI}" ) def test_backtick_replace(): """Test replacing docstring double backticks for lint results.""" sql = """ SELECT DISTINCT(a), b FROM foo """ result = lint(sql, rules=["ST08"]) # ST08 docstring looks like: # ``DISTINCT`` used with parentheses. # Check the double bacticks (``) get replaced by a single quote ('). assert result[0]["description"] == "'DISTINCT' used with parentheses." 
sqlfluff-2.3.5/test/core/rules/fix_test.py000066400000000000000000000030661451700765000206150ustar00rootroot00000000000000"""Test routines for fixing errors.""" import pytest from sqlfluff.core.rules.fix import LintFix, compute_anchor_edit_info @pytest.fixture(scope="module") def raw_segments(generate_test_segments): """Construct a list of raw segments as a fixture.""" return generate_test_segments(["foobar", ".barfoo"]) def test__rules_base_segments_compute_anchor_edit_info(raw_segments): """Test BaseSegment.compute_anchor_edit_info().""" # Construct a fix buffer, intentionally with: # - one duplicate. # - two different incompatible fixes on the same segment. fixes = [ LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="b")]), ] anchor_info_dict = compute_anchor_edit_info(fixes) # Check the target segment is the only key we have. assert list(anchor_info_dict.keys()) == [raw_segments[0].uuid] anchor_info = anchor_info_dict[raw_segments[0].uuid] # Check that the duplicate has been deduplicated. # i.e. this isn't 3. assert anchor_info.replace == 2 # Check the fixes themselves. # NOTE: There's no duplicated first fix. assert anchor_info.fixes == [ LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="a")]), LintFix.replace(raw_segments[0], [raw_segments[0].edit(raw="b")]), ] # Check the first replace assert anchor_info._first_replace == LintFix.replace( raw_segments[0], [raw_segments[0].edit(raw="a")] ) sqlfluff-2.3.5/test/core/rules/functional/000077500000000000000000000000001451700765000205535ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/rules/functional/test_raw_file_slices.py000066400000000000000000000030431451700765000253160ustar00rootroot00000000000000"""Tests for the raw_file_slices module.""" import pytest from sqlfluff.core.templaters.base import RawFileSlice from sqlfluff.utils.functional import raw_file_slices rs_templated_abc = RawFileSlice("{{abc}}", "templated", 0) rs_templated_def = RawFileSlice("{{def}}", "templated", 0) rs_literal_abc = RawFileSlice("abc", "literal", 0) @pytest.mark.parametrize( ["input", "expected"], [ [ raw_file_slices.RawFileSlices(rs_templated_abc, templated_file=None), True, ], [ raw_file_slices.RawFileSlices(rs_templated_def, templated_file=None), False, ], [ raw_file_slices.RawFileSlices( rs_templated_abc, rs_templated_def, templated_file=None ), False, ], ], ) def test_slices_all(input, expected): """Test the "all()" function.""" assert input.all(lambda s: "abc" in s.raw) == expected @pytest.mark.parametrize( ["input", "expected"], [ [ raw_file_slices.RawFileSlices(rs_templated_abc, templated_file=None), True, ], [ raw_file_slices.RawFileSlices(rs_templated_def, templated_file=None), False, ], [ raw_file_slices.RawFileSlices( rs_templated_abc, rs_templated_def, templated_file=None ), True, ], ], ) def test_slices_any(input, expected): """Test the "any()" function.""" assert input.any(lambda s: "abc" in s.raw) == expected sqlfluff-2.3.5/test/core/rules/functional/test_segments.py000066400000000000000000000104761451700765000240190ustar00rootroot00000000000000"""Tests for the segments module.""" import pytest import sqlfluff.utils.functional.segment_predicates as sp from sqlfluff.core.linter.linter import Linter from sqlfluff.core.parser.segments.raw import RawSegment from sqlfluff.utils.functional import segments seg1 = RawSegment("s1") seg2 = RawSegment("s2") seg3 = RawSegment("s3") seg4 =
RawSegment("s4") @pytest.mark.parametrize( ["lhs", "rhs", "expected"], [ [ segments.Segments(seg1, seg2), segments.Segments(seg3, seg4), segments.Segments(seg1, seg2, seg3, seg4), ], [ segments.Segments(seg3, seg4), segments.Segments(seg1, seg2), segments.Segments(seg3, seg4, seg1, seg2), ], [ segments.Segments(seg1, seg2), [seg3, seg4], segments.Segments(seg1, seg2, seg3, seg4), ], [ [seg1, seg2], segments.Segments(seg3, seg4), segments.Segments(seg1, seg2, seg3, seg4), ], ], ) def test_segments_add(lhs, rhs, expected): """Verify addition of Segments objects with themselves and lists.""" result = lhs + rhs assert isinstance(result, segments.Segments) assert result == expected @pytest.mark.parametrize( ["input", "expected"], [ [ segments.Segments(seg1, seg2), True, ], [ segments.Segments(seg1, seg3), False, ], ], ) def test_segments_all(input, expected): """Test the "all()" function.""" assert input.all(lambda s: s.raw[-1] <= "2") == expected @pytest.mark.parametrize( ["input", "expected"], [ [ segments.Segments(seg1, seg2), True, ], [ segments.Segments(seg1, seg3), True, ], [ segments.Segments(seg3), False, ], ], ) def test_segments_any(input, expected): """Test the "any()" function.""" assert input.any(lambda s: s.raw[-1] <= "2") == expected def test_segments_reversed(): """Test the "reversed()" function.""" assert segments.Segments(seg1, seg2).reversed() == segments.Segments(seg2, seg1) def test_segments_raw_slices_no_templated_file(): """Test that raw_slices() fails if TemplatedFile not provided.""" with pytest.raises(ValueError): segments.Segments(seg1).raw_slices def test_segments_first_no_predicate(): """Test the "first()" function with no predicate.""" assert segments.Segments(seg1, seg2).first() == segments.Segments(seg1) def test_segments_first_with_predicate(): """Test the "first()" function with a predicate.""" assert segments.Segments(seg1, seg2).first(sp.is_meta()) == segments.Segments() def test_segments_last(): """Test the "last()" function.""" assert segments.Segments(seg1, seg2).last() == segments.Segments(seg2) def test_segments_apply(): """Test the "apply()" function.""" assert segments.Segments(seg1, seg2).apply(lambda s: s.raw[-1]) == ["1", "2"] @pytest.mark.parametrize( ["function", "expected"], [ [sp.get_type(), ["raw", "raw"]], [sp.is_comment(), [False, False]], [sp.is_raw(), [True, True]], ], ) def test_segments_apply_functions(function, expected): """Test the "apply()" function with the "get_name()" function.""" assert segments.Segments(seg1, seg2).apply(function) == expected def test_segment_predicates_and(): """Test the "and_()" function.""" assert segments.Segments(seg1, seg2).select( select_if=sp.and_(sp.is_raw(), lambda s: s.raw[-1] == "1") ) == segments.Segments(seg1) assert ( segments.Segments(seg1, seg2).select( select_if=sp.and_(sp.is_raw(), lambda s: s.raw[-1] == "3") ) == segments.Segments() ) def test_segments_recursive_crawl(): """Test the "recursive_crawl()" function.""" sql = """ WITH cte AS ( SELECT * FROM tab_a ) SELECT cte.col_a, tab_b.col_b FROM cte INNER JOIN tab_b; """ linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) functional_tree = segments.Segments(parsed.tree) assert len(functional_tree.recursive_crawl("common_table_expression")) == 1 assert len(functional_tree.recursive_crawl("table_reference")) == 3 sqlfluff-2.3.5/test/core/rules/noqa_test.py000066400000000000000000000410011451700765000207540ustar00rootroot00000000000000"""Tests for applying noqa directives and the IgnoreMask.""" from typing import List import pytest 
from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import ( SQLBaseError, SQLParseError, ) from sqlfluff.core.rules.noqa import IgnoreMask, NoQaDirective # noqa tests require a rule_set, therefore we construct a dummy rule set for glob matching. dummy_rule_map = Linter().get_rulepack().reference_map class DummyLintError(SQLBaseError): """Fake lint error used by tests, similar to SQLLintError.""" def __init__(self, line_no: int, code: str = "LT01"): self._code = code super().__init__(line_no=line_no) def test__linter__raises_malformed_noqa(): """A badly formatted noqa gets raised as a parsing error.""" lntr = Linter(dialect="ansi") result = lntr.lint_string_wrapped("select 1 --noqa missing semicolon") with pytest.raises(SQLParseError): result.check_tuples() @pytest.mark.parametrize( "input,expected", [ ("", None), ("noqa", NoQaDirective(0, 0, None, None, "noqa")), ("noqa?", SQLParseError), ("noqa:", NoQaDirective(0, 0, None, None, "noqa:")), ( "noqa:LT01,LT02", NoQaDirective(0, 0, ("LT01", "LT02"), None, "noqa:LT01,LT02"), ), ( "noqa: enable=LT01", NoQaDirective(0, 0, ("LT01",), "enable", "noqa: enable=LT01"), ), ( "noqa: disable=CP01", NoQaDirective(0, 0, ("CP01",), "disable", "noqa: disable=CP01"), ), ( "noqa: disable=all", NoQaDirective(0, 0, None, "disable", "noqa: disable=all"), ), ("noqa: disable", SQLParseError), ( "Inline comment before inline ignore -- noqa:LT01,LT02", NoQaDirective(0, 0, ("LT01", "LT02"), None, "noqa:LT01,LT02"), ), # Test selection with rule globs ( "noqa:L04*", NoQaDirective( 0, 0, ( "AM04", # L044 is an alias of AM04 "CP04", # L040 is an alias of CP04 "CV04", # L047 is an alias of CV04 "CV05", # L049 is an alias of CV05 "JJ01", # L046 is an alias of JJ01 "LT01", # L048 is an alias of LT01 "LT10", # L041 is an alias of LT10 "ST02", # L043 is an alias of ST02 "ST03", # L045 is an alias of ST03 "ST05", # L042 is an alias of ST05 ), None, "noqa:L04*", ), ), # Test selection with aliases. ( "noqa:L002", NoQaDirective(0, 0, ("LT02",), None, "noqa:L002"), ), # Test selection with alias globs. ( "noqa:L00*", NoQaDirective( 0, 0, ("LT01", "LT02", "LT03", "LT12"), None, "noqa:L00*", ), ), # Test selection with names. ( "noqa:capitalisation.keywords", NoQaDirective(0, 0, ("CP01",), None, "noqa:capitalisation.keywords"), ), # Test selection with groups. ( "noqa:capitalisation", NoQaDirective( 0, 0, ("CP01", "CP02", "CP03", "CP04", "CP05"), None, "noqa:capitalisation", ), ), ], ) def test_parse_noqa(input, expected): """Test correct parsing of "noqa" comments.""" result = IgnoreMask._parse_noqa(input, 0, 0, reference_map=dummy_rule_map) if not isinstance(expected, type): assert result == expected else: # With exceptions, just check the type, not the contents.
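# (e.g. "noqa?" and "noqa: disable" in the cases above should both come back
# as a SQLParseError instance rather than a NoQaDirective.)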
assert isinstance(result, expected) def test_parse_noqa_no_dups(): """Test overlapping glob expansions don't return duplicate rules in noqa.""" result = IgnoreMask._parse_noqa( comment="noqa:L0*5,L01*", line_no=0, line_pos=0, reference_map=dummy_rule_map ) assert len(result.rules) == len(set(result.rules)) @pytest.mark.parametrize( "noqa,violations,expected,used_noqas", [ [ [], [DummyLintError(1)], [ 0, ], [], ], [ [dict(comment="noqa: LT01", line_no=1)], [DummyLintError(1)], [], [0], ], [ [dict(comment="noqa: LT01", line_no=2)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: LT02", line_no=1)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: enable=LT01", line_no=1)], [DummyLintError(1)], [0], [], ], [ [dict(comment="noqa: disable=LT01", line_no=1)], [DummyLintError(1)], [], [0], ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(1)], [0], [], # The disable wasn't used, neither was the enable. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(2)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(3)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [DummyLintError(4)], [0], [1], # The enable was matched, but the disable wasn't used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(1)], [0], # TODO: This is an odd edge case, where we drop out in our # evaluation too early so see whether the "enable" is ever # matched. In this case _both_ are effectively unused, because # we never evaluate the last one. For a first pass I think this # might be an acceptable edge case. [], ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(2)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(3)], [], [0, 1], # Both were used. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [DummyLintError(4)], [0], [1], # The enable was matched, but the disable wasn't used. ], [ [ dict(comment="noqa: disable=LT01", line_no=2), dict(comment="noqa: enable=all", line_no=4), ], [ DummyLintError(2, code="LT01"), DummyLintError(2, code="LT02"), DummyLintError(4, code="LT01"), DummyLintError(4, code="LT02"), ], [1, 2, 3], [0, 1], # The enable matched. The disable also matched rules. ], [ [ dict(comment="noqa: disable=all", line_no=2), dict(comment="noqa: enable=LT01", line_no=4), ], [ DummyLintError(2, code="LT01"), DummyLintError(2, code="LT02"), DummyLintError(4, code="LT01"), DummyLintError(4, code="LT02"), ], [2], [0, 1], # The enable matched the disable. The disable also matched ], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=1, ) ], [DummyLintError(1)], [0], [], ], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=1, ), dict( comment="Inline comment before inline ignore -- noqa: LT02", line_no=2, ), ], [ DummyLintError(1), DummyLintError(2), ], [0, 1], [], # Neither used because wrong code. ], [ [ dict( comment="Inline comment before inline ignore -- noqa: L01*", line_no=1, ), ], [ DummyLintError(1), ], [0], [], # Neither used because wrong code. 
], [ [ dict( comment="Inline comment before inline ignore -- noqa: LT*", line_no=1, ), ], [ DummyLintError(1), ], [], [0], # Matched indirectly ], ], ids=[ "1_violation_no_ignore", "1_violation_ignore_specific_line", "1_violation_ignore_different_specific_line", "1_violation_ignore_different_specific_rule", "1_violation_ignore_enable_this_range", "1_violation_ignore_disable_this_range", "1_violation_line_1_ignore_disable_specific_2_3", "1_violation_line_2_ignore_disable_specific_2_3", "1_violation_line_3_ignore_disable_specific_2_3", "1_violation_line_4_ignore_disable_specific_2_3", "1_violation_line_1_ignore_disable_all_2_3", "1_violation_line_2_ignore_disable_all_2_3", "1_violation_line_3_ignore_disable_all_2_3", "1_violation_line_4_ignore_disable_all_2_3", "4_violations_two_types_disable_specific_enable_all", "4_violations_two_types_disable_all_enable_specific", "1_violations_comment_inline_ignore", "2_violations_comment_inline_ignore", "1_violations_comment_inline_glob_ignore_unmatch", "1_violations_comment_inline_glob_ignore_match", ], ) def test_linted_file_ignore_masked_violations( noqa: dict, violations: List[SQLBaseError], expected, used_noqas ): """Test that _ignore_masked_violations() correctly filters violations.""" ignore_mask = [ IgnoreMask._parse_noqa(reference_map=dummy_rule_map, line_pos=0, **c) for c in noqa ] result = IgnoreMask(ignore_mask).ignore_masked_violations(violations) expected_violations = [v for i, v in enumerate(violations) if i in expected] assert expected_violations == result # Check whether "used" evaluation works expected_used = [ignore_mask[i] for i, _ in enumerate(noqa) if i in used_noqas] actually_used = [i for i in ignore_mask if i.used] assert actually_used == expected_used def test_linter_noqa(): """Test "noqa" feature at the higher "Linter" level.""" lntr = Linter( config=FluffConfig( overrides={ "dialect": "bigquery", # Use bigquery to allow hash comments. "rules": "AL02, LT04", } ) ) sql = """ SELECT col_a a, col_b b, --noqa: disable=AL02 col_c c, col_d d, --noqa: enable=AL02 col_e e, col_f f, col_g g, --noqa col_h h, col_i i, --noqa:AL02 col_j j, col_k k, --noqa:AL03 col_l l, col_m m, col_n n, --noqa: disable=all col_o o, col_p p, --noqa: enable=all col_q q, --Inline comment --noqa: AL02 col_r r, /* Block comment */ --noqa: AL02 col_s s # hash comment --noqa: AL02 -- We trigger both AL02 (implicit aliasing) -- and LT04 (leading commas) here to -- test glob ignoring of multiple rules. , col_t t --noqa: L01* , col_u u -- Some comment --noqa: L01* , col_v v -- We can ignore both AL02 and LT04 -- noqa: L01[29] FROM foo """ result = lntr.lint_string(sql) violations = result.get_violations() assert {3, 6, 7, 8, 10, 12, 13, 14, 15, 18} == {v.line_no for v in violations} def test_linter_noqa_with_templating(): """Similar to test_linter_noqa, but uses templating (Jinja).""" lntr = Linter( config=FluffConfig( overrides={ "dialect": "bigquery", # Use bigquery to allow hash comments. 
"templater": "jinja", "rules": "LT05", } ) ) sql = "\n" '"{%- set a_var = ["1", "2"] -%}\n' "SELECT\n" " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_" "templated_sql_files, --noqa: LT05\n" " this_is_not_so_big a, --Inline comment --noqa: AL02\n" " this_is_not_so_big b, /* Block comment */ --noqa: AL02\n" " this_is_not_so_big c, # hash comment --noqa: AL02\n" " this_is_just_a_very_long_line_for_demonstration_purposes_of_a_bug_involving_" "templated_sql_files, --noqa: L01*\n" "FROM\n" " a_table\n" " " result = lntr.lint_string(sql) assert not result.get_violations() def test_linter_noqa_template_errors(): """Similar to test_linter_noqa, but uses templating (Jinja).""" lntr = Linter( config=FluffConfig( overrides={ "templater": "jinja", "dialect": "ansi", } ) ) sql = """select * --noqa: TMP from raw where balance_date >= {{ execution_date - macros.timedelta() }} --noqa: TMP """ result = lntr.lint_string(sql) assert not result.get_violations() def test_linter_noqa_prs(): """Test "noqa" feature to ignore PRS at the higher "Linter" level.""" lntr = Linter(dialect="ansi") sql = "SELEC * FROM foo -- noqa: PRS\n" result = lntr.lint_string(sql) violations = result.get_violations() assert not violations def test_linter_noqa_tmp(): """Test "noqa" feature to ignore TMP at the higher "Linter" level.""" lntr = Linter( config=FluffConfig( overrides={ "exclude_rules": "LT13", "dialect": "ansi", } ) ) sql = """ SELECT {{ col_a }} AS a -- noqa: TMP,PRS FROM foo; """ result = lntr.lint_string(sql) print(result.tree.stringify()) violations = result.get_violations() assert not violations def test_linter_noqa_disable(): """Test "noqa" comments can be disabled via the config.""" lntr_noqa_enabled = Linter( config=FluffConfig( overrides={ "rules": "AL02", "dialect": "ansi", } ) ) lntr_noqa_disabled = Linter( config=FluffConfig( overrides={ "disable_noqa": True, "rules": "AL02", "dialect": "ansi", } ) ) # This query raises AL02, but it is being suppressed by the inline noqa comment. # We can ignore this comment by setting disable_noqa = True in the config # or by using the --disable-noqa flag in the CLI. sql = """ SELECT col_a a --noqa: AL02 FROM foo """ # Verify that noqa works as expected with disable_noqa = False (default). result_noqa_enabled = lntr_noqa_enabled.lint_string(sql) violations_noqa_enabled = result_noqa_enabled.get_violations() assert len(violations_noqa_enabled) == 0 # Verify that noqa comment is ignored with disable_noqa = True. result_noqa_disabled = lntr_noqa_disabled.lint_string(sql) violations_noqa_disabled = result_noqa_disabled.get_violations() assert len(violations_noqa_disabled) == 1 assert violations_noqa_disabled[0].rule.code == "AL02" sqlfluff-2.3.5/test/core/rules/reference_test.py000066400000000000000000000040711451700765000217620ustar00rootroot00000000000000"""Test components for working with object and table references.""" import pytest from sqlfluff.core.rules import reference @pytest.mark.parametrize( "possible_references, targets, result", [ # Empty list of references is always True. [[], [("abc",)], True], # Simple cases: one reference, one target. [[("agent1",)], [("agent1",)], True], [[("agent1",)], [("customer",)], False], # Multiple references. If any match, good. [[("bar",), ("user_id",)], [("bar",)], True], [[("foo",), ("user_id",)], [("bar",)], False], # Multiple targets. If any reference matches, good. 
[[("table1",)], [("table1",), ("table2",), ("table3",)], True], [[("tbl2",)], [("db", "sc", "tbl1")], False], [[("tbl2",)], [("db", "sc", "tbl2")], True], # Multi-part references and targets. If one tuple is shorter than # the other, checks for a suffix match. [ [ ( "rc", "tbl1", ) ], [("db", "sc", "tbl1")], False, ], [ [ ( "sc", "tbl1", ) ], [("db", "sc", "tbl1")], True, ], [ [ ( "cb", "sc", "tbl1", ) ], [("db", "sc", "tbl1")], False, ], [ [ ( "db", "sc", "tbl1", ) ], [("db", "sc", "tbl1")], True, ], [[("public", "agent1")], [("agent1",)], True], [[("public", "agent1")], [("public",)], False], ], ) def test_object_ref_matches_table(possible_references, targets, result): """Test object_ref_matches_table().""" assert reference.object_ref_matches_table(possible_references, targets) == result sqlfluff-2.3.5/test/core/rules/rules_test.py000066400000000000000000000333521451700765000211620ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import logging import pytest from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig from sqlfluff.core.errors import SQLFluffUserError from sqlfluff.core.linter import RuleTuple from sqlfluff.core.parser import WhitespaceSegment from sqlfluff.core.parser.markers import PositionMarker from sqlfluff.core.rules import BaseRule, LintFix, LintResult, get_ruleset from sqlfluff.core.rules.crawlers import RootOnlyCrawler, SegmentSeekerCrawler from sqlfluff.core.rules.doc_decorators import ( document_configuration, document_fix_compatible, document_groups, ) from sqlfluff.core.rules.loader import get_rules_from_path from sqlfluff.core.templaters.base import TemplatedFile from sqlfluff.utils.testing.logging import fluff_log_catcher from sqlfluff.utils.testing.rules import get_rule_from_set from test.fixtures.rules.custom.L000 import Rule_L000 from test.fixtures.rules.custom.S000 import Rule_S000 class Rule_T042(BaseRule): """A dummy rule.""" groups = ("all",) def _eval(self, context): pass class Rule_T001(BaseRule): """A deliberately malicious rule. **Anti-pattern** Blah blah """ groups = ("all",) crawl_behaviour = SegmentSeekerCrawler({"whitespace"}) is_fix_compatible = True def _eval(self, context): """Stars make newlines.""" if context.segment.is_type("whitespace"): return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment, [WhitespaceSegment(context.segment.raw + " ")] ) ], ) class Rule_T002(BaseRule): """A rule which says all raw code segments are bad. This is used for testing unparsable code. """ groups = ("all",) # Root only crawler so that the in-rule filters don't kick in. crawl_behaviour = RootOnlyCrawler() def _eval(self, context): """Stars make newlines.""" violations = [] for seg in context.segment.raw_segments: if seg.is_code: violations.append(LintResult(anchor=seg, description="TESTING")) return violations class Rule_T003(BaseRule): """Another deliberately malicious rule. 
**Anti-pattern** Blah blah """ groups = ("all",) crawl_behaviour = SegmentSeekerCrawler({"numeric_literal"}) is_fix_compatible = True def _eval(self, context): """Triple any numeric literals.""" return LintResult( anchor=context.segment, fixes=[ LintFix.replace( context.segment, [ context.segment, WhitespaceSegment(context.segment.raw + " "), context.segment, WhitespaceSegment(context.segment.raw + " "), context.segment, ], ) ], ) def test__rules__user_rules(): """Test that we can safely add user rules.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T042], dialect="ansi") # Make sure the new one is in there. assert RuleTuple("T042", "", "A dummy rule.", ("all",), ()) in linter.rule_tuples() # Instantiate a second linter and check it's NOT in there. # This tests that copying and isolation works. linter = Linter(dialect="ansi") assert not any(rule[0] == "T042" for rule in linter.rule_tuples()) @pytest.mark.parametrize( "rules, exclude_rules, resulting_codes", [ # NB: We don't check the "select nothing" case, because not setting # the rules setting just means "select everything". # ("", "", set()), # 1: Select by code. # NOTE: T012 uses T011 as its name but that should be ignored # because of the conflict. ("T010", "", {"T010"}), ("T010,T011", "", {"T010", "T011"}), ("T010,T011", "T011", {"T010"}), # 2: Select by name # NOTE: T012 uses "fake_other" as its group but that should be ignored # because of the conflict. ("fake_basic", "", {"T010"}), ("fake_other", "", {"T011"}), ("fake_basic,fake_other", "", {"T010", "T011"}), # 3: Select by group # NOTE: T010 uses "foo" as its alias but that should be ignored # because of the conflict. ("test", "", {"T010", "T011"}), ("foo", "", {"T011", "T012"}), ("test,foo", "", {"T010", "T011", "T012"}), ("test", "foo", {"T010"}), # 4: Select by alias ("fb1", "", {"T010"}), ("fb2", "", {"T011"}), ], ) def test__rules__rule_selection(rules, exclude_rules, resulting_codes): """Test that rule selection works by various means.""" class Rule_T010(BaseRule): """Fake Basic Rule.""" groups = ("all", "test") name = "fake_basic" aliases = ("fb1", "foo") # NB: Foo is a group on another rule. crawl_behaviour = RootOnlyCrawler() def _eval(self, **kwargs): pass class Rule_T011(Rule_T010): """Fake Basic Rule. NOTE: We inherit crawl behaviour and _eval from above. """ groups = ("all", "test", "foo") name = "fake_other" aliases = ("fb2",) class Rule_T012(Rule_T010): """Fake Basic Rule. NOTE: We inherit crawl behaviour and _eval from above. """ # NB: "fake_other" is the name of another rule. groups = ("all", "foo", "fake_other") # No aliases, Name collides with the alias of another rule. name = "fake_again" aliases = () cfg = FluffConfig( overrides={"rules": rules, "exclude_rules": exclude_rules, "dialect": "ansi"} ) linter = Linter(config=cfg, user_rules=[Rule_T010, Rule_T011, Rule_T012]) # Get the set of selected codes: selected_codes = set(tpl[0] for tpl in linter.rule_tuples()) # Check selected rules assert selected_codes == resulting_codes def test__rules__filter_unparsable(): """Test that rules that handle their own crawling respect unparsable.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T002], dialect="ansi", rules=["T002"]) # Lint a simple parsable file and check we do get issues # It's parsable, so we should get issues. res = linter.lint_string("SELECT 1") assert any(v.rule_code() == "T002" for v in res.violations) # Lint an unparsable file. Check we don't get any violations.
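# (The assumption being that lint results anchored inside an "unparsable"
# section are filtered out by the linter.)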
# It's not parsable so we shouldn't get issues. res = linter.lint_string("asd asdf sdfg") assert not any(v.rule_code() == "T002" for v in res.violations) def test__rules__result_unparsable(): """Test that the linter won't allow rules which make the file unparsable.""" # Set up a linter with the user rule linter = Linter(user_rules=[Rule_T003], dialect="ansi", rules=["T003"]) # Lint a simple parsable file and check we do get issues # It's parsable, so we should get issues. raw_sql = "SELECT 1 FROM a" with fluff_log_catcher(logging.WARNING, "sqlfluff") as caplog: res = linter.lint_string(raw_sql, fix=True) # Check we got the warning. assert "would result in an unparsable file" in caplog.text # Check we get the violation. assert any(v.rule_code() == "T003" for v in res.violations) # The resulting file should be _the same_ because it would have resulted # in an unparsable file if applied. assert res.tree.raw == raw_sql def test__rules__runaway_fail_catch(): """Test that we catch runaway rules.""" runaway_limit = 5 my_query = "SELECT * FROM foo" # Set up the config to only use the rule we are testing. cfg = FluffConfig( overrides={"rules": "T001", "runaway_limit": runaway_limit, "dialect": "ansi"} ) # Lint it using the current config (while in fix mode) linter = Linter(config=cfg, user_rules=[Rule_T001]) # In theory this step should result in an infinite # loop, but the loop limit should catch it. linted = linter.lint_string(my_query, fix=True) # When the linter hits the runaway limit, it returns the original SQL tree. assert linted.tree.raw == my_query def test_rules_cannot_be_instantiated_without_declared_configs(): """Ensure that new rules must be instantiated with config values.""" class Rule_NewRule_ZZ99(BaseRule): """Testing Rule.""" config_keywords = ["tab_space_size"] new_rule = Rule_NewRule_ZZ99(code="L000", description="", tab_space_size=6) assert new_rule.tab_space_size == 6 # Error is thrown since "tab_space_size" is defined in class, # but not upon instantiation with pytest.raises(ValueError): new_rule = Rule_NewRule_ZZ99(code="L000", description="") def test_rules_legacy_doc_decorators(caplog): """Ensure that the deprecated decorators can still be imported but do nothing.""" with fluff_log_catcher(logging.WARNING, "sqlfluff") as caplog: @document_fix_compatible @document_groups @document_configuration class Rule_NewRule_ZZ99(BaseRule): """Untouched Text.""" pass # Check they didn't do anything to the docstring. assert Rule_NewRule_ZZ99.__doc__ == """Untouched Text.""" # Check there are warnings. 
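# (We expect one warning per legacy decorator, each naming the decorator that
# was used, which is what the substring assertions below check for.)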
print("Records:") for record in caplog.records: print(record) assert "uses the @document_fix_compatible decorator" in caplog.text assert "uses the @document_groups decorator" in caplog.text assert "uses the @document_configuration decorator" in caplog.text def test_rules_configs_are_dynamically_documented(): """Ensure that rule configurations are added to the class docstring.""" class RuleWithConfig_ZZ99(BaseRule): """A new rule with configuration.""" config_keywords = ["unquoted_identifiers_policy"] print(f"RuleWithConfig_ZZ99.__doc__: {RuleWithConfig_ZZ99.__doc__!r}") assert "unquoted_identifiers_policy" in RuleWithConfig_ZZ99.__doc__ class RuleWithoutConfig_ZZ99(BaseRule): """A new rule without configuration.""" pass print(f"RuleWithoutConfig_ZZ99.__doc__: {RuleWithoutConfig_ZZ99.__doc__!r}") assert "Configuration" not in RuleWithoutConfig_ZZ99.__doc__ def test_rules_name_validation(): """Ensure that rule names are validated.""" with pytest.raises(SQLFluffUserError) as exc_info: class RuleWithoutBadName_ZZ99(BaseRule): """A new rule without configuration.""" name = "MY-KEBAB-CASE-NAME" assert "Tried to define rule with unexpected name" in exc_info.value.args[0] assert "MY-KEBAB-CASE-NAME" in exc_info.value.args[0] def test_rule_exception_is_caught_to_validation(): """Assert that a rule that throws an exception returns it as a validation.""" std_rule_set = get_ruleset() @std_rule_set.register class Rule_T000(BaseRule): """Rule that throws an exception.""" groups = ("all",) crawl_behaviour = RootOnlyCrawler() def _eval(self, segment, parent_stack, **kwargs): raise Exception("Catch me or I'll deny any linting results from you") linter = Linter( config=FluffConfig(overrides=dict(rules="T000", dialect="ansi")), user_rules=[Rule_T000], ) assert linter.lint_string("select 1").check_tuples() == [("T000", 1, 1)] def test_rule_must_belong_to_all_group(): """Assert correct 'groups' config for rule.""" std_rule_set = get_ruleset() with pytest.raises(AssertionError): @std_rule_set.register class Rule_T000(BaseRule): """Badly configured rule, no groups attribute.""" def _eval(self, **kwargs): pass with pytest.raises(AssertionError): @std_rule_set.register class Rule_T001(BaseRule): """Badly configured rule, no 'all' group.""" groups = () def _eval(self, **kwargs): pass def test_std_rule_import_fail_bad_naming(): """Check that rule import from file works.""" assert get_rules_from_path( rules_path="test/fixtures/rules/custom/*.py", base_module="test.fixtures.rules.custom", ) == [Rule_L000, Rule_S000] with pytest.raises(AttributeError) as e: get_rules_from_path( rules_path="test/fixtures/rules/custom/bad_rule_name/*.py", base_module="test.fixtures.rules.custom.bad_rule_name", ) e.match("Rule classes must be named in the format of") def test_rule_set_return_informative_error_when_rule_not_registered(): """Assert that a rule that throws an exception returns it as a validation.""" cfg = FluffConfig(overrides={"dialect": "ansi"}) with pytest.raises(ValueError) as e: get_rule_from_set("L000", config=cfg) e.match("'L000' not in") seg = WhitespaceSegment( pos_marker=PositionMarker( slice(0, 1), slice(0, 1), TemplatedFile(" ", fname="") ) ) @pytest.mark.parametrize( "lint_result, expected", [ (LintResult(), "LintResult()"), (LintResult(seg), "LintResult()"), ( LintResult(seg, description="foo"), "LintResult(foo: )", ), ( LintResult( seg, description="foo", fixes=[ LintFix("create_before", seg, edit=[seg]), LintFix("create_after", seg, edit=[seg]), ], ), "LintResult(foo: +2F)", ), ], ) def 
test_rules__lint_result_repr(lint_result, expected): """Test that repr(LintResult) works as expected.""" assert repr(lint_result) == expected sqlfluff-2.3.5/test/core/templaters/000077500000000000000000000000001451700765000174375ustar00rootroot00000000000000sqlfluff-2.3.5/test/core/templaters/__init__.py000066400000000000000000000000271451700765000215470ustar00rootroot00000000000000"""Templater Tests.""" sqlfluff-2.3.5/test/core/templaters/base_test.py000066400000000000000000000301621451700765000217640ustar00rootroot00000000000000"""Tests for templaters.""" import pytest from sqlfluff.core.templaters import ( RawTemplater, TemplatedFile, ) from sqlfluff.core.templaters.base import ( RawFileSlice, TemplatedFileSlice, iter_indices_of_newlines, ) @pytest.mark.parametrize( "raw_str,positions", [ ("", []), ("foo", []), ("foo\nbar", [3]), ("\nfoo\n\nbar\nfoo\n\nbar\n", [0, 4, 5, 9, 13, 14, 18]), ], ) def test__indices_of_newlines(raw_str, positions): """Test iter_indices_of_newlines.""" assert list(iter_indices_of_newlines(raw_str)) == positions def test__templater_raw(): """Test the raw templater.""" t = RawTemplater() instr = "SELECT * FROM {{blah}}" outstr, _ = t.process(in_str=instr, fname="test") assert instr == str(outstr) SIMPLE_FILE_KWARGS = { "fname": "test.sql", "source_str": "01234\n6789{{foo}}fo\nbarss", "templated_str": "01234\n6789x\nfo\nbarss", "sliced_file": [ TemplatedFileSlice(*args) for args in [ ("literal", slice(0, 10, None), slice(0, 10, None)), ("templated", slice(10, 17, None), slice(10, 12, None)), ("literal", slice(17, 25, None), slice(12, 20, None)), ] ], "raw_sliced": [ RawFileSlice(*args) for args in [ ("x" * 10, "literal", 0), ("x" * 7, "templated", 10), ("x" * 8, "literal", 17), ] ], } COMPLEX_FILE_KWARGS = { "fname": "test.sql", "sliced_file": [ TemplatedFileSlice(*args) for args in [ ("literal", slice(0, 13, None), slice(0, 13, None)), ("comment", slice(13, 29, None), slice(13, 13, None)), ("literal", slice(29, 44, None), slice(13, 28, None)), ("block_start", slice(44, 68, None), slice(28, 28, None)), ("literal", slice(68, 81, None), slice(28, 41, None)), ("templated", slice(81, 86, None), slice(41, 42, None)), ("literal", slice(86, 110, None), slice(42, 66, None)), ("templated", slice(68, 86, None), slice(66, 76, None)), ("literal", slice(68, 81, None), slice(76, 89, None)), ("templated", slice(81, 86, None), slice(89, 90, None)), ("literal", slice(86, 110, None), slice(90, 114, None)), # ("templated", slice(68, 86, None), slice(114, 125, None)), ("literal", slice(68, 81, None), slice(125, 138, None)), # ("templated", slice(81, 86, None), slice(138, 139, None)), ("literal", slice(86, 110, None), slice(139, 163, None)), ("templated", slice(110, 123, None), slice(163, 166, None)), ("literal", slice(123, 132, None), slice(166, 175, None)), ("block_end", slice(132, 144, None), slice(175, 175, None)), ("literal", slice(144, 155, None), slice(175, 186, None)), ("block_start", slice(155, 179, None), slice(186, 186, None)), ("literal", slice(179, 189, None), slice(186, 196, None)), ("templated", slice(189, 194, None), slice(196, 197, None)), ("literal", slice(194, 203, None), slice(197, 206, None)), ("literal", slice(179, 189, None), slice(206, 216, None)), ("templated", slice(189, 194, None), slice(216, 217, None)), ("literal", slice(194, 203, None), slice(217, 226, None)), ("literal", slice(179, 189, None), slice(226, 236, None)), ("templated", slice(189, 194, None), slice(236, 237, None)), ("literal", slice(194, 203, None), slice(237, 246, None)), ("block_end", 
slice(203, 215, None), slice(246, 246, None)), ("literal", slice(215, 230, None), slice(246, 261, None)), ] ], "raw_sliced": [ RawFileSlice(*args) for args in [ # All contain dummy strings for now. ("x" * 13, "literal", 0), ("x" * 16, "comment", 13), ("x" * 15, "literal", 29), ("x" * 24, "block_start", 44), ("x" * 13, "literal", 68), ("x" * 5, "templated", 81), ("x" * 24, "literal", 86), ("x" * 13, "templated", 110), ("x" * 9, "literal", 123), ("x" * 12, "block_end", 132), ("x" * 11, "literal", 144), ("x" * 24, "block_start", 155), ("x" * 10, "literal", 179), ("x" * 5, "templated", 189), ("x" * 9, "literal", 194), ("x" * 12, "block_end", 203), ("x" * 15, "literal", 215), ] ], } COMPLEX_FILE_KWARGS["source_str"] = "".join( s.raw for s in COMPLEX_FILE_KWARGS["raw_sliced"] ) @pytest.mark.parametrize( "tf_kwargs,in_charpos,out_line_no,out_line_pos", [ # Simple examples ( SIMPLE_FILE_KWARGS, 0, 1, 1, ), ( SIMPLE_FILE_KWARGS, 20, 3, 1, ), ( SIMPLE_FILE_KWARGS, 24, 3, 5, ), ], ) def test__templated_file_get_line_pos_of_char_pos( tf_kwargs, in_charpos, out_line_no, out_line_pos, ): """Test TemplatedFile.get_line_pos_of_char_pos.""" file = TemplatedFile(**tf_kwargs) res_line_no, res_line_pos = file.get_line_pos_of_char_pos(in_charpos) assert res_line_no == out_line_no assert res_line_pos == out_line_pos @pytest.mark.parametrize( "templated_position,inclusive,tf_kwargs,sliced_idx_start,sliced_idx_stop", [ (100, True, COMPLEX_FILE_KWARGS, 10, 11), (13, True, COMPLEX_FILE_KWARGS, 0, 3), (28, True, COMPLEX_FILE_KWARGS, 2, 5), # Check end slicing. (12, True, SIMPLE_FILE_KWARGS, 1, 3), (20, True, SIMPLE_FILE_KWARGS, 2, 3), # Check inclusivity (13, False, COMPLEX_FILE_KWARGS, 0, 1), ], ) def test__templated_file_find_slice_indices_of_templated_pos( templated_position, inclusive, tf_kwargs, sliced_idx_start, sliced_idx_stop, ): """Test TemplatedFile._find_slice_indices_of_templated_pos.""" file = TemplatedFile(**tf_kwargs) res_start, res_stop = file._find_slice_indices_of_templated_pos( templated_position, inclusive=inclusive ) assert res_start == sliced_idx_start assert res_stop == sliced_idx_stop @pytest.mark.parametrize( "in_slice,out_slice,is_literal,tf_kwargs", [ # Simple example ( slice(5, 10), slice(5, 10), True, { "sliced_file": [ TemplatedFileSlice( "literal", slice(0, 20, None), slice(0, 20, None) ) ], "raw_sliced": [RawFileSlice("x" * 20, "literal", 0)], "source_str": "x" * 20, "fname": "foo.sql", }, ), # Trimming the end of a literal (with things that follow). ( slice(10, 13), slice(10, 13), True, COMPLEX_FILE_KWARGS, ), # Unrealistic, but should still work ( slice(5, 10), slice(55, 60), True, { "sliced_file": [ TemplatedFileSlice( "literal", slice(50, 70, None), slice(0, 20, None) ) ], "raw_sliced": [ RawFileSlice("x" * 50, "literal", 0), RawFileSlice("x" * 20, "literal", 50), ], "source_str": "x" * 70, "fname": "foo.sql", }, ), # Spanning a template ( slice(5, 15), slice(5, 20), False, SIMPLE_FILE_KWARGS, ), # Handling templated ( slice(5, 15), slice(0, 25), False, # NB: Same as SIMPLE_SLICED_FILE, but with different slice types. 
{ **SIMPLE_FILE_KWARGS, "sliced_file": [ TemplatedFileSlice( "templated", slc.source_slice, slc.templated_slice ) for slc in SIMPLE_FILE_KWARGS["sliced_file"] ], "raw_sliced": [ RawFileSlice(slc.raw, "templated", slc.source_idx) for slc in SIMPLE_FILE_KWARGS["raw_sliced"] ], }, ), # Handling single length slices ( slice(10, 10), slice(10, 10), True, SIMPLE_FILE_KWARGS, ), ( slice(12, 12), slice(17, 17), True, SIMPLE_FILE_KWARGS, ), # Dealing with single length elements ( slice(20, 20), slice(25, 25), True, { "sliced_file": SIMPLE_FILE_KWARGS["sliced_file"] + [ TemplatedFileSlice( "comment", slice(25, 35, None), slice(20, 20, None) ) ], "raw_sliced": SIMPLE_FILE_KWARGS["raw_sliced"] + [RawFileSlice("x" * 10, "comment", 25)], "source_str": SIMPLE_FILE_KWARGS["source_str"] + "x" * 10, "fname": "foo.sql", }, ), # Just more test coverage ( slice(43, 43), slice(87, 87), True, COMPLEX_FILE_KWARGS, ), ( slice(13, 13), slice(13, 13), True, COMPLEX_FILE_KWARGS, ), ( slice(186, 186), slice(155, 155), True, COMPLEX_FILE_KWARGS, ), # Backward slicing. ( slice(100, 130), # NB This actually would reference the wrong way around if we # just take the points. Here we should handle it gracefully. slice(68, 110), False, COMPLEX_FILE_KWARGS, ), ], ) def test__templated_file_templated_slice_to_source_slice( in_slice, out_slice, is_literal, tf_kwargs ): """Test TemplatedFile.templated_slice_to_source_slice.""" file = TemplatedFile(**tf_kwargs) source_slice = file.templated_slice_to_source_slice(in_slice) literal_test = file.is_source_slice_literal(source_slice) assert (is_literal, source_slice) == (literal_test, out_slice) @pytest.mark.parametrize( "file,expected_result", [ # Comment example ( TemplatedFile( source_str=("a" * 10) + "{# b #}" + ("a" * 10), fname="test", sliced_file=[ TemplatedFileSlice("literal", slice(0, 10), slice(0, 10)), TemplatedFileSlice("templated", slice(10, 17), slice(10, 10)), TemplatedFileSlice("literal", slice(17, 27), slice(10, 20)), ], raw_sliced=[ RawFileSlice("a" * 10, "literal", 0), RawFileSlice("{# b #}", "comment", 10), RawFileSlice("a" * 10, "literal", 17), ], ), [RawFileSlice("{# b #}", "comment", 10)], ), # Template tags aren't source only. ( TemplatedFile( source_str=r"aaa{{ b }}aaa", fname="test", sliced_file=[ TemplatedFileSlice("literal", slice(0, 3), slice(0, 3)), TemplatedFileSlice("templated", slice(3, 10), slice(3, 6)), TemplatedFileSlice("literal", slice(10, 13), slice(6, 9)), ], raw_sliced=[ RawFileSlice("aaa", "literal", 0), RawFileSlice("{{ b }}", "templated", 3), RawFileSlice("aaa", "literal", 10), ], ), [], ), ], ) def test__templated_file_source_only_slices(file, expected_result): """Test TemplatedFile.source_only_slices.""" assert file.source_only_slices() == expected_result sqlfluff-2.3.5/test/core/templaters/jinja_test.py000066400000000000000000001722741451700765000221600ustar00rootroot00000000000000"""Tests for the jinja templater. These tests also test much of the core lexer, especially the treatment of templated sections which only really make sense to test in the context of a templater which supports loops and placeholders. 
""" import logging from collections import defaultdict from pathlib import Path from typing import List, NamedTuple import pytest from jinja2.exceptions import UndefinedError from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLFluffSkipFile, SQLFluffUserError, SQLTemplaterError from sqlfluff.core.templaters import JinjaTemplater from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFile from sqlfluff.core.templaters.jinja import DummyUndefined, JinjaAnalyzer JINJA_STRING = ( "SELECT * FROM {% for c in blah %}{{c}}{% if not loop.last %}, " "{% endif %}{% endfor %} WHERE {{condition}}\n\n" ) JINJA_MACRO_CALL_SQL = ( "{% macro render_name(title) %}\n" " '{{ title }}. foo' as {{ caller() }}\n" "{% endmacro %}\n" "SELECT\n" " {% call render_name('Sir') %}\n" " bar\n" " {% endcall %}\n" "FROM baz\n" ) @pytest.mark.parametrize( "instr, expected_outstr", [ ( JINJA_STRING, "SELECT * FROM f, o, o WHERE a < 10\n\n", ), # Test for issue #968. This was previously raising an UnboundLocalError. ( """ {% set event_columns = ['campaign', 'click_item'] %} SELECT event_id {% for event_column in event_columns %} , {{ event_column }} {% endfor %} FROM events """, ( "\n\n\nSELECT\n event_id\n \n , campaign\n \n , " "click_item\n \nFROM events\n " ), ), ], ids=["simple", "unboundlocal_bugfix"], ) def test__templater_jinja(instr, expected_outstr): """Test jinja templating and the treatment of whitespace.""" t = JinjaTemplater(override_context=dict(blah="foo", condition="a < 10")) outstr, _ = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == expected_outstr class RawTemplatedTestCase(NamedTuple): """Instances of this object are test cases for test__templater_jinja_slices.""" name: str instr: str templated_str: str # These fields are used to check TemplatedFile.sliced_file. expected_templated_sliced__source_list: List[str] expected_templated_sliced__templated_list: List[str] # This field is used to check TemplatedFile.raw_sliced. 
expected_raw_sliced__source_list: List[str] @pytest.mark.parametrize( "case", [ RawTemplatedTestCase( name="basic_block", instr="\n\n{% set x = 42 %}\nSELECT 1, 2\n", templated_str="\n\n\nSELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{% set x = 42 %}", "\nSELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "\n\n", "", "\nSELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{% set x = 42 %}", "\nSELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_left_block", instr="\n\n{%- set x = 42 %}\nSELECT 1, 2\n", templated_str="\nSELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{%- set x = 42 %}", "\nSELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "", "", "\nSELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{%- set x = 42 %}", "\nSELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_both_block", instr="\n\n{%- set x = 42 -%}\nSELECT 1, 2\n", templated_str="SELECT 1, 2\n", expected_templated_sliced__source_list=[ "\n\n", "{%- set x = 42 -%}", "\n", "SELECT 1, 2\n", ], expected_templated_sliced__templated_list=[ "", "", "", "SELECT 1, 2\n", ], expected_raw_sliced__source_list=[ "\n\n", "{%- set x = 42 -%}", "\n", "SELECT 1, 2\n", ], ), RawTemplatedTestCase( name="strip_and_templated_whitespace", instr="SELECT {{- ' ' -}} 1{{ ' , 2' -}}\n", templated_str="SELECT 1 , 2", expected_templated_sliced__source_list=[ "SELECT", " ", "{{- ' ' -}}", " ", "1", "{{ ' , 2' -}}", "\n", ], expected_templated_sliced__templated_list=[ "SELECT", "", # Placeholder for consumed whitespace " ", # Placeholder for templated whitespace "", # Placeholder for consumed whitespace "1", " , 2", "", # Placeholder for consumed newline ], expected_raw_sliced__source_list=[ "SELECT", " ", "{{- ' ' -}}", " ", "1", "{{ ' , 2' -}}", "\n", ], ), RawTemplatedTestCase( name="strip_both_block_hard", instr="SELECT {%- set x = 42 %} 1 {%- if true -%} , 2{% endif -%}\n", templated_str="SELECT 1, 2", expected_templated_sliced__source_list=[ "SELECT", # NB: Even though the jinja tag consumes whitespace, we still # get it here as a placeholder. " ", "{%- set x = 42 %}", " 1", # This whitespace is separate from the 1 because it's consumed. " ", "{%- if true -%}", " ", ", 2", "{% endif -%}", "\n", ], expected_templated_sliced__templated_list=[ "SELECT", "", # Consumed whitespace placeholder "", # Jinja block placeholder " 1", "", # Consumed whitespace "", # Jinja block placeholder "", # More consumed whitespace ", 2", "", # Jinja block "", # Consumed final newline. ], expected_raw_sliced__source_list=[ "SELECT", " ", "{%- set x = 42 %}", " 1", " ", "{%- if true -%}", " ", ", 2", "{% endif -%}", "\n", ], ), RawTemplatedTestCase( name="basic_data", instr="""select c1, {{ 'c' }}2 as user_id """, templated_str="""select c1, c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,\n ", "{{ 'c' }}", "2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,\n ", "c", "2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,\n ", "{{ 'c' }}", "2 as user_id\n", ], ), # Note this is basically identical to the "basic_data" case above. # "Right strip" is not actually a thing in Jinja.
RawTemplatedTestCase( name="strip_right_data", instr="""SELECT {{ 'col1,' -}} col2 """, templated_str="""SELECT col1,col2 """, expected_templated_sliced__source_list=[ "SELECT\n ", "{{ 'col1,' -}}", "\n ", "col2\n", ], expected_templated_sliced__templated_list=[ "SELECT\n ", "col1,", "", "col2\n", ], expected_raw_sliced__source_list=[ "SELECT\n ", "{{ 'col1,' -}}", "\n ", "col2\n", ], ), RawTemplatedTestCase( name="strip_both_data", instr="""select c1, {{- 'c' -}} 2 as user_id """, templated_str="""select c1,c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,", "\n ", "{{- 'c' -}}", "\n", "2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,", "", "c", "", "2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,", "\n ", "{{- 'c' -}}", "\n", "2 as user_id\n", ], ), RawTemplatedTestCase( name="strip_both_comment", instr="""select c1, {#- Column 2 -#} c2 as user_id """, templated_str="""select c1,c2 as user_id """, expected_templated_sliced__source_list=[ "select\n c1,", "\n ", "{#- Column 2 -#}", " ", "c2 as user_id\n", ], expected_templated_sliced__templated_list=[ "select\n c1,", "", "", "", "c2 as user_id\n", ], expected_raw_sliced__source_list=[ "select\n c1,", "\n ", "{#- Column 2 -#}", " ", "c2 as user_id\n", ], ), RawTemplatedTestCase( name="union_all_loop1", instr="""{% set products = [ 'table1', 'table2', ] %} {% for product in products %} SELECT brand FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} """, templated_str=( "\n\n\nSELECT\n brand\nFROM\n table1\nUNION ALL\n\nSELECT\n " "brand\nFROM\n table2\n\n\n" ), expected_templated_sliced__source_list=[ "{% set products = [\n 'table1',\n 'table2',\n ] %}", "\n\n", "{% for product in products %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", " ", "UNION ALL", " ", "{%- endif %}", "\n", "{% endfor %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", "{%- endif %}", "\n", "{% endfor %}", "\n", ], expected_templated_sliced__templated_list=[ "", "\n\n", "", "\nSELECT\n brand\nFROM\n ", "table1", "\n", "", "", "UNION ALL", "", "", "\n", "", "\nSELECT\n brand\nFROM\n ", "table2", "\n", "", "", "\n", "", "\n", ], expected_raw_sliced__source_list=[ "{% set products = [\n 'table1',\n 'table2',\n ] %}", "\n\n", "{% for product in products %}", "\nSELECT\n brand\nFROM\n ", "{{ product }}", "\n", "{% if not loop.last -%}", " ", "UNION ALL", " ", "{%- endif %}", "\n", "{% endfor %}", "\n", ], ), RawTemplatedTestCase( "set_multiple_variables_and_define_macro", """{% macro echo(text) %} {{text}} {% endmacro %} {% set a, b = 1, 2 %} SELECT {{ echo(a) }}, {{ echo(b) }}""", "\n\n\n\nSELECT\n \n1\n,\n \n2\n", [ "{% macro echo(text) %}", "\n", "{{text}}", "\n", "{% endmacro %}", "\n\n", "{% set a, b = 1, 2 %}", "\n\nSELECT\n ", "{{ echo(a) }}", ",\n ", "{{ echo(b) }}", ], [ "", "", "", "", "", "\n\n", "", "\n\nSELECT\n ", "\n1\n", ",\n ", "\n2\n", ], [ "{% macro echo(text) %}", "\n", "{{text}}", "\n", "{% endmacro %}", "\n\n", "{% set a, b = 1, 2 %}", "\n\nSELECT\n ", "{{ echo(a) }}", ",\n ", "{{ echo(b) }}", ], ), ], ids=lambda case: case.name, ) def test__templater_jinja_slices(case: RawTemplatedTestCase): """Test that Jinja templater slices raw and templated file correctly.""" t = JinjaTemplater() templated_file, _ = t.process( in_str=case.instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) assert templated_file is not None assert templated_file.source_str == case.instr 
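    # Illustrative cross-check (an editorial addition, not part of the
    # original test): assuming the templated slices are contiguous and cover
    # the whole render, an invariant asserted explicitly elsewhere in this
    # module, stitching them back together should reproduce the templated
    # string.
    reconstructed = "".join(
        templated_file.templated_str[ts.templated_slice]
        for ts in templated_file.sliced_file
    )
    assert reconstructed == case.templated_str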
assert templated_file.templated_str == case.templated_str # Build and check the list of source strings referenced by "sliced_file". actual_ts_source_list = [ case.instr[ts.source_slice] for ts in templated_file.sliced_file ] assert actual_ts_source_list == case.expected_templated_sliced__source_list # Build and check the list of templated strings referenced by "sliced_file". actual_ts_templated_list = [ templated_file.templated_str[ts.templated_slice] for ts in templated_file.sliced_file ] assert actual_ts_templated_list == case.expected_templated_sliced__templated_list # Build and check the list of source strings referenced by "raw_sliced". previous_rs = None actual_rs_source_list: List[str] = [] for rs in templated_file.raw_sliced + [None]: # type: ignore if previous_rs: if rs: actual_source = case.instr[previous_rs.source_idx : rs.source_idx] else: actual_source = case.instr[previous_rs.source_idx :] actual_rs_source_list.append(actual_source) previous_rs = rs assert actual_rs_source_list == case.expected_raw_sliced__source_list def test_templater_set_block_handling(): """Test handling of literals in {% set %} blocks. Specifically, verify they are not modified in the alternate template. """ def run_query(sql): # Prior to the bug fix, this assertion failed. This was bad because, # inside JinjaTracer, dbt templates similar to the one in this test # would call the database with funky SQL (including weird strings it # uses internally like: 00000000000000000000000000000002). assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n" return sql t = JinjaTemplater(override_context=dict(run_query=run_query)) instr = """{% set my_query1 %} select 1 from foobarfoobarfoobarfoobar_{{ "dev" }} {% endset %} {% set my_query2 %} {{ my_query1 }} {% endset %} {{ run_query(my_query2) }} """ outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n" assert len(vs) == 0 def test__templater_jinja_error_variable(): """Test missing variable error handling in the jinja templater.""" t = JinjaTemplater(override_context=dict(blah="foo")) instr = JINJA_STRING outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "SELECT * FROM f, o, o WHERE \n\n" # Check we have violations. assert len(vs) > 0 # Check one of them is a templating error on line 1 assert any(v.rule_code() == "TMP" and v.line_no == 1 for v in vs) def test__templater_jinja_dynamic_variable_no_violations(): """Test no templater violation for variable defined within template.""" t = JinjaTemplater(override_context=dict(blah="foo")) instr = """{% if True %} {% set some_var %}1{% endset %} SELECT {{some_var}} {% endif %} """ outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == "\n \n SELECT 1\n\n" # Check we have no violations. assert len(vs) == 0 def test__templater_jinja_error_syntax(): """Test syntax problems in the jinja templater.""" t = JinjaTemplater() instr = "SELECT {{foo} FROM jinja_error\n" outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) # Check we just skip templating. assert str(outstr) == instr # Check we have violations.
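# (Editorial elaboration: a Jinja syntax error is treated as recoverable, so rather than raising, the templater returns the raw string unchanged and reports the problem as a violation, which the asserts below verify.)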
assert len(vs) > 0 # Check one of them is a templating error on line 1 assert any(v.rule_code() == "TMP" and v.line_no == 1 for v in vs) def test__templater_jinja_error_catastrophic(): """Test error handling in the jinja templater.""" t = JinjaTemplater(override_context=dict(blah=7)) instr = JINJA_STRING outstr, vs = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert not outstr assert len(vs) > 0 def test__templater_jinja_error_macro_path_does_not_exist(): """Tests that an error is raised if macro path doesn't exist.""" with pytest.raises(ValueError) as e: JinjaTemplater().construct_render_func( config=FluffConfig.from_path( "test/fixtures/templater/jinja_macro_path_does_not_exist" ) ) assert str(e.value).startswith("Path does not exist") def test__templater_jinja_error_macro_invalid(): """Tests that an error is raised if a macro is invalid.""" invalid_macro_config_string = ( "[sqlfluff]\n" "templater = jinja\n" "dialect = ansi\n" "[sqlfluff:templater:jinja:macros]\n" "a_macro_def = {% macro pkg.my_macro() %}pass{% endmacro %}\n" ) config = FluffConfig.from_string(invalid_macro_config_string) with pytest.raises(SQLFluffUserError) as e: JinjaTemplater().construct_render_func(config=config) error_string = str(e.value) assert error_string.startswith("Error loading user provided macro") assert "{% macro pkg.my_macro() %}pass{% endmacro %}" in error_string def test__templater_jinja_lint_empty(): """Check that parsing a file which renders to an empty string. No exception should be raised, and we should get a single templated element. """ lntr = Linter(dialect="ansi") parsed = lntr.parse_string(in_str='{{ "" }}') assert parsed.templated_file.source_str == '{{ "" }}' assert parsed.templated_file.templated_str == "" # Get the types of the segments print(f"Segments: {parsed.tree.raw_segments}") seg_types = [seg.get_type() for seg in parsed.tree.raw_segments] assert seg_types == ["placeholder", "end_of_file"] def assert_structure(yaml_loader, path, code_only=True, include_meta=False): """Check that a parsed sql file matches the yaml file with the same name.""" lntr = Linter() p = list(lntr.parse_path(path + ".sql")) parsed = p[0][0] if parsed is None: print(p) raise RuntimeError(p[0][1]) # Whitespace is important here to test how that's treated tpl = parsed.to_tuple(code_only=code_only, show_raw=True, include_meta=include_meta) # Check nothing unparsable if "unparsable" in parsed.type_set(): print(parsed.stringify()) raise ValueError("Input file is unparsable.") _hash, expected = yaml_loader(path + ".yml") assert tpl == expected @pytest.mark.parametrize( "subpath,code_only,include_meta", [ # Config Scalar ("jinja_a/jinja", True, False), # Macros ("jinja_b/jinja", False, False), # dbt builtins ("jinja_c_dbt/dbt_builtins_config", True, False), ("jinja_c_dbt/dbt_builtins_is_incremental", True, False), ("jinja_c_dbt/dbt_builtins_ref", True, False), ("jinja_c_dbt/dbt_builtins_source", True, False), ("jinja_c_dbt/dbt_builtins_this", True, False), ("jinja_c_dbt/dbt_builtins_var_default", True, False), ("jinja_c_dbt/dbt_builtins_test", True, False), # do directive ("jinja_e/jinja", True, False), # case sensitivity and python literals ("jinja_f/jinja", True, False), # Macro loading from a folder ("jinja_g_macros/jinja", True, False), # jinja raw tag ("jinja_h_macros/jinja", True, False), ("jinja_i_raw/raw_tag", True, False), ("jinja_i_raw/raw_tag_2", True, False), # Library Loading from a folder ("jinja_j_libraries/jinja", True, False), # Priority of macros 
("jinja_k_config_override_path_macros/jinja", True, False), # Placeholders and metas ("jinja_l_metas/001", False, True), ("jinja_l_metas/002", False, True), ("jinja_l_metas/003", False, True), ("jinja_l_metas/004", False, True), ("jinja_l_metas/005", False, True), ("jinja_l_metas/006", False, True), ("jinja_l_metas/007", False, True), ("jinja_l_metas/008", False, True), ("jinja_l_metas/009", False, True), ("jinja_l_metas/010", False, True), ("jinja_l_metas/011", False, True), # Library Loading from a folder when library is module ("jinja_m_libraries_module/jinja", True, False), ("jinja_n_nested_macros/jinja", True, False), # Test more dbt configurations ("jinja_o_config_override_dbt_builtins/override_dbt_builtins", True, False), ("jinja_p_disable_dbt_builtins/disable_dbt_builtins", True, False), # Load all the macros ("jinja_q_multiple_path_macros/jinja", True, False), ("jinja_s_filters_in_library/jinja", True, False), ], ) def test__templater_full(subpath, code_only, include_meta, yaml_loader, caplog): """Check structure can be parsed from jinja templated files.""" # Log the templater and lexer throughout this test caplog.set_level(logging.DEBUG, logger="sqlfluff.templater") caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") assert_structure( yaml_loader, "test/fixtures/templater/" + subpath, code_only=code_only, include_meta=include_meta, ) def test__templater_jinja_block_matching(caplog): """Test the block UUID matching works with a complicated case.""" caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") path = "test/fixtures/templater/jinja_l_metas/002.sql" # Parse the file. p = list(Linter().parse_path(path)) parsed = p[0][0] assert parsed # We only care about the template elements template_segments = [ seg for seg in parsed.raw_segments if seg.is_type("template_loop") or ( seg.is_type("placeholder") and seg.block_type in ("block_start", "block_end", "block_mid") ) ] # Group them together by block UUID assert all( seg.block_uuid for seg in template_segments ), "All templated segments should have a block uuid!" grouped = defaultdict(list) for seg in template_segments: grouped[seg.block_uuid].append(seg.pos_marker.working_loc) print(grouped) # Now the matching block IDs should be found at the following positions. # NOTE: These are working locations in the rendered file. groups = { "for actions clause 1": [(6, 5), (9, 5), (12, 5), (15, 5)], "for actions clause 2": [(17, 5), (21, 5), (29, 5), (37, 5)], # NOTE: all the if loop clauses are grouped together. 
"if loop.first": [ (18, 9), (20, 9), (20, 9), (22, 9), (22, 9), (28, 9), (30, 9), (30, 9), (36, 9), ], } # Check all are accounted for: for clause in groups.keys(): for block_uuid, locations in grouped.items(): if groups[clause] == locations: print(f"Found {clause}, locations with UUID: {block_uuid}") break else: raise ValueError(f"Couldn't find appropriate grouping of blocks: {clause}") @pytest.mark.parametrize( "test,result", [ ("", []), ("foo", [("foo", "literal", 0)]), ( "foo {{bar}} z ", [ ("foo ", "literal", 0), ("{{bar}}", "templated", 4), (" z ", "literal", 11), ], ), ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3]%}, " "fld_{{i}}{% endfor %} FROM my_schema.{{my_table}} " ), [ ("SELECT ", "literal", 0), ("{# A comment #}", "comment", 7), (" ", "literal", 22), ("{{field}}", "templated", 23), (" ", "literal", 32), ("{% for i in [1, 3]%}", "block_start", 33, 1, "for"), (", fld_", "literal", 53, 1), ("{{i}}", "templated", 59, 1), ("{% endfor %}", "block_end", 64, 1, "endfor"), (" FROM my_schema.", "literal", 76, 2), ("{{my_table}}", "templated", 92, 2), (" ", "literal", 104, 2), ], ), ( "{% set thing %}FOO{% endset %} BAR", [ ("{% set thing %}", "block_start", 0, 1, "set"), ("FOO", "literal", 15, 1), ("{% endset %}", "block_end", 18, 1, "endset"), (" BAR", "literal", 30, 2), ], ), ( # Tests Jinja "block assignment" syntax. Also tests the use of # template substitution within the block: {{ "dev" }}. """{% set my_query %} select 1 from foobarfoobarfoobarfoobar_{{ "dev" }} {% endset %} {{ my_query }} """, [ ("{% set my_query %}", "block_start", 0, 1, "set"), ("\nselect 1 from foobarfoobarfoobarfoobar_", "literal", 18, 1), ('{{ "dev" }}', "templated", 58, 1), ("\n", "literal", 69, 1), ("{% endset %}", "block_end", 70, 1, "endset"), ("\n", "literal", 82, 2), ("{{ my_query }}", "templated", 83, 2), ("\n", "literal", 97, 2), ], ), # Tests for jinja blocks that consume whitespace. ( """SELECT 1 FROM {%+if true-%} {{ref('foo')}} {%-endif%}""", [ ("SELECT 1 FROM ", "literal", 0), ("{%+if true-%}", "block_start", 14, 1, "if"), (" ", "literal", 27, 1), ("{{ref('foo')}}", "templated", 28, 1), (" ", "literal", 42, 1), ("{%-endif%}", "block_end", 43, 1, "endif"), ], ), ( """{% for item in some_list -%} SELECT * FROM some_table {{ "UNION ALL\n" if not loop.last }} {%- endfor %}""", [ ("{% for item in some_list -%}", "block_start", 0, 1, "for"), # This gets consumed in the templated file, but it's still here. ("\n ", "literal", 28, 1), ("SELECT *\n FROM some_table\n", "literal", 33, 1), ('{{ "UNION ALL\n" if not loop.last }}', "templated", 62, 1), ("\n", "literal", 97, 1), ("{%- endfor %}", "block_end", 98, 1, "endfor"), ], ), ( JINJA_MACRO_CALL_SQL, [ ("{% macro render_name(title) %}", "block_start", 0, 1, "macro"), ("\n" " '", "literal", 30, 1), ("{{ title }}", "templated", 34, 1), (". 
foo' as ", "literal", 45, 1), ("{{ caller() }}", "templated", 55, 1), ("\n", "literal", 69, 1), ("{% endmacro %}", "block_end", 70, 1, "endmacro"), ("\n" "SELECT\n" " ", "literal", 84, 2), ("{% call render_name('Sir') %}", "block_start", 96, 3, "call"), ("\n" " bar\n" " ", "literal", 125, 3), ("{% endcall %}", "block_end", 142, 3, "endcall"), ("\n" "FROM baz\n", "literal", 155, 4), ], ), ], ) def test__templater_jinja_slice_template(test, result): """Test _slice_template.""" templater = JinjaTemplater() env, _, render_func = templater.construct_render_func() analyzer = JinjaAnalyzer(test, env) analyzer.analyze(render_func=render_func) resp = analyzer.raw_sliced # check contiguous (unless there's a comment in it) if "{#" not in test: assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_slice in resp: assert raw_slice.source_idx == idx idx += len(raw_slice.raw) # Check total result assert resp == [RawFileSlice(*args) for args in result] def _statement(*args, **kwargs): # NOTE: The standard dbt statement() call returns nothing. return "" def _load_result(*args, **kwargs): return "_load_result" @pytest.mark.parametrize( "raw_file,override_context,result", [ ("", None, []), ("foo", None, [("literal", slice(0, 3, None), slice(0, 3, None))]), # Example with no loops ( "SELECT {{blah}}, boo {# comment #} from something", dict(blah="foobar"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 15, None), slice(7, 13, None)), ("literal", slice(15, 21, None), slice(13, 19, None)), ("comment", slice(21, 34, None), slice(19, 19, None)), ("literal", slice(34, 49, None), slice(19, 34, None)), ], ), # Example with loops ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3, 7]%}, " "fld_{{i}}_x{% endfor %} FROM my_schema.{{my_table}} " ), dict(field="foobar", my_table="barfoo"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("comment", slice(7, 22, None), slice(7, 7, None)), ("literal", slice(22, 23, None), slice(7, 8, None)), ("templated", slice(23, 32, None), slice(8, 14, None)), ("literal", slice(32, 33, None), slice(14, 15, None)), ("block_start", slice(33, 56, None), slice(15, 15, None)), ("literal", slice(56, 62, None), slice(15, 21, None)), ("templated", slice(62, 67, None), slice(21, 22, None)), ("literal", slice(67, 69, None), slice(22, 24, None)), ("block_end", slice(69, 81, None), slice(24, 24, None)), ("literal", slice(56, 62, None), slice(24, 30, None)), ("templated", slice(62, 67, None), slice(30, 31, None)), ("literal", slice(67, 69, None), slice(31, 33, None)), ("block_end", slice(69, 81, None), slice(33, 33, None)), ("literal", slice(56, 62, None), slice(33, 39, None)), ("templated", slice(62, 67, None), slice(39, 40, None)), ("literal", slice(67, 69, None), slice(40, 42, None)), ("block_end", slice(69, 81, None), slice(42, 42, None)), ("literal", slice(81, 97, None), slice(42, 58, None)), ("templated", slice(97, 109, None), slice(58, 64, None)), ("literal", slice(109, 110, None), slice(64, 65, None)), ], ), # Example with loops (and utilising the end slice code) ( ( "SELECT {# A comment #} {{field}} {% for i in [1, 3, 7]%}, " "fld_{{i}}{% endfor %} FROM my_schema.{{my_table}} " ), dict(field="foobar", my_table="barfoo"), [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("comment", slice(7, 22, None), slice(7, 7, None)), ("literal", slice(22, 23, None), slice(7, 8, None)), ("templated", slice(23, 32, None), slice(8, 14, None)), ("literal", slice(32, 33, None), slice(14, 15, None)), ("block_start", slice(33, 56, None), 
slice(15, 15, None)), ("literal", slice(56, 62, None), slice(15, 21, None)), ("templated", slice(62, 67, None), slice(21, 22, None)), ("block_end", slice(67, 79, None), slice(22, 22, None)), ("literal", slice(56, 62, None), slice(22, 28, None)), ("templated", slice(62, 67, None), slice(28, 29, None)), ("block_end", slice(67, 79, None), slice(29, 29, None)), ("literal", slice(56, 62, None), slice(29, 35, None)), ("templated", slice(62, 67, None), slice(35, 36, None)), ("block_end", slice(67, 79, None), slice(36, 36, None)), ("literal", slice(79, 95, None), slice(36, 52, None)), ("templated", slice(95, 107, None), slice(52, 58, None)), ("literal", slice(107, 108, None), slice(58, 59, None)), ], ), # Test a trailing split, and some variables which don't refer anything. ( "{{ config(materialized='view') }}\n\nSELECT 1 FROM {{ source('finance', " "'reconciled_cash_facts') }}\n\n", dict( config=lambda *args, **kwargs: "", source=lambda *args, **kwargs: "finance_reconciled_cash_facts", ), [ ("templated", slice(0, 33, None), slice(0, 0, None)), ("literal", slice(33, 49, None), slice(0, 16, None)), ("templated", slice(49, 97, None), slice(16, 45, None)), ("literal", slice(97, 99, None), slice(45, 47, None)), ], ), # Test splitting with a loop. ( "SELECT\n " "{% for i in [1, 2, 3] %}\n , " "c_{{i}}+42 AS the_meaning_of_li{{ 'f' * i }}\n " "{% endfor %}\n" "FROM my_table", None, [ ("literal", slice(0, 11, None), slice(0, 11, None)), ("block_start", slice(11, 35, None), slice(11, 11, None)), ("literal", slice(35, 48, None), slice(11, 24, None)), ("templated", slice(48, 53, None), slice(24, 25, None)), ("literal", slice(53, 77, None), slice(25, 49, None)), ("templated", slice(77, 90, None), slice(49, 50, None)), ("literal", slice(90, 95, None), slice(50, 55, None)), ("block_end", slice(95, 107, None), slice(55, 55, None)), ("literal", slice(35, 48, None), slice(55, 68, None)), ("templated", slice(48, 53, None), slice(68, 69, None)), ("literal", slice(53, 77, None), slice(69, 93, None)), ("templated", slice(77, 90, None), slice(93, 95, None)), ("literal", slice(90, 95, None), slice(95, 100, None)), ("block_end", slice(95, 107, None), slice(100, 100, None)), ("literal", slice(35, 48, None), slice(100, 113, None)), ("templated", slice(48, 53, None), slice(113, 114, None)), ("literal", slice(53, 77, None), slice(114, 138, None)), ("templated", slice(77, 90, None), slice(138, 141, None)), ("literal", slice(90, 95, None), slice(141, 146, None)), ("block_end", slice(95, 107, None), slice(146, 146, None)), ("literal", slice(107, 121, None), slice(146, 160, None)), ], ), # Test an example where a block is removed entirely. ( "{% set thing %}FOO{% endset %} SELECT 1", None, [ ("block_start", slice(0, 15, None), slice(0, 0, None)), ("literal", slice(15, 18, None), slice(0, 0, None)), ("block_end", slice(18, 30, None), slice(0, 0, None)), ("literal", slice(30, 39, None), slice(0, 9, None)), ], ), ( # Tests Jinja "include" directive. """{% include 'subdir/include_comment.sql' %} SELECT 1 """, None, [ ("templated", slice(0, 42, None), slice(0, 18, None)), ("literal", slice(42, 53, None), slice(18, 29, None)), ], ), ( # Tests Jinja "import" directive. """{% import 'echo.sql' as echo %} SELECT 1 """, None, [ ("templated", slice(0, 31, None), slice(0, 0, None)), ("literal", slice(31, 42, None), slice(0, 11, None)), ], ), ( # Tests Jinja "from import" directive.. 
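# (Editorial elaboration: import-style directives render to nothing, so their "templated" entries below map to zero-length templated slices such as slice(0, 0) and slice(1, 1).)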
"""{% from 'echo.sql' import echo %} {% from 'echoecho.sql' import echoecho %} SELECT {{ echo("foo") }}, {{ echoecho("bar") }} """, None, [ ("templated", slice(0, 33, None), slice(0, 0, None)), ("literal", slice(33, 34, None), slice(0, 1, None)), ("templated", slice(34, 75, None), slice(1, 1, None)), ("literal", slice(75, 88, None), slice(1, 14, None)), ("templated", slice(88, 105, None), slice(14, 19, None)), ("literal", slice(105, 111, None), slice(19, 25, None)), ("templated", slice(111, 132, None), slice(25, 34, None)), ("literal", slice(132, 133, None), slice(34, 35, None)), ], ), ( # Tests Jinja "do" directive. Should be treated as a # templated instead of block - issue 4603. """{% do true %} {% if true %} select 1 {% endif %}""", None, [ ("templated", slice(0, 13, None), slice(0, 0, None)), ("literal", slice(13, 15, None), slice(0, 2, None)), ("block_start", slice(15, 28, None), slice(2, 2, None)), ("literal", slice(28, 42, None), slice(2, 16, None)), ("block_end", slice(42, 53, None), slice(16, 16, None)), ], ), ( # Tests issue 2541, a bug where the {%- endfor %} was causing # IndexError: list index out of range. """{% for x in ['A', 'B'] %} {% if x != 'A' %} SELECT 'E' {% endif %} {%- endfor %} """, None, [ ("block_start", slice(0, 25, None), slice(0, 0, None)), ("literal", slice(25, 30, None), slice(0, 5, None)), ("block_start", slice(30, 47, None), slice(5, 5, None)), ("block_end", slice(67, 78, None), slice(5, 5, None)), ("literal", slice(78, 79, None), slice(5, 5, None)), ("block_end", slice(79, 92, None), slice(5, 5, None)), ("literal", slice(25, 30, None), slice(5, 10, None)), ("block_start", slice(30, 47, None), slice(10, 10, None)), ("literal", slice(47, 67, None), slice(10, 30, None)), ("block_end", slice(67, 78, None), slice(30, 30, None)), ("literal", slice(78, 79, None), slice(30, 30, None)), ("block_end", slice(79, 92, None), slice(30, 30, None)), ("literal", slice(92, 93, None), slice(30, 31, None)), ], ), ( # Similar to the test above for issue 2541, but it's even trickier: # whitespace control everywhere and NO NEWLINES or other characters # between Jinja segments. In order to get a thorough-enough trace, # JinjaTracer has to build the alternate template with whitespace # control removed, as this increases the amount of trace output. "{%- for x in ['A', 'B'] -%}" "{%- if x == 'B' -%}" "SELECT 'B';" "{%- endif -%}" "{%- if x == 'A' -%}" "SELECT 'A';" "{%- endif -%}" "{%- endfor -%}", None, [ ("block_start", slice(0, 27, None), slice(0, 0, None)), ("block_start", slice(27, 46, None), slice(0, 0, None)), ("block_end", slice(57, 70, None), slice(0, 0, None)), ("block_start", slice(70, 89, None), slice(0, 0, None)), ("literal", slice(89, 100, None), slice(0, 11, None)), ("block_end", slice(100, 113, None), slice(11, 11, None)), ("block_end", slice(113, 127, None), slice(11, 11, None)), ("block_start", slice(27, 46, None), slice(11, 11, None)), ("literal", slice(46, 57, None), slice(11, 22, None)), ("block_end", slice(57, 70, None), slice(22, 22, None)), ("block_start", slice(70, 89, None), slice(22, 22, None)), ("block_end", slice(100, 113, None), slice(22, 22, None)), ("block_end", slice(113, 127, None), slice(22, 22, None)), ], ), ( # Test for issue 2786. Also lots of whitespace control. In this # case, removing whitespace control alone wasn't enough. In order # to get a good trace, JinjaTracer had to be updated so the # alternate template included output for the discarded whitespace. 
"""select id, {%- for features in ["value4", "value5"] %} {%- if features in ["value7"] %} {{features}} {%- if not loop.last -%},{% endif %} {%- else -%} {{features}} {%- if not loop.last -%},{% endif %} {%- endif -%} {%- endfor %} from my_table """, None, [ ("literal", slice(0, 14, None), slice(0, 14, None)), ("literal", slice(14, 19, None), slice(14, 14, None)), ("block_start", slice(19, 62, None), slice(14, 14, None)), ("literal", slice(62, 71, None), slice(14, 14, None)), ("block_start", slice(71, 103, None), slice(14, 14, None)), ("block_mid", slice(186, 198, None), slice(14, 14, None)), ("literal", slice(198, 211, None), slice(14, 14, None)), ("templated", slice(211, 223, None), slice(14, 20, None)), ("literal", slice(223, 236, None), slice(20, 20, None)), ("block_start", slice(236, 260, None), slice(20, 20, None)), ("literal", slice(260, 261, None), slice(20, 21, None)), ("block_end", slice(261, 272, None), slice(21, 21, None)), ("literal", slice(272, 281, None), slice(21, 21, None)), ("block_end", slice(281, 294, None), slice(21, 21, None)), ("literal", slice(294, 299, None), slice(21, 21, None)), ("block_end", slice(299, 312, None), slice(21, 21, None)), ("literal", slice(62, 71, None), slice(21, 21, None)), ("block_start", slice(71, 103, None), slice(21, 21, None)), ("block_mid", slice(186, 198, None), slice(21, 21, None)), ("literal", slice(198, 211, None), slice(21, 21, None)), ("templated", slice(211, 223, None), slice(21, 27, None)), ("literal", slice(223, 236, None), slice(27, 27, None)), ("block_start", slice(236, 260, None), slice(27, 27, None)), ("block_end", slice(261, 272, None), slice(27, 27, None)), ("literal", slice(272, 281, None), slice(27, 27, None)), ("block_end", slice(281, 294, None), slice(27, 27, None)), ("literal", slice(294, 299, None), slice(27, 27, None)), ("block_end", slice(299, 312, None), slice(27, 27, None)), ("literal", slice(312, 327, None), slice(27, 42, None)), ], ), ( # Test for issue 2835. There's no space between "col" and "=". # Also tests for issue 3750 that self contained set statements # are parsed as "templated" and not "block_start". """{% set col= "col1" %} SELECT {{ col }} """, None, [ ("templated", slice(0, 21, None), slice(0, 0, None)), ("literal", slice(21, 29, None), slice(0, 8, None)), ("templated", slice(29, 38, None), slice(8, 12, None)), ("literal", slice(38, 39, None), slice(12, 13, None)), ], ), ( # Another test for issue 2835. The {% for %} loop inside the # {% set %} caused JinjaTracer to think the {% set %} ended # at the {% endfor %} """{% set some_part_of_the_query %} {% for col in ["col1"] %} {{col}} {% endfor %} {% endset %} SELECT {{some_part_of_the_query}} FROM SOME_TABLE """, None, [ ("block_start", slice(0, 32, None), slice(0, 0, None)), ("literal", slice(32, 37, None), slice(0, 0, None)), ("block_start", slice(37, 62, None), slice(0, 0, None)), ("literal", slice(62, 67, None), slice(0, 0, None)), ("templated", slice(67, 74, None), slice(0, 0, None)), ("literal", slice(74, 79, None), slice(0, 0, None)), ("block_end", slice(79, 91, None), slice(0, 0, None)), ("literal", slice(91, 92, None), slice(0, 0, None)), ("block_end", slice(92, 104, None), slice(0, 0, None)), ("literal", slice(104, 113, None), slice(0, 9, None)), ("templated", slice(113, 139, None), slice(9, 29, None)), ("literal", slice(139, 156, None), slice(29, 46, None)), ], ), ( # Third test for issue 2835. This was the original SQL provided in # the issue report. 
# Also tests for issue 3750 that self contained set statements # are parsed as "templated" and not "block_start". """{% set whitelisted= [ {'name': 'COL_1'}, {'name': 'COL_2'}, {'name': 'COL_3'} ] %} {% set some_part_of_the_query %} {% for col in whitelisted %} {{col.name}}{{ ", " if not loop.last }} {% endfor %} {% endset %} SELECT {{some_part_of_the_query}} FROM SOME_TABLE """, None, [ ("templated", slice(0, 94, None), slice(0, 0, None)), ("literal", slice(94, 96, None), slice(0, 2, None)), ("block_start", slice(96, 128, None), slice(2, 2, None)), ("literal", slice(128, 133, None), slice(2, 2, None)), ("block_start", slice(133, 161, None), slice(2, 2, None)), ("literal", slice(161, 166, None), slice(2, 2, None)), ("templated", slice(166, 178, None), slice(2, 2, None)), ("templated", slice(178, 205, None), slice(2, 2, None)), ("literal", slice(205, 210, None), slice(2, 2, None)), ("block_end", slice(210, 222, None), slice(2, 2, None)), ("literal", slice(222, 223, None), slice(2, 2, None)), ("block_end", slice(223, 235, None), slice(2, 2, None)), ("literal", slice(235, 244, None), slice(2, 11, None)), ("templated", slice(244, 270, None), slice(11, 66, None)), ("literal", slice(270, 287, None), slice(66, 83, None)), ], ), ( # Test for issue 2822: Handle slicing when there's no newline after # the Jinja block end. "{% if true %}\nSELECT 1 + 1\n{%- endif %}", None, [ ("block_start", slice(0, 13, None), slice(0, 0, None)), ("literal", slice(13, 26, None), slice(0, 13, None)), ("literal", slice(26, 27, None), slice(13, 13, None)), ("block_end", slice(27, 39, None), slice(13, 13, None)), ], ), ( # Test for issue 3434: Handle {% block %}. "SELECT {% block table_name %}block_contents{% endblock %} " "FROM {{ self.table_name() }}\n", None, [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("literal", slice(29, 43, None), slice(7, 21, None)), ("block_start", slice(7, 29, None), slice(21, 21, None)), ("literal", slice(29, 43, None), slice(21, 21, None)), ("block_end", slice(43, 57, None), slice(21, 21, None)), ("literal", slice(57, 63, None), slice(21, 27, None)), ("templated", slice(63, 86, None), slice(27, 27, None)), ("literal", slice(29, 43, None), slice(27, 41, None)), ("literal", slice(86, 87, None), slice(41, 42, None)), ], ), ( # Another test for issue 3434: Similar to the first, but uses # the block inside a loop. 
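# (Editorial elaboration: because the {% block %} body is re-rendered on each loop pass, the same source slice, e.g. slice(22, 36), recurs at several different templated positions in the expectation below.)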
"""{% block table_name %}block_contents{% endblock %} SELECT {% for j in [4, 5, 6] %} FROM {{ j }}{{ self.table_name() }} {% endfor %} """, None, [ ("literal", slice(22, 36, None), slice(0, 14, None)), ("block_start", slice(0, 22, None), slice(14, 14, None)), ("literal", slice(22, 36, None), slice(14, 14, None)), ("block_end", slice(36, 50, None), slice(14, 14, None)), ("literal", slice(50, 58, None), slice(14, 22, None)), ("block_start", slice(58, 82, None), slice(22, 22, None)), ("literal", slice(82, 88, None), slice(22, 28, None)), ("templated", slice(88, 95, None), slice(28, 29, None)), ("templated", slice(95, 118, None), slice(29, 29, None)), ("literal", slice(22, 36, None), slice(29, 43, None)), ("literal", slice(118, 119, None), slice(43, 44, None)), ("block_end", slice(119, 131, None), slice(44, 44, None)), ("literal", slice(82, 88, None), slice(44, 50, None)), ("templated", slice(88, 95, None), slice(50, 51, None)), ("templated", slice(95, 118, None), slice(51, 51, None)), ("literal", slice(22, 36, None), slice(51, 65, None)), ("literal", slice(118, 119, None), slice(65, 66, None)), ("block_end", slice(119, 131, None), slice(66, 66, None)), ("literal", slice(82, 88, None), slice(66, 72, None)), ("templated", slice(88, 95, None), slice(72, 73, None)), ("templated", slice(95, 118, None), slice(73, 73, None)), ("literal", slice(22, 36, None), slice(73, 87, None)), ("literal", slice(118, 119, None), slice(87, 88, None)), ("block_end", slice(119, 131, None), slice(88, 88, None)), ("literal", slice(131, 132, None), slice(88, 89, None)), ], ), ( "{{ statement('variables', fetch_result=true) }}\n", dict( statement=_statement, load_result=_load_result, ), [ ("templated", slice(0, 47, None), slice(0, 0, None)), ("literal", slice(47, 48, None), slice(0, 1, None)), ], ), ( "{% call statement('variables', fetch_result=true) %}\n" "select 1 as test\n" "{% endcall %}\n" "select 2 as foo\n", dict( statement=_statement, load_result=_load_result, ), [ ("block_start", slice(0, 52, None), slice(0, 0, None)), ("literal", slice(52, 70, None), slice(0, 0, None)), ("block_end", slice(70, 83, None), slice(0, 0, None)), ("literal", slice(83, 100, None), slice(0, 17, None)), ], ), ( JINJA_MACRO_CALL_SQL, None, [ # First all of this is the call block. ("block_start", slice(0, 30, None), slice(0, 0, None)), ("literal", slice(30, 34, None), slice(0, 0, None)), ("templated", slice(34, 45, None), slice(0, 0, None)), ("literal", slice(45, 55, None), slice(0, 0, None)), ("templated", slice(55, 69, None), slice(0, 0, None)), ("literal", slice(69, 70, None), slice(0, 0, None)), ("block_end", slice(70, 84, None), slice(0, 0, None)), # Then the actual query. ("literal", slice(84, 96, None), slice(0, 12, None)), # The block_start (call) contains the actual content. 
("block_start", slice(96, 125, None), slice(12, 47, None)), # The middle and end of the call, have zero length in the template ("literal", slice(125, 142, None), slice(47, 47, None)), ("block_end", slice(142, 155, None), slice(47, 47, None)), ("literal", slice(155, 165, None), slice(47, 57, None)), ], ), ], ) def test__templater_jinja_slice_file(raw_file, override_context, result, caplog): """Test slice_file.""" templater = JinjaTemplater(override_context=override_context) _, _, render_func = templater.construct_render_func( config=FluffConfig.from_path( "test/fixtures/templater/jinja_slice_template_macros" ) ) with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): raw_sliced, sliced_file, templated_str = templater.slice_file( raw_file, render_func=render_func ) # Create a TemplatedFile from the results. This runs some useful sanity # checks. _ = TemplatedFile(raw_file, "<>", templated_str, sliced_file, raw_sliced) # Check contiguous on the TEMPLATED VERSION print(sliced_file) prev_slice = None for elem in sliced_file: print(elem) if prev_slice: assert elem[2].start == prev_slice.stop prev_slice = elem[2] # Check that all literal segments have a raw slice for elem in sliced_file: if elem[0] == "literal": assert elem[1] is not None # check result actual = [ ( templated_file_slice.slice_type, templated_file_slice.source_slice, templated_file_slice.templated_slice, ) for templated_file_slice in sliced_file ] assert actual == result def test__templater_jinja_large_file_check(): """Test large file skipping. The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without specific config. # i.e. check the defaults work and the default is high. JinjaTemplater().process( in_str="SELECT 1", fname="", config=FluffConfig(overrides={"dialect": "ansi"}), ) # Second check setting the value low disables the check JinjaTemplater().process( in_str="SELECT 1", fname="", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 0} ), ) # Finally check we raise a skip exception when config is set low. with pytest.raises(SQLFluffSkipFile) as excinfo: JinjaTemplater().process( in_str="SELECT 1", fname="", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 2}, ), ) assert "Length of file" in str(excinfo.value) @pytest.mark.parametrize( "ignore, expected_violation", [ ( "", SQLTemplaterError( "Undefined jinja template variable: 'test_event_cadence'" ), ), ("templating", None), ], ) def test_jinja_undefined_callable(ignore, expected_violation): """Test undefined callable returns TemplatedFile and sensible error.""" templater = JinjaTemplater() templated_file, violations = templater.process( in_str="""WITH streams_cadence_test AS ( {{ test_event_cadence( model= ref('fct_recording_progression_stream'), grouping_column='archive_id', time_column='timestamp', date_part='minute', threshold=1) }} ) SELECT * FROM final """, fname="test.sql", config=FluffConfig(overrides={"dialect": "ansi", "ignore": ignore}), ) # This was previously failing to process, due to UndefinedRecorder not # supporting __call__(), also Jinja thinking it was not *safe* to call. 
assert templated_file is not None if expected_violation: assert len(violations) == 1 assert isinstance(violations[0], type(expected_violation)) assert str(violations[0]) == str(expected_violation) else: assert len(violations) == 0 def test_dummy_undefined_fail_with_undefined_error(): """Tests that a recursion error bug no longer occurs.""" ud = DummyUndefined("name") with pytest.raises(UndefinedError): # This was previously causing a recursion error. ud._fail_with_undefined_error() def test_undefined_magic_methods(): """Test all the magic methods defined on DummyUndefined.""" ud = DummyUndefined("name") # _self_impl assert ud + ud is ud assert ud - ud is ud assert ud / ud is ud assert ud // ud is ud assert ud % ud is ud assert ud**ud is ud assert +ud is ud assert -ud is ud assert ud << ud is ud assert ud[ud] is ud assert ~ud is ud assert ud(ud) is ud # _bool_impl assert ud and ud assert ud or ud assert ud ^ ud assert bool(ud) assert ud < ud assert ud <= ud assert ud == ud assert ud != ud assert ud >= ud assert ud > ud assert ud + ud is ud @pytest.mark.parametrize( "sql_path, expected_renderings", [ pytest.param( "simple_if_true.sql", [ "\nSELECT 1\n\n", "\nSELECT 2\n\n", ], id="simple_if_true", ), pytest.param( "simple_if_false.sql", [ "\nSELECT 2\n\n", "\nSELECT 1\n\n", ], id="simple_if_false", ), pytest.param( "if_elif_else.sql", [ "\nSELECT 1\n\n", "\nSELECT 2\n\n", "\nSELECT 3\n\n", ], id="if_elif_else", ), pytest.param( "if_else_if_nested.sql", [ "\nSELECT 1\n\n", "\n\nSELECT 2\n\n\n", "\n\nSELECT 3\n\n\n", ], id="if_else_if_nested", ), # This test case exercises the scoring function. Generates up to 10 # variants, but only the top 5 are returned. pytest.param( "if_elif_else_chain_scoring.sql", [ "\nSELECT 1\n\n", "\nSELECT 100000000\n\n", "\nSELECT 10000000\n\n", "\nSELECT 1000000\n\n", "\nSELECT 100000\n\n", "\nSELECT 10000\n\n", ], id="if_elif_else_chain_scoring", ), # This test case results in a TypeError executing the variant. This # should be ignored, and only the primary should be returned.
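# (Editorial elaboration: the failing variant is dropped silently, so the expected list below contains the primary rendering plus the one variant that renders cleanly.)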
pytest.param( "if_true_elif_type_error_else.sql", [ "\nSELECT 1\n\n", "\nSELECT 2\n\n", ], id="if_true_elif_type_error_else", ), ], ) def test__templater_lint_unreached_code(sql_path: str, expected_renderings): """Test that the Jinja templater renders the expected variants for unreached code.""" test_dir = Path("test/fixtures/templater/jinja_lint_unreached_code") t = JinjaTemplater() renderings = [] for templated_file, _ in t.process_with_variants( in_str=(test_dir / sql_path).read_text(), fname=str(sql_path), config=FluffConfig.from_path(str(test_dir)), ): renderings.append(templated_file.templated_str) assert renderings == expected_renderings sqlfluff-2.3.5/test/core/templaters/placeholder_test.py000066400000000000000000000255011451700765000233350ustar00rootroot00000000000000"""Tests for templaters.""" import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.templaters import PlaceholderTemplater def test__templater_raw(): """Test the templaters when nothing has to be replaced.""" t = PlaceholderTemplater(override_context=dict(param_style="colon")) instr = "SELECT * FROM {{blah}} WHERE %(gnepr)s OR e~':'" outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == instr @pytest.mark.parametrize( "instr, param_style, expected_outstr, values", [ ( "SELECT * FROM f, o, o WHERE a < 10\n\n", "colon", "SELECT * FROM f, o, o WHERE a < 10\n\n", dict( unused=7777, ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE userid = :user_id AND date > :start_date """, "colon", """ SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01' """, dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE userid = :user_id AND date > :start_date""", "colon", """ SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01'""", dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN :city_ids AND date > '2020-10-01' """, "colon", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, dict( user_id="42", start_date="'2021-10-01'", city_ids="(1, 2, 3, 45)", ), ), ( """ SELECT user_mail, city_id FROM users_data:table_suffix """, "colon_nospaces", """ SELECT user_mail, city_id FROM users_data42 """, dict( table_suffix="42", ), ), ( # Postgres uses double-colons for type casts, see # https://www.postgresql.org/docs/current/sql-expressions.html#SQL-SYNTAX-TYPE-CASTS # This test ensures we don't confuse them with colon placeholders. """ SELECT user_mail, city_id, joined::date FROM users_data:table_suffix """, "colon_nospaces", """ SELECT user_mail, city_id, joined::date FROM users_data42 """, dict( table_suffix="42", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN ? AND date > ?
""", "question_mark", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "2": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN :1 AND date > :45 """, "numeric_colon", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "45": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN %(city_id)s AND date > %(date)s AND someflag = %(someflag)s LIMIT %(limit)s """, "pyformat", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' AND someflag = False LIMIT 15 """, dict( city_id="(1, 2, 3, 45)", date="'2020-10-01'", limit=15, someflag=False ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN $city_id AND date > $date OR date = ${date} """, "dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' OR date = '2020-10-01' """, dict( city_id="(1, 2, 3, 45)", date="'2020-10-01'", ), ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN $12 AND date > $90 """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "12": "(1, 2, 3, 45)", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN ${12} AND date > ${90} """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "12": "(1, 2, 3, 45)", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE user_mail = '${12}' AND date > ${90} """, "numeric_dollar", """ SELECT user_mail, city_id FROM users_data WHERE user_mail = 'test@example.com' AND date > '2020-10-01' """, { "12": "test@example.com", "90": "'2020-10-01'", }, ), ( """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN %s AND date > %s """, "percent", """ SELECT user_mail, city_id FROM users_data WHERE (city_id) IN (1, 2, 3, 45) AND date > '2020-10-01' """, { "1": "(1, 2, 3, 45)", "2": "'2020-10-01'", }, ), ( """ USE DATABASE &{env}_MARKETING; USE SCHEMA &&EMEA; SELECT user_mail, city_id FROM users_data WHERE userid = &user_id AND date > &{start_date} """, "ampersand", """ USE DATABASE PRD_MARKETING; USE SCHEMA &&EMEA; SELECT user_mail, city_id FROM users_data WHERE userid = 42 AND date > '2021-10-01' """, dict( env="PRD", user_id="42", start_date="'2021-10-01'", ), ), ( "USE ${flyway:database}.test_schema;", "flyway_var", "USE test_db.test_schema;", { "flyway:database": "test_db", }, ), ( "SELECT metadata$filename, $1 FROM @stg_data_export_${env_name};", "flyway_var", "SELECT metadata$filename, $1 FROM @stg_data_export_staging;", { "env_name": "staging", }, ), ( "SELECT metadata$filename, $1 FROM @stg_data_export_${env_name};", "flyway_var", "SELECT metadata$filename, $1 FROM @stg_data_export_env_name;", {}, ), ], ids=[ "no_changes", "colon_simple_substitution", "colon_accept_block_at_end", "colon_tuple_substitution", "colon_nospaces", "colon_nospaces_double_colon_ignored", "question_mark", "numeric_colon", "pyformat", "dollar", "numeric_dollar", "numeric_dollar_with_braces", "numeric_dollar_with_braces_and_string", "percent", "ampersand", "flyway_var", "flyway_var", "params_not_specified", ], ) def test__templater_param_style(instr, expected_outstr, param_style, values): """Test different param_style 
templating.""" t = PlaceholderTemplater(override_context={**values, "param_style": param_style}) outstr, _ = t.process( in_str=instr, fname="test", config=FluffConfig(overrides={"dialect": "ansi"}) ) assert str(outstr) == expected_outstr def test__templater_custom_regex(): """Test custom regex templating.""" t = PlaceholderTemplater( override_context=dict(param_regex="__(?P[\\w_]+)__", my_name="john") ) outstr, _ = t.process( in_str="SELECT bla FROM blob WHERE id = __my_name__", fname="test", config=FluffConfig(overrides={"dialect": "ansi"}), ) assert str(outstr) == "SELECT bla FROM blob WHERE id = john" def test__templater_setup(): """Test the exception raised when config is incomplete or ambiguous.""" t = PlaceholderTemplater(override_context=dict(name="'john'")) with pytest.raises( ValueError, match=( "No param_regex nor param_style was provided to the placeholder templater" ), ): t.process(in_str="SELECT 2+2", fname="test") t = PlaceholderTemplater( override_context=dict(param_style="bla", param_regex="bli") ) with pytest.raises( ValueError, match=r"Either param_style or param_regex must be provided, not both", ): t.process(in_str="SELECT 2+2", fname="test") def test__templater_styles(): """Test the exception raised when parameter style is unknown.""" t = PlaceholderTemplater(override_context=dict(param_style="pperccent")) with pytest.raises(ValueError, match=r"Unknown param_style"): t.process(in_str="SELECT 2+2", fname="test") sqlfluff-2.3.5/test/core/templaters/python_test.py000066400000000000000000000417311451700765000223770ustar00rootroot00000000000000"""Tests for templaters.""" import logging import pytest from sqlfluff.core import FluffConfig, SQLTemplaterError from sqlfluff.core.errors import SQLFluffSkipFile from sqlfluff.core.templaters import PythonTemplater from sqlfluff.core.templaters.base import RawFileSlice, TemplatedFileSlice from sqlfluff.core.templaters.python import IntermediateFileSlice PYTHON_STRING = "SELECT * FROM {blah}" def test__templater_python(): """Test the python templater.""" t = PythonTemplater(override_context=dict(blah="foo")) instr = PYTHON_STRING outstr, _ = t.process(in_str=instr, fname="test") assert str(outstr) == "SELECT * FROM foo" def test__templater_python_error(): """Test error handling in the python templater.""" t = PythonTemplater(override_context=dict(noblah="foo")) instr = PYTHON_STRING with pytest.raises(SQLTemplaterError): t.process(in_str=instr, fname="test") @pytest.mark.parametrize( "int_slice,templated_str,head_test,tail_test,int_test", [ # Test Invariante ( IntermediateFileSlice( "compound", slice(0, 5), slice(0, 5), [RawFileSlice("{{i}}", "templated", 0)], ), "foo", [], [], IntermediateFileSlice( "compound", slice(0, 5), slice(0, 5), [RawFileSlice("{{i}}", "templated", 0)], ), ), # Test Complete Trimming ( IntermediateFileSlice( "compound", slice(0, 3), slice(0, 3), [RawFileSlice("foo", "literal", 0)], ), "foo", [TemplatedFileSlice("literal", slice(0, 3), slice(0, 3))], [], IntermediateFileSlice( "compound", slice(3, 3), slice(3, 3), [], ), ), # Test Basic Trimming. ( IntermediateFileSlice( "compound", slice(0, 11), slice(0, 7), [ RawFileSlice("foo", "literal", 0), RawFileSlice("{{i}}", "templated", 3), RawFileSlice("bar", "literal", 8), ], ), "foo1bar", [TemplatedFileSlice("literal", slice(0, 3), slice(0, 3))], [TemplatedFileSlice("literal", slice(8, 11), slice(4, 7))], IntermediateFileSlice( "compound", slice(3, 8), slice(3, 4), [RawFileSlice("{{i}}", "templated", 3)], ), ), # Test stopping at blocks. 
( IntermediateFileSlice( "compound", slice(0, 34), slice(0, 24), [ RawFileSlice("foo", "literal", 0), RawFileSlice("{{for}}", "block_start", 3), RawFileSlice("foo", "literal", 10), RawFileSlice("{{i}}", "literal", 13), RawFileSlice("bar", "literal", 18), RawFileSlice("{{endfor}}", "block_end", 21), RawFileSlice("bar", "literal", 31), ], ), "foofoofoobarfoofoobarbar", [ TemplatedFileSlice("literal", slice(0, 3), slice(0, 3)), TemplatedFileSlice("block_start", slice(3, 10), slice(3, 3)), ], [ TemplatedFileSlice("block_end", slice(21, 31), slice(21, 21)), TemplatedFileSlice("literal", slice(31, 34), slice(21, 24)), ], IntermediateFileSlice( "compound", slice(10, 21), slice(3, 21), [ RawFileSlice("foo", "literal", 10), RawFileSlice("{{i}}", "literal", 13), RawFileSlice("bar", "literal", 18), ], ), ), ], ) def test__templater_python_intermediate__trim( int_slice, templated_str, head_test, tail_test, int_test ): """Test trimming IntermediateFileSlice.""" h, i, t = int_slice.trim_ends(templated_str=templated_str) assert h == head_test assert t == tail_test assert i == int_test @pytest.mark.parametrize( "mainstr,substrings,positions", [ ("", [], []), ("a", ["a"], [[0]]), ("foobar", ["o", "b"], [[1, 2], [3]]), ("bar foo bar foo", ["bar", "foo"], [[0, 8], [4, 12]]), ], ) def test__templater_python_substring_occurrences(mainstr, substrings, positions): """Test _substring_occurrences.""" occurrences = PythonTemplater._substring_occurrences(mainstr, substrings) assert isinstance(occurrences, dict) pos_test = [occurrences[substring] for substring in substrings] assert pos_test == positions @pytest.mark.parametrize( "test,result", [ ({}, []), ({"A": [1]}, [("A", 1)]), ( {"A": [3, 2, 1], "B": [4, 2]}, [("A", 1), ("A", 2), ("B", 2), ("A", 3), ("B", 4)], ), ], ) def test__templater_python_sorted_occurrence_tuples(test, result): """Test _sorted_occurrence_tuples.""" assert PythonTemplater._sorted_occurrence_tuples(test) == result @pytest.mark.parametrize( "test,result", [ ("", []), ("foo", [RawFileSlice("foo", "literal", 0)]), ( "foo {bar} z {{ y", [ RawFileSlice("foo ", "literal", 0), RawFileSlice("{bar}", "templated", 4), RawFileSlice(" z ", "literal", 9), RawFileSlice("{{", "escaped", 12), RawFileSlice(" y", "literal", 14), ], ), ], ) def test__templater_python_slice_template(test, result): """Test _slice_template.""" resp = list(PythonTemplater._slice_template(test)) # check contiguous assert "".join(elem.raw for elem in resp) == test # check indices idx = 0 for raw_file_slice in resp: assert raw_file_slice.source_idx == idx idx += len(raw_file_slice.raw) # Check total result assert resp == result @pytest.mark.parametrize( "raw_sliced,literals,raw_occurrences,templated_occurrences,templated_length,result", [ ([], [], {}, {}, 0, []), ( [RawFileSlice("foo", "literal", 0)], ["foo"], {"foo": [0]}, {"foo": [0]}, 3, [ IntermediateFileSlice( "invariant", slice(0, 3, None), slice(0, 3, None), [RawFileSlice("foo", "literal", 0)], ) ], ), ], ) def test__templater_python_split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, result, ): """Test _split_invariants.""" resp = list( PythonTemplater._split_invariants( raw_sliced, literals, raw_occurrences, templated_occurrences, templated_length, ) ) # check result assert resp == result @pytest.mark.parametrize( "split_file,raw_occurrences,templated_occurrences,templated_str,result", [ ([], {}, {}, "", []), ( [ IntermediateFileSlice( "invariant", slice(0, 3, None), slice(0, 3, None), [RawFileSlice("foo", "literal", 0)], ) ], 
{"foo": [0]}, {"foo": [0]}, "foo", [TemplatedFileSlice("literal", slice(0, 3, None), slice(0, 3, None))], ), ( [ IntermediateFileSlice( "invariant", slice(0, 7, None), slice(0, 7, None), [RawFileSlice("SELECT ", "literal", 0)], ), IntermediateFileSlice( "compound", slice(7, 24, None), slice(7, 22, None), [ RawFileSlice("{blah}", "templated", 7), RawFileSlice(", ", "literal", 13), RawFileSlice("{foo:.2f}", "templated", 15), ], ), IntermediateFileSlice( "invariant", slice(24, 33, None), slice(22, 31, None), [RawFileSlice(" as foo, ", "literal", 22)], ), IntermediateFileSlice( "simple", slice(33, 38, None), slice(31, 35, None), [RawFileSlice("{bar}", "templated", 33)], ), IntermediateFileSlice( "invariant", slice(38, 41, None), slice(35, 38, None), [RawFileSlice(", '", "literal", 35)], ), IntermediateFileSlice( "compound", slice(41, 45, None), slice(38, 40, None), [ RawFileSlice("{{", "escaped", 41), RawFileSlice("}}", "escaped", 43), ], ), IntermediateFileSlice( "invariant", slice(45, 76, None), slice(40, 71, None), [RawFileSlice("' as convertible from something", "literal", 40)], ), ], { "SELECT ": [0], ", ": [13, 31, 38], " as foo, ": [24], ", '": [38], "' as convertible from something": [45], }, { "SELECT ": [0], ", ": [14, 29, 35], " as foo, ": [22], ", '": [35], "' as convertible from something": [40], }, "SELECT nothing, 435.24 as foo, spam, '{}' as convertible from something", [ TemplatedFileSlice("literal", slice(0, 7, None), slice(0, 7, None)), TemplatedFileSlice("templated", slice(7, 13, None), slice(7, 14, None)), TemplatedFileSlice("literal", slice(13, 15, None), slice(14, 16, None)), TemplatedFileSlice( "templated", slice(15, 24, None), slice(16, 22, None) ), TemplatedFileSlice("literal", slice(24, 33, None), slice(22, 31, None)), TemplatedFileSlice( "templated", slice(33, 38, None), slice(31, 35, None) ), TemplatedFileSlice("literal", slice(38, 41, None), slice(35, 38, None)), TemplatedFileSlice("escaped", slice(41, 45, None), slice(38, 40, None)), TemplatedFileSlice("literal", slice(45, 76, None), slice(40, 71, None)), ], ), # Check for recursion error in non-exact raw cases. ( [ IntermediateFileSlice( "compound", slice(0, 13, None), slice(0, 9, None), [ RawFileSlice("{foo}", "templated", 0), RawFileSlice(" , ", "literal", 5), RawFileSlice("{bar}", "templated", 8), ], ), ], {",": [6]}, {",": [4]}, "foo , bar", [ TemplatedFileSlice("templated", slice(0, 5, None), slice(0, 3, None)), # Alternate implementations which group these next three together # would also be fine. 
TemplatedFileSlice("literal", slice(5, 6, None), slice(3, 4, None)), TemplatedFileSlice("literal", slice(6, 7, None), slice(4, 5, None)), TemplatedFileSlice("literal", slice(7, 8, None), slice(5, 6, None)), TemplatedFileSlice("templated", slice(8, 13, None), slice(6, 9, None)), ], ), ], ) def test__templater_python_split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, result, caplog ): """Test _split_uniques_coalesce_rest.""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"): resp = list( PythonTemplater._split_uniques_coalesce_rest( split_file, raw_occurrences, templated_occurrences, templated_str, ) ) # Check contiguous prev_slice = None for elem in result: if prev_slice: assert elem[1].start == prev_slice[0].stop assert elem[2].start == prev_slice[1].stop prev_slice = (elem[1], elem[2]) # check result assert resp == result @pytest.mark.parametrize( "raw_file,templated_file,unwrap_wrapped,result", [ ("", "", True, []), ( "foo", "foo", True, [("literal", slice(0, 3, None), slice(0, 3, None))], ), ( "SELECT {blah}, {foo:.2f} as foo, {bar}, '{{}}' as convertible from " "something", "SELECT nothing, 435.24 as foo, spam, '{}' as convertible from something", True, [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 13, None), slice(7, 14, None)), ("literal", slice(13, 15, None), slice(14, 16, None)), ("templated", slice(15, 24, None), slice(16, 22, None)), ("literal", slice(24, 33, None), slice(22, 31, None)), ("templated", slice(33, 38, None), slice(31, 35, None)), ("literal", slice(38, 41, None), slice(35, 38, None)), ("escaped", slice(41, 45, None), slice(38, 40, None)), ("literal", slice(45, 76, None), slice(40, 71, None)), ], ), # Test a wrapped example. Given the default config is to unwrap any wrapped # queries, it should ignore the ends in the sliced file. ( "SELECT {blah} FROM something", "WITH wrap AS (SELECT nothing FROM something) SELECT * FROM wrap", True, # The sliced version should have trimmed the ends [ ("literal", slice(0, 7, None), slice(0, 7, None)), ("templated", slice(7, 13, None), slice(7, 14, None)), ("literal", slice(13, 28, None), slice(14, 29, None)), ], ), ( "SELECT {blah} FROM something", "WITH wrap AS (SELECT nothing FROM something) SELECT * FROM wrap", False, # Test NOT unwrapping it. # The sliced version should NOT have trimmed the ends [ ("templated", slice(0, 0, None), slice(0, 14, None)), ("literal", slice(0, 7, None), slice(14, 21, None)), ("templated", slice(7, 13, None), slice(21, 28, None)), ("literal", slice(13, 28, None), slice(28, 43, None)), ("templated", slice(28, 28, None), slice(43, 63, None)), ], ), ], ) def test__templater_python_slice_file(raw_file, templated_file, unwrap_wrapped, result): """Test slice_file.""" _, resp, _ = PythonTemplater().slice_file( raw_file, # For the render_func we just use a function which just returns the # templated file from the test case. (lambda x: templated_file), config=FluffConfig( configs={"templater": {"unwrap_wrapped_queries": unwrap_wrapped}}, overrides={"dialect": "ansi"}, ), ) # Check contiguous prev_slice = None for templated_slice in resp: if prev_slice: assert templated_slice.source_slice.start == prev_slice[0].stop assert templated_slice.templated_slice.start == prev_slice[1].stop prev_slice = (templated_slice.source_slice, templated_slice.templated_slice) # check result assert resp == result def test__templater_python_large_file_check(): """Test large file skipping. 
The check is separately called on each .process() method so it makes sense to test a few templaters. """ # First check we can process the file normally without config. PythonTemplater().process(in_str="SELECT 1", fname="") # Then check we raise a skip exception when config is set low. with pytest.raises(SQLFluffSkipFile) as excinfo: PythonTemplater().process( in_str="SELECT 1", fname="", config=FluffConfig( overrides={"dialect": "ansi", "large_file_skip_char_limit": 2}, ), ) assert "Length of file" in str(excinfo.value) sqlfluff-2.3.5/test/dialects/000077500000000000000000000000001451700765000161175ustar00rootroot00000000000000sqlfluff-2.3.5/test/dialects/__init__.py000066400000000000000000000000431451700765000202250ustar00rootroot00000000000000"""Tests for sqlfluff.dialects.""" sqlfluff-2.3.5/test/dialects/ansi_test.py000066400000000000000000000211201451700765000204560ustar00rootroot00000000000000"""Tests specific to the ansi dialect.""" import logging import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.parser import Lexer @pytest.mark.parametrize( "raw,res", [ # NB: The final empty string is the end of file marker. ("a b", ["a", " ", "b", ""]), ("b.c", ["b", ".", "c", ""]), ( "abc \n \t def ;blah", ["abc", " ", "\n", " \t ", "def", " ", ";", "blah", ""], ), ], ) def test__dialect__ansi__file_lex(raw, res, caplog): """Test we don't drop bits on simple examples.""" config = FluffConfig(overrides=dict(dialect="ansi")) lexer = Lexer(config=config) with caplog.at_level(logging.DEBUG): tokens, _ = lexer.lex(raw) # From just the initial parse, check we're all there raw_list = [token.raw for token in tokens] assert "".join(token.raw for token in tokens) == raw assert raw_list == res # Develop test to check specific elements against specific grammars. 
@pytest.mark.parametrize( "segmentref,raw", [ ("SelectKeywordSegment", "select"), ("NakedIdentifierSegment", "online_sales"), ("BareFunctionSegment", "current_timestamp"), ("FunctionSegment", "current_timestamp()"), ("NumericLiteralSegment", "1000.0"), ("ExpressionSegment", "online_sales / 1000.0"), ("IntervalExpressionSegment", "INTERVAL 1 YEAR"), ("ExpressionSegment", "CASE WHEN id = 1 THEN 'nothing' ELSE 'test' END"), # Nested Case Expressions # https://github.com/sqlfluff/sqlfluff/issues/172 ( "ExpressionSegment", ( "CASE WHEN id = 1 THEN CASE WHEN true THEN 'something' " "ELSE 'nothing' END ELSE 'test' END" ), ), # Casting expressions # https://github.com/sqlfluff/sqlfluff/issues/161 ("ExpressionSegment", "CAST(ROUND(online_sales / 1000.0) AS varchar)"), # Like expressions # https://github.com/sqlfluff/sqlfluff/issues/170 ("ExpressionSegment", "name NOT LIKE '%y'"), # Functions with a space # https://github.com/sqlfluff/sqlfluff/issues/171 ("SelectClauseElementSegment", "MIN (test.id) AS min_test_id"), # Interval literals # https://github.com/sqlfluff/sqlfluff/issues/148 ( "ExpressionSegment", "DATE_ADD(CURRENT_DATE('America/New_York'), INTERVAL 1 year)", ), # Array accessors ("ExpressionSegment", "my_array[1]"), ("ExpressionSegment", "my_array[OFFSET(1)]"), ("ExpressionSegment", "my_array[5:8]"), ("ExpressionSegment", "4 + my_array[OFFSET(1)]"), ("ExpressionSegment", "bits[OFFSET(0)] + 7"), ( "SelectClauseElementSegment", ( "(count_18_24 * bits[OFFSET(0)])" " / audience_size AS relative_abundance" ), ), ("ExpressionSegment", "count_18_24 * bits[OFFSET(0)] + count_25_34"), ( "SelectClauseElementSegment", ( "(count_18_24 * bits[OFFSET(0)] + count_25_34)" " / audience_size AS relative_abundance" ), ), # Dense math expressions # https://github.com/sqlfluff/sqlfluff/issues/178 # https://github.com/sqlfluff/sqlfluff/issues/179 ("SelectStatementSegment", "SELECT t.val/t.id FROM test WHERE id*1.0/id > 0.8"), ("SelectClauseElementSegment", "t.val/t.id"), # Issue with casting raise as part of PR #177 ("SelectClauseElementSegment", "CAST(num AS INT64)"), # Casting as datatype with arguments ("SelectClauseElementSegment", "CAST(num AS numeric(8,4))"), # Wildcard field selection ("SelectClauseElementSegment", "a.*"), ("SelectClauseElementSegment", "a.b.*"), ("SelectClauseElementSegment", "a.b.c.*"), # Default Element Syntax ("SelectClauseElementSegment", "a..c.*"), # Negative Elements ("SelectClauseElementSegment", "-some_variable"), ("SelectClauseElementSegment", "- some_variable"), # Complex Functions ( "ExpressionSegment", "concat(left(uaid, 2), '|', right(concat('0000000', " "SPLIT_PART(uaid, '|', 4)), 10), '|', '00000000')", ), # Notnull and Isnull ("ExpressionSegment", "c is null"), ("ExpressionSegment", "c is not null"), ("SelectClauseElementSegment", "c is null as c_isnull"), ("SelectClauseElementSegment", "c is not null as c_notnull"), # Shorthand casting ("ExpressionSegment", "NULL::INT"), ("SelectClauseElementSegment", "NULL::INT AS user_id"), ("TruncateStatementSegment", "TRUNCATE TABLE test"), ("TruncateStatementSegment", "TRUNCATE test"), ], ) def test__dialect__ansi_specific_segment_parses( segmentref, raw, caplog, dialect_specific_segment_parses ): """Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested. 
""" dialect_specific_segment_parses("ansi", segmentref, raw, caplog) @pytest.mark.parametrize( "segmentref,raw", [ # Check we don't match empty whitespace as a reference ("ObjectReferenceSegment", "\n ") ], ) def test__dialect__ansi_specific_segment_not_match( segmentref, raw, caplog, dialect_specific_segment_not_match ): """Test that specific segments do not match. NB: We're testing the MATCH function not the PARSE function. This is the opposite to the above. """ dialect_specific_segment_not_match("ansi", segmentref, raw, caplog) @pytest.mark.parametrize( "raw,err_locations", [ # Missing Closing bracket. Error should be raised # on the starting bracket. ("SELECT 1 + (2 ", [(1, 12)]), # Set expression with inappropriate ORDER BY or LIMIT. Error # raised on the UNION. ("SELECT * FROM a ORDER BY 1 UNION SELECT * FROM b", [(1, 28)]), ("SELECT * FROM a LIMIT 1 UNION SELECT * FROM b", [(1, 25)]), ("SELECT * FROM a ORDER BY 1 LIMIT 1 UNION SELECT * FROM b", [(1, 36)]), ], ) def test__dialect__ansi_specific_segment_not_parse(raw, err_locations): """Test queries do not parse, with parsing errors raised properly.""" lnt = Linter(dialect="ansi") parsed = lnt.parse_string(raw) assert len(parsed.violations) > 0 print(parsed.violations) locs = [(v.line_no, v.line_pos) for v in parsed.violations] assert locs == err_locations def test__dialect__ansi_is_whitespace(): """Test proper tagging with is_whitespace.""" lnt = Linter(dialect="ansi") with open("test/fixtures/dialects/ansi/select_in_multiline_comment.sql") as f: parsed = lnt.parse_string(f.read()) # Check all the segments that *should* be whitespace, ARE for raw_seg in parsed.tree.get_raw_segments(): if raw_seg.is_type("whitespace", "newline"): assert raw_seg.is_whitespace @pytest.mark.parametrize( "sql_string, indented_joins, meta_loc", [ ( "select field_1 from my_table as alias_1", True, (1, 4, 8, 11, 15, 16, 17, 18, 19), ), ("select field_1 from my_table as alias_1", False, (1, 4, 8, 11, 15, 16, 17)), ( "select field_1 from my_table as alias_1 join foo using (field_1)", True, (1, 4, 8, 11, 15, 17, 18, 20, 24, 25, 27, 30, 32, 34, 35, 36, 37), ), ( "select field_1 from my_table as alias_1 join foo using (field_1)", False, (1, 4, 8, 11, 15, 17, 19, 23, 24, 26, 29, 31, 33, 34, 35), ), ], ) def test__dialect__ansi_parse_indented_joins(sql_string, indented_joins, meta_loc): """Test parsing of meta segments using Conditional works with indented_joins.""" lnt = Linter( config=FluffConfig( configs={"indentation": {"indented_joins": indented_joins}}, overrides={"dialect": "ansi"}, ) ) parsed = lnt.parse_string(sql_string) # Check that there's nothing unparsable assert "unparsable" not in parsed.tree.type_set() # Check all the segments that *should* be metas, ARE. # NOTE: This includes the end of file marker. 
    res_meta_locs = tuple(
        idx
        for idx, raw_seg in enumerate(parsed.tree.get_raw_segments())
        if raw_seg.is_meta
    )
    assert res_meta_locs == meta_loc
sqlfluff-2.3.5/test/dialects/bigquery_test.py000066400000000000000000000066431451700765000213660ustar00rootroot00000000000000"""Tests specific to the bigquery dialect."""

import hypothesis.strategies as st
import pytest
from hypothesis import example, given, note, settings

from sqlfluff.core import FluffConfig
from sqlfluff.core.parser import Lexer, Parser


@settings(max_examples=100, deadline=None)
@given(
    st.lists(
        st.tuples(st.sampled_from(["<", "=", ">"]), st.sampled_from(["AND", "OR"])),
        min_size=1,
        max_size=30,
    )
)
@example(data=[("<", "AND")])
@example(data=[(">", "AND")])
@example(data=[("<", "AND"), (">", "AND")])
@example(data=[("<", "AND"), ("=", "AND"), (">", "AND")])
@example(data=[(">", "AND"), ("<", "AND")])
@example(data=[("<", "AND"), ("<", "AND"), (">", "AND")])
@example(data=[(">", "AND"), (">", "AND"), ("<", "AND")])
def test_bigquery_relational_operator_parsing(data):
    """Tests queries with a diverse mixture of relational operators."""
    # Generate a simple SELECT query with relational operators and conjunctions
    # as specified in 'data'. Note the conjunctions are used as separators
    # between comparisons, so the conjunction in the first item is not used.
    filter = []
    for i, (relation, conjunction) in enumerate(data):
        if i:
            filter.append(f" {conjunction} ")
        filter.append(f"a {relation} b")
    raw = f'SELECT * FROM t WHERE {"".join(filter)}'
    note(f"query: {raw}")
    # Load the right dialect
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(raw)
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in tokens) == raw
    # Check we don't have lexing issues
    assert not lex_vs

    # Do the parse WITHOUT lots of logging
    # The logs get too long here to be useful. We should use
    # specific segment tests if we want to debug logs.
    parsed = Parser(config=config).parse(tokens)
    print(f"Post-parse structure: {parsed.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.stringify()}")
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs


@pytest.mark.parametrize(
    "table_reference, reference_parts",
    [
        (
            "bigquery-public-data.pypi.file_downloads",
            ["bigquery-public-data", "pypi", "file_downloads"],
        ),
        (
            "`bigquery-public-data.pypi.file_downloads`",
            ["bigquery-public-data", "pypi", "file_downloads"],
        ),
        ("foo.far.bar", ["foo", "far", "bar"]),
        ("`foo.far.bar`", ["foo", "far", "bar"]),
        ("a-b.c-d.e-f", ["a-b", "c-d", "e-f"]),
    ],
)
def test_bigquery_table_reference_segment_iter_raw_references(
    table_reference, reference_parts
):
    """Tests BigQuery override of TableReferenceSegment.iter_raw_references().
    The BigQuery implementation is more complex, handling:
    - hyphenated table references
    - quoted or unquoted table references
    """
    query = f"SELECT bar.user_id FROM {table_reference}"
    config = FluffConfig(overrides=dict(dialect="bigquery"))
    tokens, lex_vs = Lexer(config=config).lex(query)
    parsed = Parser(config=config).parse(tokens)
    for table_reference in parsed.recursive_crawl("table_reference"):
        actual_reference_parts = [
            orp.part for orp in table_reference.iter_raw_references()
        ]
        assert reference_parts == actual_reference_parts
sqlfluff-2.3.5/test/dialects/conftest.py000066400000000000000000000104731451700765000203230ustar00rootroot00000000000000"""Sharing fixtures to test the dialects."""

import logging

import pytest

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.core.parser import BaseSegment, Lexer
from sqlfluff.core.parser.context import ParseContext
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.matchable import Matchable


def lex(raw, config):
    """Basic parsing for the tests below."""
    # Set up the lexer
    lex = Lexer(config=config)
    # Lex the string for matching. For a good test, this would
    # arguably happen as a fixture, but it's easier to pass strings
    # as parameters than pre-lexed segment strings.
    segments, vs = lex.lex(raw)
    assert not vs
    print(segments)
    return segments


def validate_segment(segmentref, config):
    """Get and validate segment for tests below."""
    Seg = config.get("dialect_obj").ref(segmentref)
    if isinstance(Seg, Matchable):
        return Seg
    try:
        if issubclass(Seg, BaseSegment):
            return Seg
    except TypeError:
        pass
    raise TypeError(
        "{} is not of type Segment or Matchable. Test is invalid.".format(segmentref)
    )


def _dialect_specific_segment_parses(dialect, segmentref, raw, caplog):
    """Test that specific segments parse as expected.

    NB: We're testing the PARSE function not the MATCH function
    although this will be a recursive parse and so the match
    function of SUBSECTIONS will be tested if present. The match
    function of the parent will not be tested.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    # Most segments won't handle the end of file marker. We should strip it.
    if segments[-1].is_type("end_of_file"):
        segments = segments[:-1]
    ctx = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        result = Seg.match(segments, 0, parse_context=ctx)

    assert isinstance(result, MatchResult)
    parsed = result.apply(segments)
    assert len(parsed) == 1
    print(parsed)
    parsed = parsed[0]

    # Check we get a good response
    print(parsed)
    print(type(parsed))
    print(type(parsed.raw))
    # Check we're all there.
    assert parsed.raw == raw
    # Check that there's nothing unparsable
    typs = parsed.type_set()
    assert "unparsable" not in typs


def _dialect_specific_segment_not_match(dialect, segmentref, raw, caplog):
    """Test that specific segments do not match.

    NB: We're testing the MATCH function not the PARSE function.
    This is the opposite to the above.
    """
    config = FluffConfig(overrides=dict(dialect=dialect))
    segments = lex(raw, config=config)
    Seg = validate_segment(segmentref, config=config)

    ctx = ParseContext.from_config(config)
    with caplog.at_level(logging.DEBUG):
        match = Seg.match(segments, 0, parse_context=ctx)

    assert not match


def _validate_dialect_specific_statements(dialect, segment_cls, raw, stmt_count):
    """This validates one or multiple statements against a specified segment class.

    It also validates the number of parsed statements against the number of
    expected statements.
    """
    lnt = Linter(dialect=dialect)
    parsed = lnt.parse_string(raw)
    assert len(parsed.violations) == 0

    # Find any unparsable statements
    typs = parsed.tree.type_set()
    assert "unparsable" not in typs

    # Find the expected type in the parsed segment
    child_segments = [seg for seg in parsed.tree.recursive_crawl(segment_cls.type)]
    assert len(child_segments) == stmt_count

    # Check if all child segments are the correct type
    for c in child_segments:
        assert isinstance(c, segment_cls)


@pytest.fixture()
def dialect_specific_segment_parses():
    """Fixture to check specific segments of a dialect."""
    return _dialect_specific_segment_parses


@pytest.fixture()
def dialect_specific_segment_not_match():
    """Check specific segments of a dialect which will not match to a segment."""
    return _dialect_specific_segment_not_match


@pytest.fixture()
def validate_dialect_specific_statements():
    """This validates one or multiple statements against a specified segment class.

    It also validates the number of parsed statements against the number of
    expected statements.
    """
    return _validate_dialect_specific_statements
sqlfluff-2.3.5/test/dialects/dialects_test.py000066400000000000000000000132151451700765000213220ustar00rootroot00000000000000"""Automated tests for all dialects.

Any files in the test/fixtures/dialects/ directory will be picked up
and automatically tested against the appropriate dialect.
"""

from typing import Any, Dict, Optional

import pytest

from sqlfluff.core import FluffConfig, Linter
from sqlfluff.core.linter import ParsedString, RenderedFile
from sqlfluff.core.parser.segments.base import BaseSegment
from sqlfluff.core.templaters import TemplatedFile

from ..conftest import (
    compute_parse_tree_hash,
    get_parse_fixtures,
    load_file,
    make_dialect_path,
    parse_example_file,
)

parse_success_examples, parse_structure_examples = get_parse_fixtures(
    fail_on_missing_yml=True
)


def lex_and_parse(config_overrides: Dict[str, Any], raw: str) -> Optional[ParsedString]:
    """Performs a lex and parse, with cacheable inputs within the fixture."""
    # Load the right dialect
    config = FluffConfig(overrides=config_overrides)
    # Construct rendered file (to skip the templater)
    templated_file = TemplatedFile.from_string(raw)
    rendered_file = RenderedFile(
        templated_file,
        [],
        config,
        {},
        templated_file.fname,
        "utf8",
        raw,
    )
    # Parse (which includes lexing)
    linter = Linter(config=config)
    parsed_file = linter.parse_rendered(rendered_file)
    if not raw:  # Empty file case
        # We're just checking there aren't exceptions in this case.
        return None
    # Check we managed to parse
    assert parsed_file.tree
    # From just the initial parse, check we're all there
    assert "".join(token.raw for token in parsed_file.tree.raw_segments) == raw
    # Check we don't have lexing or parsing issues
    assert not parsed_file.violations
    return parsed_file


@pytest.mark.integration
@pytest.mark.parse_suite
@pytest.mark.parametrize("dialect,file", parse_success_examples)
def test__dialect__base_file_parse(dialect, file):
    """For given test examples, check successful parsing."""
    raw = load_file(dialect, file)
    config_overrides = dict(dialect=dialect)
    # Use the helper function to avoid parsing twice
    parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw)
    if not parsed:  # Empty file case
        return
    print(f"Post-parse structure: {parsed.tree.to_tuple(show_raw=True)}")
    print(f"Post-parse structure: {parsed.tree.stringify()}")
    # Check we're all there.
assert parsed.tree.raw == raw # Check that there's nothing unparsable typs = parsed.tree.type_set() assert "unparsable" not in typs # When testing the validity of fixes we re-parse sections of the file. # To ensure this is safe - here we re-parse the unfixed file to ensure # it's still valid even in the case that no fixes have been applied. assert parsed.tree.validate_segment_with_reparse(parsed.config.get("dialect_obj")) @pytest.mark.integration @pytest.mark.fix_suite @pytest.mark.parametrize("dialect,file", parse_success_examples) def test__dialect__base_broad_fix( dialect, file, raise_critical_errors_after_fix, caplog ): """Run a full fix with all rules, in search of critical errors. NOTE: This suite does all of the same things as the above test suite (the `parse_suite`), but also runs fix. In CI, we run the above tests _with_ coverage tracking, but these we run _without_. The purpose of this test is as a more stretching run through a wide range of test sql examples, and the full range of rules to find any potential critical errors raised by any interactions between different dialects and rules. We also do not use DEBUG logging here because it gets _very_ noisy. """ raw = load_file(dialect, file) config_overrides = dict(dialect=dialect) parsed: Optional[ParsedString] = lex_and_parse(config_overrides, raw) if not parsed: # Empty file case return else: print(parsed.tree.stringify()) config = FluffConfig(overrides=config_overrides) linter = Linter(config=config) rule_pack = linter.get_rulepack() # Due to "raise_critical_errors_after_fix" fixture "fix", # will now throw. linter.lint_parsed( parsed, rule_pack, fix=True, ) @pytest.mark.integration @pytest.mark.parse_suite @pytest.mark.parametrize("dialect,sqlfile,code_only,yamlfile", parse_structure_examples) def test__dialect__base_parse_struct( dialect, sqlfile, code_only, yamlfile, yaml_loader, ): """For given test examples, check parsed structure against yaml.""" parsed: Optional[BaseSegment] = parse_example_file(dialect, sqlfile) actual_hash = compute_parse_tree_hash(parsed) # Load the YAML expected_hash, res = yaml_loader(make_dialect_path(dialect, yamlfile)) if not parsed: assert parsed == res return # Verify the current parse tree matches the historic parse tree. parsed_tree = parsed.to_tuple(code_only=code_only, show_raw=True) # The parsed tree consists of a tuple of "File:", followed by the # statements. So only compare when there is at least one statement. if parsed_tree[1] or res[1]: assert parsed_tree == res # Verify the current hash matches the historic hash. The main purpose of # this check is to force contributors to use the generator script to # create these files. New contributors have sometimes been unaware of # this tool and have attempted to craft the YAML files manually. This # can lead to slight differences, confusion, and errors. assert expected_hash == actual_hash, ( "Parse tree hash does not match. Please run " "'python test/generate_parse_fixture_yml.py' to create YAML files " "in test/fixtures/dialects." ) sqlfluff-2.3.5/test/dialects/exasol_test.py000066400000000000000000000010611451700765000210210ustar00rootroot00000000000000"""Tests specific to the exasol dialect.""" import pytest TEST_DIALECT = "exasol" # Develop test to check specific elements against specific grammars. 
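# As a rough sketch of what the fixture used below does (see the helpers in
# test/dialects/conftest.py): for a case like ("RangeOperator", ".."), it
# lexes ".." with the exasol dialect, matches it against the "RangeOperator"
# segment, and asserts that the parse result round-trips the raw string with
# no "unparsable" sections. (Orientation summary only.)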
@pytest.mark.parametrize( "segmentref,raw", [ ("RangeOperator", ".."), ("WalrusOperatorSegment", ":="), ("VariableNameSegment", "var1"), ], ) def test_dialect_exasol_specific_segment_parses( segmentref, raw, caplog, dialect_specific_segment_parses ): """Test exasol specific segments.""" dialect_specific_segment_parses(TEST_DIALECT, segmentref, raw, caplog) sqlfluff-2.3.5/test/dialects/postgres_test.py000066400000000000000000000115651451700765000214060ustar00rootroot00000000000000"""Tests specific to the postgres dialect.""" from typing import Callable import pytest from _pytest.logging import LogCaptureFixture from sqlfluff.core import FluffConfig, Linter from sqlfluff.dialects.dialect_postgres_keywords import ( get_keywords, priority_keyword_merge, ) @pytest.mark.parametrize( "segment_reference,raw", [ # AT TIME ZONE constructs ("SelectClauseElementSegment", "c_column AT TIME ZONE 'UTC'"), ("SelectClauseElementSegment", "(c_column AT TIME ZONE 'UTC')::time"), ( "SelectClauseElementSegment", "timestamp with time zone '2021-10-01' AT TIME ZONE 'UTC'", ), # Notnull and Isnull ("ExpressionSegment", "c is null"), ("ExpressionSegment", "c is not null"), ("ExpressionSegment", "c isnull"), ("ExpressionSegment", "c notnull"), ("SelectClauseElementSegment", "c is null as c_isnull"), ("SelectClauseElementSegment", "c is not null as c_notnull"), ("SelectClauseElementSegment", "c isnull as c_isnull"), ("SelectClauseElementSegment", "c notnull as c_notnull"), ("ArrayAccessorSegment", "[2:10]"), ("ArrayAccessorSegment", "[:10]"), ("ArrayAccessorSegment", "[2:]"), ("ArrayAccessorSegment", "[2]"), ], ) def test_dialect_postgres_specific_segment_parses( segment_reference: str, raw: str, caplog: LogCaptureFixture, dialect_specific_segment_parses: Callable, ) -> None: """Test that specific segments parse as expected. NB: We're testing the PARSE function not the MATCH function although this will be a recursive parse and so the match function of SUBSECTIONS will be tested if present. The match function of the parent will not be tested. 
""" dialect_specific_segment_parses("postgres", segment_reference, raw, caplog) @pytest.mark.parametrize( "raw", [ "SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime) AS myepoch FROM t1", "SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime - t1.othertime) AS myepoch " "FROM t1", ], ) def test_epoch_datetime_unit(raw: str) -> None: """Test the EPOCH keyword for postgres dialect.""" # Don't test for new lines or capitalisation cfg = FluffConfig( configs={"core": {"exclude_rules": "LT12,LT05,LT09", "dialect": "postgres"}} ) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert result.num_violations() == 0 @pytest.mark.parametrize( "raw", [ "SELECT foo AS space FROM t1", "SELECT space.something FROM t1 AS space", ], ) def test_space_is_not_reserved(raw: str) -> None: """Ensure that SPACE is not treated as reserved.""" cfg = FluffConfig( configs={"core": {"exclude_rules": "LT12,LT05,AL07", "dialect": "postgres"}} ) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert result.num_violations() == 0 def test_priority_keyword_merge() -> None: """Test merging on keyword lists works as expected.""" kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] result = priority_keyword_merge(kw_list_1, kw_list_2) expected_result = [("A", "reserved"), ("B", "non-reserved"), ("C", "non-reserved")] assert sorted(result) == sorted(expected_result) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] result_2 = priority_keyword_merge(kw_list_2, kw_list_1) expected_result_2 = [ ("A", "not-keyword"), ("B", "non-reserved"), ("C", "non-reserved"), ] assert sorted(result_2) == sorted(expected_result_2) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] kw_list_2 = [("A", "reserved"), ("C", "non-reserved")] kw_list_3 = [("B", "reserved")] result_3 = priority_keyword_merge(kw_list_2, kw_list_1, kw_list_3) expected_result_3 = [("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved")] assert sorted(result_3) == sorted(expected_result_3) kw_list_1 = [("A", "not-keyword"), ("B", "non-reserved")] result_4 = priority_keyword_merge(kw_list_1) expected_result_4 = kw_list_1 assert sorted(result_4) == sorted(expected_result_4) def test_get_keywords() -> None: """Test keyword filtering works as expected.""" kw_list = [ ("A", "not-keyword"), ("B", "reserved"), ("C", "non-reserved"), ("D", "not-keyword"), ("E", "non-reserved-(cannot-be-function-or-type)"), ] expected_result = ["A", "D"] assert sorted(get_keywords(kw_list, "not-keyword")) == sorted(expected_result) expected_result_2 = ["C", "E"] assert sorted(get_keywords(kw_list, "non-reserved")) == sorted(expected_result_2) expected_result_3 = ["B"] assert sorted(get_keywords(kw_list, "reserved")) == sorted(expected_result_3) sqlfluff-2.3.5/test/dialects/snowflake_test.py000066400000000000000000000053451451700765000215300ustar00rootroot00000000000000"""Tests specific to the snowflake dialect.""" import pytest from sqlfluff.core import Linter from sqlfluff.core.dialects import dialect_selector # Deprecated: All new tests should be added as .sql and .yml files under # `test/fixtures/dialects/snowflake`. # See test/fixtures/dialects/README.md for more details. 
@pytest.mark.parametrize( "segment_cls,raw", [ ( "CreateCloneStatementSegment", "create table orders_clone_restore clone orders at (timestamp => " "to_timestamp_tz('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss'));", ), ("ShowStatementSegment", "SHOW GRANTS ON ACCOUNT;"), ("ShowStatementSegment", "show tables history in tpch.public;"), ("ShowStatementSegment", "show future grants in schema sales.public;"), ( "ShowStatementSegment", "show replication databases with primary aws_us_west_2.myaccount1.mydb1;", ), ( "ShowStatementSegment", "SHOW TERSE SCHEMAS HISTORY LIKE '%META%' IN DATABASE MYDB STARTS WITH " "'INT' LIMIT 10 FROM 'LAST_SCHEMA';", ), ("ShowStatementSegment", "SHOW GRANTS TO ROLE SECURITYADMIN;"), ("ShowStatementSegment", "SHOW GRANTS OF SHARE MY_SHARE;"), # Testing https://github.com/sqlfluff/sqlfluff/issues/634 ( "SemiStructuredAccessorSegment", "SELECT ID :: VARCHAR as id, OBJ : userId :: VARCHAR as user_id from x", ), ("DropUserStatementSegment", "DROP USER my_user;"), ("AlterSessionStatementSegment", "ALTER SESSION SET TIMEZONE = 'UTC'"), ( "AlterSessionStatementSegment", "ALTER SESSION SET ABORT_DETACHED_QUERY = FALSE", ), ("AlterSessionStatementSegment", "ALTER SESSION SET JSON_INDENT = 5"), ( "AlterSessionStatementSegment", "ALTER SESSION UNSET ERROR_ON_NONDETERMINISTIC_MERGE;", ), ( "AlterSessionStatementSegment", "ALTER SESSION UNSET TIME_OUTPUT_FORMAT, TWO_DIGIT_CENTURY_START;", ), ], ) def test_snowflake_queries(segment_cls, raw, caplog): """Test snowflake specific queries parse.""" lnt = Linter(dialect="snowflake") parsed = lnt.parse_string(raw) print(parsed.violations) assert len(parsed.violations) == 0 # Find any unparsable statements typs = parsed.tree.type_set() assert "unparsable" not in typs # Find the expected type in the parsed segment seg_type = dialect_selector("snowflake").get_segment(segment_cls).type child_segments = [seg for seg in parsed.tree.recursive_crawl(seg_type)] assert len(child_segments) > 0 # If we get here the raw statement was parsed as expected sqlfluff-2.3.5/test/dialects/soql_test.py000066400000000000000000000013561451700765000205130ustar00rootroot00000000000000"""Tests specific to the soql dialect.""" import pytest from sqlfluff.core import FluffConfig, Linter from sqlfluff.core.errors import SQLParseError @pytest.mark.parametrize( "raw", [ "ALTER TABLE foo DROP COLUMN bar\n", "CREATE USER my_user\n", "TRUNCATE TABLE foo\n", "EXPLAIN SELECT Id FROM Contact\n", "DROP TABLE foo\n", "DROP USER my_user\n", ], ) def test_non_selects_unparseable(raw: str) -> None: """Test that non-SELECT commands are not parseable.""" cfg = FluffConfig(configs={"core": {"dialect": "soql"}}) lnt = Linter(config=cfg) result = lnt.lint_string(raw) assert len(result.violations) == 1 assert isinstance(result.violations[0], SQLParseError) sqlfluff-2.3.5/test/dialects/unparsable_test.py000066400000000000000000000135301451700765000216660ustar00rootroot00000000000000"""Test the behaviour of the unparsable routines.""" from typing import Any, Optional import pytest from sqlfluff.core import FluffConfig from sqlfluff.core.parser import BaseSegment, Lexer, RawSegment from sqlfluff.core.parser.context import ParseContext # NOTE: Being specific on the segment ref helps to avoid crazy nesting. @pytest.mark.parametrize( "segmentref,dialect,raw,structure", [ ( # The first here makes sure all of this works from the outer # segment, but for other tests we should aim to be more specific. 
            None,
            "ansi",
            "SELECT 1 1",
            (
                "file",
                (
                    (
                        "statement",
                        (
                            (
                                "select_statement",
                                (
                                    (
                                        "select_clause",
                                        (
                                            ("keyword", "SELECT"),
                                            ("whitespace", " "),
                                            (
                                                "select_clause_element",
                                                (("numeric_literal", "1"),),
                                            ),
                                            ("whitespace", " "),
                                            (
                                                "unparsable",
                                                (("numeric_literal", "1"),),
                                            ),
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
        (
            "SelectClauseSegment",
            "ansi",
            "SELECT 1 1",
            (
                "select_clause",
                (
                    ("keyword", "SELECT"),
                    ("whitespace", " "),
                    (
                        "select_clause_element",
                        (("numeric_literal", "1"),),
                    ),
                    ("whitespace", " "),
                    # We should get a single unparsable section
                    # here at the end.
                    (
                        "unparsable",
                        (("numeric_literal", "1"),),
                    ),
                ),
            ),
        ),
        # This more complex example looks a little strange, but does
        # reflect current unparsable behaviour. During future work
        # on the parser, the structure of this result may change
        # but it should still result in an unparsable section _within_
        # the brackets, and not just a totally unparsable statement.
        (
            "SelectClauseSegment",
            "ansi",
            "SELECT 1 + (2 2 2)",
            (
                "select_clause",
                (
                    ("keyword", "SELECT"),
                    ("whitespace", " "),
                    (
                        "select_clause_element",
                        (
                            (
                                "expression",
                                (
                                    ("numeric_literal", "1"),
                                    ("whitespace", " "),
                                    ("binary_operator", "+"),
                                    ("whitespace", " "),
                                    (
                                        "bracketed",
                                        (
                                            ("start_bracket", "("),
                                            ("expression", (("numeric_literal", "2"),)),
                                            ("whitespace", " "),
                                            (
                                                "unparsable",
                                                (
                                                    ("numeric_literal", "2"),
                                                    ("whitespace", " "),
                                                    ("numeric_literal", "2"),
                                                ),
                                            ),
                                            ("end_bracket", ")"),
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
    ],
)
def test_dialect_unparsable(
    segmentref: Optional[str], dialect: str, raw: str, structure: Any
):
    """Test the structure of unparsables."""
    config = FluffConfig(overrides=dict(dialect=dialect))

    # Get the referenced object (if set, otherwise root)
    if segmentref:
        Seg = config.get("dialect_obj").ref(segmentref)
    else:
        Seg = config.get("dialect_obj").get_root_segment()
    # We only allow BaseSegments as matchables in this test.
    assert issubclass(Seg, BaseSegment)
    assert not issubclass(Seg, RawSegment)

    # Lex the raw string.
    lex = Lexer(config=config)
    segments, vs = lex.lex(raw)
    assert not vs

    # Strip the end of file token if it's there. It will
    # confuse most segments.
    if segmentref and segments[-1].is_type("end_of_file"):
        segments = segments[:-1]

    ctx = ParseContext.from_config(config)
    # Match against the segment.
    match = Seg.match(segments, 0, ctx)
    result = match.apply(segments)
    assert len(result) == 1
    parsed = result[0]
    assert isinstance(parsed, Seg)

    assert parsed.to_tuple(show_raw=True) == structure
sqlfluff-2.3.5/test/diff_quality_plugin_test.py000066400000000000000000000043731451700765000220050ustar00rootroot00000000000000"""Tests for the SQLFluff integration with the "diff-quality" tool."""

import sys
from pathlib import Path

import pytest

from sqlfluff import diff_quality_plugin
from sqlfluff.cli.commands import lint
from sqlfluff.utils.testing.cli import invoke_assert_code


@pytest.mark.parametrize(
    "sql_paths,expected_violations_lines",
    [
        (("linter/indentation_errors.sql",), list(range(2, 7))),
        (("linter/parse_error.sql",), {1}),
        # NB: This version of the file is in a directory configured
        # to ignore parsing errors.
(("linter/diffquality/parse_error.sql",), []), (tuple(), []), ], ) def test_diff_quality_plugin(sql_paths, expected_violations_lines, monkeypatch): """Test the plugin at least finds errors on the expected lines.""" def execute(command, exit_codes): printable_command_parts = [ c.decode(sys.getfilesystemencoding()) if isinstance(c, bytes) else c for c in command ] result = invoke_assert_code( ret_code=1 if expected_violations_lines else 0, args=[ lint, printable_command_parts[2:], ], ) return result.output, "" # Mock the execute function -- this is an attempt to prevent the CircleCI # coverage check from hanging. (We've seen issues in the past where using # subprocesses caused things to occasionally hang.) monkeypatch.setattr(diff_quality_plugin, "execute", execute) monkeypatch.chdir("test/fixtures/") violation_reporter = diff_quality_plugin.diff_cover_report_quality( options="--processes=1" ) assert len(sql_paths) in (0, 1) sql_paths = [str(Path(sql_path)) for sql_path in sql_paths] violations_dict = violation_reporter.violations_batch(sql_paths) assert isinstance(violations_dict, dict) if expected_violations_lines: assert len(violations_dict[sql_paths[0]]) > 0 violations_lines = {v.line for v in violations_dict[sql_paths[0]]} for expected_line in expected_violations_lines: assert expected_line in violations_lines else: assert ( len(violations_dict[sql_paths[0]]) == 0 if sql_paths else len(violations_dict) == 0 ) sqlfluff-2.3.5/test/fixtures/000077500000000000000000000000001451700765000162005ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/.sqlfluff000066400000000000000000000000321451700765000200160ustar00rootroot00000000000000[sqlfluff] dialect = ansi sqlfluff-2.3.5/test/fixtures/api/000077500000000000000000000000001451700765000167515ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/api/config_override/000077500000000000000000000000001451700765000221155ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/api/config_override/.sqlfluff000066400000000000000000000000451451700765000237370ustar00rootroot00000000000000[sqlfluff] exclude_rules = RF02,RF04 sqlfluff-2.3.5/test/fixtures/api/config_path_test/000077500000000000000000000000001451700765000222715ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/api/config_path_test/config_path_test.json000066400000000000000000000020071451700765000265030ustar00rootroot00000000000000{ "file": { "statement": { "select_statement": { "select_clause": { "keyword": "SELECT", "whitespace": " ", "select_clause_element": { "column_reference": { "naked_identifier": "foo" } } }, "whitespace": " ", "from_clause": { "keyword": "FROM", "whitespace": " ", "from_expression": { "from_expression_element": { "table_expression": { "table_reference": { "naked_identifier": "bar" } } } } } } }, "statement_terminator": ";", "newline": "\n" } } sqlfluff-2.3.5/test/fixtures/api/config_path_test/config_path_test.sql000066400000000000000000000000421451700765000263260ustar00rootroot00000000000000SELECT foo FROM {{ table_name }}; sqlfluff-2.3.5/test/fixtures/api/config_path_test/extra_configs/000077500000000000000000000000001451700765000251245ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/api/config_path_test/extra_configs/.sqlfluff000066400000000000000000000000621451700765000267450ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] table_name=bar 
sqlfluff-2.3.5/test/fixtures/api/parse_test/000077500000000000000000000000001451700765000211225ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/api/parse_test/parse_test.json000066400000000000000000000043461451700765000241730ustar00rootroot00000000000000{
    "file": {
        "statement": {
            "select_statement": {
                "select_clause": [
                    {
                        "keyword": "SeLEct"
                    },
                    {
                        "whitespace": " "
                    },
                    {
                        "select_clause_element": {
                            "wildcard_expression": {
                                "wildcard_identifier": {
                                    "star": "*"
                                }
                            }
                        }
                    },
                    {
                        "comma": ","
                    },
                    {
                        "whitespace": " "
                    },
                    {
                        "select_clause_element": {
                            "numeric_literal": "1"
                        }
                    },
                    {
                        "comma": ","
                    },
                    {
                        "whitespace": " "
                    },
                    {
                        "select_clause_element": {
                            "column_reference": {
                                "naked_identifier": "blah"
                            },
                            "whitespace": " ",
                            "alias_expression": {
                                "keyword": "as",
                                "whitespace": " ",
                                "naked_identifier": "fOO"
                            }
                        }
                    }
                ],
                "whitespace": " ",
                "from_clause": {
                    "keyword": "from",
                    "whitespace": " ",
                    "from_expression": {
                        "from_expression_element": {
                            "table_expression": {
                                "table_reference": {
                                    "naked_identifier": "myTable"
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
sqlfluff-2.3.5/test/fixtures/cli/000077500000000000000000000000001451700765000167475ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/cli/.gitignore000066400000000000000000000000531451700765000207350ustar00rootroot00000000000000# Results of fixed tests
fail_many_fix.sql
sqlfluff-2.3.5/test/fixtures/cli/disable_noqa_test.sql000066400000000000000000000003571451700765000231550ustar00rootroot00000000000000-- Test to verify that the --disable-noqa CLI option
-- allows for inline noqa comments to be ignored.
-- NOTE: two noqas so that we can also test --warn-unused-ignores
SELECT
    col_a AS a, --noqa: CP01
    col_b as b --noqa: CP01
FROM t;
sqlfluff-2.3.5/test/fixtures/cli/encoding_test.sql000066400000000000000000000001321451700765000223110ustar00rootroot00000000000000-- This file is encoded in utf-8-SIG
SELECT foo
FROM bar;
-- utf-8-SIG comment →
sqlfluff-2.3.5/test/fixtures/cli/extra_config_tsql.sql000066400000000000000000000001261451700765000232020ustar00rootroot00000000000000-- Some tsql specific sql to test the config cli argument.
BEGIN
SELECT 'Weekend';
END
sqlfluff-2.3.5/test/fixtures/cli/extra_configs/000077500000000000000000000000001451700765000216025ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/cli/extra_configs/.sqlfluff000066400000000000000000000000321451700765000234200ustar00rootroot00000000000000[sqlfluff]
dialect = tsql
sqlfluff-2.3.5/test/fixtures/cli/extra_configs/pyproject.toml000066400000000000000000000000461451700765000245160ustar00rootroot00000000000000[tool.sqlfluff.core]
dialect = "tsql"
sqlfluff-2.3.5/test/fixtures/cli/fail_many.sql000066400000000000000000000001771451700765000214340ustar00rootroot00000000000000-- File which fails on templating and lexing errors.
SELECT {{ something }} as trailing_space
, 3 +
FROM SELECT FROM
sqlfluff-2.3.5/test/fixtures/cli/ignore_local_config/000077500000000000000000000000001451700765000227315ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/cli/ignore_local_config/.sqlfluff000066400000000000000000000000401451700765000245470ustar00rootroot00000000000000[sqlfluff]
exclude_rules = AL02
sqlfluff-2.3.5/test/fixtures/cli/ignore_local_config/ignore_local_config_test.sql000066400000000000000000000002631451700765000304740ustar00rootroot00000000000000-- This query raises AL02.
-- We exclude this rule in the .sqlfluff file and then test
-- ignoring this config file via the --ignore-local-config CLI flag.
SELECT col_a a
FROM foo
sqlfluff-2.3.5/test/fixtures/cli/passing_a.sql000066400000000000000000000000541451700765000214330ustar00rootroot00000000000000SELECT
    tbl.name,
    tbl.value
FROM tbl
sqlfluff-2.3.5/test/fixtures/cli/passing_b.sql000066400000000000000000000004751451700765000214430ustar00rootroot00000000000000SELECT
    tbl.name,
    b.value,
    /* This is a block comment */
    d.something, -- Which has a comment after it
    tbl.foo,
    c.val + b.val / -2 AS a_calculation
FROM tbl
INNER JOIN b ON (tbl.common_id = b.common_id)
JOIN c ON (tbl.id = c.id)
LEFT JOIN d ON (tbl.id = d.other_id)
ORDER BY tbl.name ASC
sqlfluff-2.3.5/test/fixtures/cli/unknown_jinja_tag/000077500000000000000000000000001451700765000224545ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/cli/unknown_jinja_tag/.sqlfluff000066400000000000000000000001631451700765000242770ustar00rootroot00000000000000[sqlfluff]
dialect = ansi

[sqlfluff:templater:jinja]
load_macros_from_path = my_macros
apply_dbt_builtins = False
sqlfluff-2.3.5/test/fixtures/cli/unknown_jinja_tag/my_macros/000077500000000000000000000000001451700765000244455ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/cli/unknown_jinja_tag/my_macros/dbt_test.sql000066400000000000000000000002501451700765000267730ustar00rootroot00000000000000{% test warn_if_odd(model, column_name) %}

    {{ config(severity = 'warn') }}

    select *
    from {{ model }}
    where ({{ column_name }} % 2) = 1

{% endtest %}
sqlfluff-2.3.5/test/fixtures/cli/unknown_jinja_tag/test.sql000066400000000000000000000000111451700765000241560ustar00rootroot00000000000000SELECT 1
sqlfluff-2.3.5/test/fixtures/cli/warning_a.sql000066400000000000000000000002131451700765000214310ustar00rootroot00000000000000-- This file should fail _only_ for spacing around +
-- We explicitly configure that rule to only warn.
-- sqlfluff:warnings:LT01
SELECT 1+2
sqlfluff-2.3.5/test/fixtures/config/000077500000000000000000000000001451700765000174455ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/glob_exclude/000077500000000000000000000000001451700765000221015ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/glob_exclude/.sqlfluff000066400000000000000000000000451451700765000237230ustar00rootroot00000000000000[sqlfluff]
exclude_rules = L05*,RF02
sqlfluff-2.3.5/test/fixtures/config/glob_exclude/test.sql000066400000000000000000000004331451700765000236010ustar00rootroot00000000000000
/* Denylist glob test

This query violates RF02, AM04, LT13, AM05, and CV06.
When we exclude L05*,RF02 in the config we expect RF02, LT13, AM05, and CV06
to be ignored by the linter.
- AM05 because its alias is L051
- CV06 because its alias is L052
*/
SELECT *
FROM bar
JOIN baz
sqlfluff-2.3.5/test/fixtures/config/glob_include/000077500000000000000000000000001451700765000220735ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/glob_include/.sqlfluff000066400000000000000000000002071451700765000237150ustar00rootroot00000000000000[sqlfluff]
rules = L05*,RF02

[sqlfluff:rules:convention.terminator]
# Semi-colon formatting approach.
require_final_semicolon = True
sqlfluff-2.3.5/test/fixtures/config/glob_include/test.sql000066400000000000000000000004401451700765000235710ustar00rootroot00000000000000
/* Allowlist glob test

This query violates RF02, AM04, LT13, AM05, and CV06.
When we include L05*,RF02 in the config we expect RF02, LT13, AM05, and CV06
only to be raised by the linter.
- AM05 because its alias is L051 - CV06 because its alias is L052 */ SELECT * FROM bar JOIN baz sqlfluff-2.3.5/test/fixtures/config/inheritance_a/000077500000000000000000000000001451700765000222365ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/inheritance_a/.sqlfluff000066400000000000000000000001251451700765000240570ustar00rootroot00000000000000[sqlfluff] dialect=mysql testing_val=foobar testing_int=4 [sqlfluff:bar] foo=barbar sqlfluff-2.3.5/test/fixtures/config/inheritance_a/nested/000077500000000000000000000000001451700765000235205ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/inheritance_a/nested/pyproject.toml000066400000000000000000000000451451700765000264330ustar00rootroot00000000000000[tool.sqlfluff.core] testing_int = 1 sqlfluff-2.3.5/test/fixtures/config/inheritance_a/nested/setup.cfg000066400000000000000000000000531451700765000253370ustar00rootroot00000000000000[sqlfluff] testing_int=5 testing_bar=7.698 sqlfluff-2.3.5/test/fixtures/config/inheritance_a/nested/tox.ini000066400000000000000000000001271451700765000250330ustar00rootroot00000000000000[sqlfluff] testing_int=6 [sqlfluff:bar] foo=foobar [sqlfluff:fnarr:fnarr] foo=foobar sqlfluff-2.3.5/test/fixtures/config/inheritance_a/testing.sql000066400000000000000000000000211451700765000244250ustar00rootroot00000000000000SELECT 1 FROM tblsqlfluff-2.3.5/test/fixtures/config/inheritance_b/000077500000000000000000000000001451700765000222375ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/inheritance_b/example.sql000066400000000000000000000000161451700765000244100ustar00rootroot00000000000000 SELeCT fOosqlfluff-2.3.5/test/fixtures/config/inheritance_b/nested/000077500000000000000000000000001451700765000235215ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/inheritance_b/nested/.sqlfluff000066400000000000000000000000701451700765000253410ustar00rootroot00000000000000[sqlfluff] rules=LT01,LT02,CP01,CP02 exclude_rules=CP03 sqlfluff-2.3.5/test/fixtures/config/inheritance_b/nested/example.sql000066400000000000000000000000161451700765000256720ustar00rootroot00000000000000 SELeCT fOosqlfluff-2.3.5/test/fixtures/config/inheritance_b/tox.ini000066400000000000000000000000631451700765000235510ustar00rootroot00000000000000[sqlfluff] rules=LT01,CP01,CP02 exclude_rules=CP01 sqlfluff-2.3.5/test/fixtures/config/placeholder/000077500000000000000000000000001451700765000217275ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/placeholder/.sqlfluff-placeholder000066400000000000000000000002331451700765000260300ustar00rootroot00000000000000[sqlfluff] testing_val=foobar testing_int=4 [sqlfluff:bar] foo=barbar [sqlfluff:templater:placeholder] param_style = flyway_var flyway:database = test_dbsqlfluff-2.3.5/test/fixtures/config/rules_group_with_exclude/000077500000000000000000000000001451700765000245575ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/rules_group_with_exclude/.sqlfluff000066400000000000000000000000551451700765000264020ustar00rootroot00000000000000[sqlfluff] rules = core exclude_rules = LT04 sqlfluff-2.3.5/test/fixtures/config/rules_group_with_exclude/test.sql000066400000000000000000000004101451700765000262520ustar00rootroot00000000000000 /* Rules group with exclude rules test If some monster wants to run the core rules, but at the same time allow trailing and leading commas, then they can do that now. This query should only trigger CP01 */ SELECT field_1, field_2 , field_3 from bar
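A note on the `rules_group_with_exclude` fixture above: its `.sqlfluff` combines a rule group (`core`) with an `exclude_rules` entry, so LT04 is subtracted from the group before linting. The same selection can be sketched as a one-off command-line invocation. This is a hypothetical example, assuming the `--rules` and `--exclude-rules` flags accept the same selectors (including the `core` group) as the config file keys:

```
# Hypothetical one-off equivalent of the fixture's .sqlfluff
# (assumes the CLI accepts the "core" group selector):
sqlfluff lint --rules core --exclude-rules LT04 test.sql
```

Keeping the selection in `.sqlfluff` rather than on the command line is what the fixture actually exercises, since the linter must resolve the group from config before applying the exclusion.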
sqlfluff-2.3.5/test/fixtures/config/rules_set_to_none/000077500000000000000000000000001451700765000231735ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/rules_set_to_none/.sqlfluff000066400000000000000000000000301451700765000250130ustar00rootroot00000000000000[sqlfluff] rules = None sqlfluff-2.3.5/test/fixtures/config/rules_set_to_none/test.sql000066400000000000000000000004121451700765000246700ustar00rootroot00000000000000 /* Rules set to none test The previous default setting for rules was 'None', which meant all rules would be run. The new default is 'all', but having rules = None should still run all rules, meaning this query will trigger LT13, AM04, and CP01 */ SELECT * from bar sqlfluff-2.3.5/test/fixtures/config/toml/000077500000000000000000000000001451700765000204205ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/config/toml/pyproject.toml000066400000000000000000000005421451700765000233350ustar00rootroot00000000000000[tool.sqlfluff.core] nocolor = true verbose = 2 testing_int = 5 testing_bar = 7.698 testing_bool = false testing_arr = [ "a", "b", "c" ] testing_inline_table = { x = 1 } rules = ["LT03", "LT09"] [tool.sqlfluff.bar] foo = "foobar" [tool.sqlfluff.fnarr.fnarr] foo = "foobar" [tool.sqlfluff.rules.capitalisation.keywords] capitalisation_policy = "upper" sqlfluff-2.3.5/test/fixtures/dialects/000077500000000000000000000000001451700765000177705ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/README.md000066400000000000000000000026401451700765000212510ustar00rootroot00000000000000# Automated parser tests The `parser` directory contains the files for automated parser tests. This is organised first into folders for each `dialect` (e.g. `ansi`, `mysql`), each of which then contains both `.sql` files and `.yml` files. The intent for these folders is that each test should be in the _highest_ dialect that it can be in, i.e. if it can be in the `ansi` dialect then it should be in there. Within each folder, any `.sql` files will be tested to check that they parse successfully (i.e. that they do not raise any errors and that the parsed result does not contain any _unparsable_ segments). If there is a `.yml` file with the same filename as the `.sql` file then the _structure_ of the parsed query will also be compared against the structure within that yaml file. ## Adding a new test For best test coverage, add both a `.sql` and `.yml` file. The easiest way to add a `.yml` file is to run: ``` python test/generate_parse_fixture_yml.py [--dialect <dialect>] [--filter <filter>] [--new-only] ``` Or via `tox`: ``` tox generate-fixture-yml ``` Or via `tox` with arguments: ``` tox generate-fixture-yml -- --dialect <dialect> ``` This will regenerate all the parsed structure yml files, or a subset based on the given filters. ## Running parser tests To avoid running the whole test suite with tox after changing parsers, you can instead run: ``` pytest test/dialects/dialects_test.py ``` to save some time.
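To make the workflow above concrete, here is a minimal sketch of a fixture pair. The file name is illustrative, and the generated `.yml` shown is abridged: real generated files also carry the auto-generated header comment and `_hash` field seen throughout this archive. A new ansi fixture might contain:

```
-- test/fixtures/dialects/ansi/select_simple.sql (illustrative name)
SELECT col_a FROM tbl
```

Running `python test/generate_parse_fixture_yml.py --dialect ansi` would then write a sibling `select_simple.yml` recording the parsed structure, along these lines (mirroring the structures visible in the other ansi fixtures):

```
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col_a
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl
```

The dialect test suite then asserts both that the `.sql` file parses cleanly and that its parsed structure matches this `.yml`.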
sqlfluff-2.3.5/test/fixtures/dialects/ansi/000077500000000000000000000000001451700765000207225ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansi/.sqlfluff000066400000000000000000000000321451700765000225400ustar00rootroot00000000000000[sqlfluff] dialect = ansi sqlfluff-2.3.5/test/fixtures/dialects/ansi/alter_sequence.sql000066400000000000000000000002721451700765000244430ustar00rootroot00000000000000ALTER SEQUENCE foo INCREMENT BY 1; ALTER SEQUENCE foo MAXVALUE 7 NO minvalue; ALTER SEQUENCE foo NOCACHE CYCLE; ALTER SEQUENCE foo NOORDER CACHE 5 NOCYCLE; ALTER SEQUENCE foo ORDER; sqlfluff-2.3.5/test/fixtures/dialects/ansi/alter_sequence.yml000066400000000000000000000036221451700765000244470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8702692b6970efbc664e56a683b682412dff7f788392da48f53763ce454394ea file: - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: MAXVALUE numeric_literal: '7' - alter_sequence_options_segment: - keyword: 'NO' - keyword: minvalue - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: NOCACHE - alter_sequence_options_segment: keyword: CYCLE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: NOORDER - alter_sequence_options_segment: keyword: CACHE numeric_literal: '5' - alter_sequence_options_segment: keyword: NOCYCLE - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - alter_sequence_options_segment: keyword: ORDER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/alter_table_rename_to.sql000066400000000000000000000000641451700765000257520ustar00rootroot00000000000000ALTER TABLE old_table_name RENAME TO new_table_name;sqlfluff-2.3.5/test/fixtures/dialects/ansi/alter_table_rename_to.yml000066400000000000000000000012361451700765000257560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6a71f21b89678cde482692a03d2c37005c6266d69de63a55322abcd5b953996c file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: old_table_name - keyword: RENAME - keyword: TO - table_reference: naked_identifier: new_table_name statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.sql000066400000000000000000000021501451700765000270370ustar00rootroot00000000000000-- ansi_cast_with_whitespaces.sql /* Several valid queries where there is whitespace surrounding the ANSI cast operator (::) */ -- query from https://github.com/sqlfluff/sqlfluff/issues/2720 SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: text FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-2.3.5/test/fixtures/dialects/ansi/ansi_cast_with_whitespaces.yml000066400000000000000000000200641451700765000270450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 98901f41139ba11863745752a671f49b443c94a77a7fd6ece89c6f40ffb33aff file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: data_type_identifier: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: 
ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/arithmetic_a.sql000066400000000000000000000013561451700765000241010ustar00rootroot00000000000000SELECT 1 + (2 * 3) >= 4 + 6+13 as val; SELECT 1 + ~(~2 * 3) >= 4 + ~6+13 as val; SELECT -1; SELECT -1 + 5; SELECT ~1; SELECT -1 + ~5; SELECT 4 & ~8 | 16; SELECT 8 + ~(3); SELECT 8 | ~ ~ ~4; SELECT 1 * -(5); SELECT 1 * -5; SELECT 1 * - - - 5; SELECT 1 * - - - (5); SELECT 1 * + + (5); SELECT 1 * - - - func(5); SELECT 1 * ~ ~ ~ func(5); SELECT 1 * +(5); SELECT 1 * +5; SELECT 1 * + + 5; SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT NOT NOT (TRUE); -- parses middle NOT as column ref SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT func(5); SELECT 'abc' LIKE - - 5; -- PG can parse this ok, and then fail due to data type mismatch SELECT 'abc' LIKE ~ ~ 5; -- PG can parse this ok, and then fail due to data type mismatch sqlfluff-2.3.5/test/fixtures/dialects/ansi/arithmetic_a.yml000066400000000000000000000253111451700765000241000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 68162fcf003cf96cc1361038a52f8a6da37d5bee87037883ce33a4f2c011cf6f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - tilde: '~' - bracketed: start_bracket: ( expression: - tilde: '~' - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - tilde: '~' - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: tilde: '~' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - tilde: '~' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '4' - binary_operator: ampersand: '&' - tilde: '~' - numeric_literal: '8' - binary_operator: pipe: '|' - numeric_literal: '16' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '8' binary_operator: + tilde: '~' bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '8' - binary_operator: pipe: '|' - tilde: '~' - tilde: '~' - tilde: '~' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: '-' bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - sign_indicator: + - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - tilde: '~' - tilde: '~' - tilde: '~' - function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: + bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'FALSE' - binary_operator: AND - keyword: NOT - keyword: NOT - keyword: NOT - bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'abc'" keyword: LIKE sign_indicator: '-' numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'abc'" - keyword: LIKE - tilde: '~' - tilde: '~' - 
numeric_literal: '5' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/bracket_in_comment.sql000066400000000000000000000000271451700765000252650ustar00rootroot00000000000000select a /* ) */ from bsqlfluff-2.3.5/test/fixtures/dialects/ansi/bracket_in_comment.yml000066400000000000000000000013731451700765000252740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c4b65a8593c78cb67ed7ac4ca38869092f0274857b76255e5a79d0499a1a50fb file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b sqlfluff-2.3.5/test/fixtures/dialects/ansi/bracketed_statement.sql000066400000000000000000000000541451700765000254520ustar00rootroot00000000000000(SELECT 1); ((SELECT 1)); (((SELECT 1))); sqlfluff-2.3.5/test/fixtures/dialects/ansi/bracketed_statement.yml000066400000000000000000000024701451700765000254600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7028eaa159caaa519f3691bc92ad59783a0e192e83b0435f5633b589c6f7847 file: - statement: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: bracketed: start_bracket: ( bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/comments.sql000066400000000000000000000001221451700765000232630ustar00rootroot00000000000000-- This is a comment /* So is this */ /* This is a multiple line comment */ sqlfluff-2.3.5/test/fixtures/dialects/ansi/comments.yml000066400000000000000000000006171451700765000232760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdda373cd9cd649f82a9c5cf7ba9e290375c0ceae29477b0bad5a25f24a52ae3 file: null sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit.sql000066400000000000000000000000071451700765000227300ustar00rootroot00000000000000commit sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit.yml000066400000000000000000000007101451700765000227330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 524dc2c43882d88047d23a738fdc9a5fa41c1c595e4b2e3dba6e192f7c424244 file: statement: transaction_statement: keyword: commit sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_and_no_chain.sql000066400000000000000000000000241451700765000254070ustar00rootroot00000000000000commit and no chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_and_no_chain.yml000066400000000000000000000010041451700765000254100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d10b732862da9211f19d4b99e56ea99c86b1a5156fc2fabe7b8deaff50c2b78 file: statement: transaction_statement: - keyword: commit - keyword: and - keyword: 'no' - keyword: chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_work.sql000066400000000000000000000000141451700765000237700ustar00rootroot00000000000000commit work sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_work.yml000066400000000000000000000007341451700765000240030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7bb7ca6fe94332bd83031c85e7adc4cf063c66f926f54b6a85e17ba258015462 file: statement: transaction_statement: - keyword: commit - keyword: work sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_work_and_no_chain.sql000066400000000000000000000000311451700765000264470ustar00rootroot00000000000000commit work and no chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/commit_work_and_no_chain.yml000066400000000000000000000010301451700765000264510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4b90d12608e387d7607439e59f911f13161235e52b413201f43b79e1f264af8c file: statement: transaction_statement: - keyword: commit - keyword: work - keyword: and - keyword: 'no' - keyword: chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_cast.sql000066400000000000000000000014201451700765000237150ustar00rootroot00000000000000CREATE CAST (int AS bool) WITH FUNCTION fname; CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname(); CREATE CAST (int AS bool) WITH FUNCTION fname(bool); CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT; CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2) FOR udt_3; CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2) FOR sch.udt_3; CREATE CAST (int AS bool) WITH ROUTINE fname(); CREATE CAST (int AS bool) WITH PROCEDURE fname(); CREATE CAST (int AS bool) WITH METHOD fname(); CREATE CAST (int AS bool) WITH INSTANCE METHOD fname(); CREATE CAST (int AS bool) WITH STATIC METHOD fname(); CREATE CAST (int AS bool) WITH CONSTRUCTOR METHOD fname(); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_cast.yml000066400000000000000000000174371451700765000237360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54848c7eae3e3c3c50ab04feca39d8798be72e8e24009c02366c2b4083750362 file: - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: bool end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . 
function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: int - comma: ',' - data_type: data_type_identifier: bool - end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - comma: ',' - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: FOR - object_reference: naked_identifier: udt_3 - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - comma: ',' - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: FOR - object_reference: - naked_identifier: sch - dot: . - naked_identifier: udt_3 - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: ROUTINE - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: PROCEDURE - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: INSTANCE - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: STATIC - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - 
statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: WITH - keyword: CONSTRUCTOR - keyword: METHOD - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_database.yml000066400000000000000000000000001451700765000245220ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_database_a.sql000066400000000000000000000000331451700765000250260ustar00rootroot00000000000000create database my_databasesqlfluff-2.3.5/test/fixtures/dialects/ansi/create_database_a.yml000066400000000000000000000010441451700765000250330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e42524c286cd1f2d8fd1e82b6332662d677b62505552f1b2cc0664940f91052 file: statement: create_database_statement: - keyword: create - keyword: database - database_reference: naked_identifier: my_database sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_database_if_not_exists.sql000066400000000000000000000000511451700765000274630ustar00rootroot00000000000000create database if not exists my_databasesqlfluff-2.3.5/test/fixtures/dialects/ansi/create_database_if_not_exists.yml000066400000000000000000000011371451700765000274730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7a2ac2ab01722015628232b7c24c213d6332bd20f90d7306ce32faf0694f8a16 file: statement: create_database_statement: - keyword: create - keyword: database - keyword: if - keyword: not - keyword: exists - database_reference: naked_identifier: my_database sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_function.sql000066400000000000000000000001651451700765000246150ustar00rootroot00000000000000CREATE FUNCTION add(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; DROP FUNCTION add; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_function.yml000066400000000000000000000022761451700765000246240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6657ccb17c21230dcd44150c0464dd5e2144de9a4afe006af75c665ef55cf456 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: integer - comma: ',' - data_type: data_type_identifier: integer - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: add - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_function_no_args.sql000066400000000000000000000001211451700765000263150ustar00rootroot00000000000000CREATE FUNCTION add() RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_function_no_args.yml000066400000000000000000000016001451700765000263220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 077a98298f8ca20b74493e61089ae36b827d9c8de2e4858a507e9257619b2b41 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_index_if_not_exists.sql000066400000000000000000000001251451700765000270300ustar00rootroot00000000000000CREATE INDEX IF NOT EXISTS transaction_updated ON transaction_master (last_updated); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_index_if_not_exists.yml000066400000000000000000000015331451700765000270360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fb1585edfd71857739ed70dbec2feaf89c1615bfe41a655c8d05374f9454d739 file: statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: IF - keyword: NOT - keyword: EXISTS - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_index_simple.sql000066400000000000000000000002231451700765000254430ustar00rootroot00000000000000CREATE INDEX transaction_updated ON transaction_master(last_updated); CREATE UNIQUE INDEX transaction_updated ON transaction_master(last_updated); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_index_simple.yml000066400000000000000000000023141451700765000254500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e305bc89e96ea473198a1c092d3f15dd7764269593bfdb0e6b826e7d17f77649 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: transaction_updated - keyword: 'ON' - table_reference: naked_identifier: transaction_master - bracketed: start_bracket: ( index_column_definition: naked_identifier: last_updated end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_model_options.sql000066400000000000000000000003121451700765000256350ustar00rootroot00000000000000CREATE OR REPLACE MODEL model3 OPTIONS ( MODEL_TYPE='LOGISTIC_REG', AUTO_CLASS_WEIGHTS=TRUE, INPUT_LABEL_COLS = ['label_str'] ) AS SELECT a, b FROM table1 WHERE training = 1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_model_options.yml000066400000000000000000000037041451700765000256470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 85eb5ea66a830ebcaf88fe4de952e6b43c8c149166cbf164f5f2b04ad969b9c7 file: statement: create_model_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MODEL - object_reference: naked_identifier: model3 - keyword: OPTIONS - bracketed: - start_bracket: ( - parameter: MODEL_TYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'LOGISTIC_REG'" - comma: ',' - parameter: AUTO_CLASS_WEIGHTS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - parameter: INPUT_LABEL_COLS - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'label_str'" end_square_bracket: ']' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: training comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_role.sql000066400000000000000000000000241451700765000237230ustar00rootroot00000000000000CREATE ROLE foo_rolesqlfluff-2.3.5/test/fixtures/dialects/ansi/create_role.yml000066400000000000000000000010251451700765000237270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d37ad3c99e87b6413a9653853c23aa004554e27e1e3f7832f6c531a582920c8 file: statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: foo_role sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_schema.yml000066400000000000000000000000001451700765000242160ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_schema_a.sql000066400000000000000000000000271451700765000245250ustar00rootroot00000000000000create schema my_schemasqlfluff-2.3.5/test/fixtures/dialects/ansi/create_schema_a.yml000066400000000000000000000010341451700765000245260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b2a34157972c496f8de49616590e4c505278c27cb7f885e6091e323d83f9f630 file: statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: my_schema sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_schema_if_not_exists.sql000066400000000000000000000000451451700765000271620ustar00rootroot00000000000000create schema if not exists my_schemasqlfluff-2.3.5/test/fixtures/dialects/ansi/create_schema_if_not_exists.yml000066400000000000000000000011271451700765000271660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55fa276eca5e297488dafef7ebdf134e099cc4ea04e210d37c6f1e6c111241ca file: statement: create_schema_statement: - keyword: create - keyword: schema - keyword: if - keyword: not - keyword: exists - schema_reference: naked_identifier: my_schema sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_sequence.sql000066400000000000000000000004571451700765000246040ustar00rootroot00000000000000CREATE SEQUENCE foo; CREATE SEQUENCE foo INCREMENT BY 3; CREATE SEQUENCE foo MINVALUE 5 NO MAXVALUE; CREATE SEQUENCE foo NO MINVALUE MAXVALUE 12; CREATE SEQUENCE foo INCREMENT BY 5 START WITH 8 CACHE 4; CREATE SEQUENCE foo NOCACHE; CREATE SEQUENCE foo NOCYCLE ORDER; CREATE SEQUENCE foo NOORDER; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_sequence.yml000066400000000000000000000054051451700765000246040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0ed3f2e276d9585288a10f1422f2a387ea7fe6cc60fe9c82c5c93ecbb44c6cc file: - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '3' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: MINVALUE numeric_literal: '5' - create_sequence_options_segment: - keyword: 'NO' - keyword: MAXVALUE - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: 'NO' - keyword: MINVALUE - create_sequence_options_segment: keyword: MAXVALUE numeric_literal: '12' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '5' - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '8' - create_sequence_options_segment: keyword: CACHE numeric_literal: '4' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOCACHE - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOCYCLE - create_sequence_options_segment: keyword: ORDER - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - create_sequence_options_segment: keyword: NOORDER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table.sql000066400000000000000000000004171451700765000240570ustar00rootroot00000000000000-- Test various forms of quoted data types CREATE TABLE 
foo ( pk int PRIMARY KEY, quoted_name "custom udt", qualified_name sch.qualified, quoted_qualified "my schema".qualified, more_quoted "my schema"."custom udt", quoted_udt sch."custom udt" ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table.yml000066400000000000000000000034501451700765000240610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4d508e6a17455867f424bf23fbf6c04cd8ec300f8aa8b13b6920ee8199a9944b file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: naked_identifier: pk data_type: data_type_identifier: int column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: quoted_name data_type: quoted_identifier: '"custom udt"' - comma: ',' - column_definition: naked_identifier: qualified_name data_type: naked_identifier: sch dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: quoted_qualified data_type: quoted_identifier: '"my schema"' dot: . data_type_identifier: qualified - comma: ',' - column_definition: naked_identifier: more_quoted data_type: - quoted_identifier: '"my schema"' - dot: . - quoted_identifier: '"custom udt"' - comma: ',' - column_definition: naked_identifier: quoted_udt data_type: naked_identifier: sch dot: . quoted_identifier: '"custom udt"' - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_c1_c2.sql000066400000000000000000000000531451700765000253220ustar00rootroot00000000000000create table table1 (c1 SMALLINT, c2 DATE) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_c1_c2.yml000066400000000000000000000015321451700765000253270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b259c9e7889ddd71fda8e2d7d3e303d69184a38db1c02676ecdb69415c4e2039 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_column_constraints.sql000066400000000000000000000012101451700765000303530ustar00rootroot00000000000000create table table1 ( c1 INT NOT NULL, c2 INT NULL DEFAULT 1, c3 INT PRIMARY KEY, c4 INT UNIQUE, c5 INT REFERENCES table2, c6 INT REFERENCES table2 (c6_other), c6 INT REFERENCES table2 (c6_other) MATCH FULL, c6 INT REFERENCES table2 (c6_other) MATCH PARTIAL, c6 INT REFERENCES table2 (c6_other) MATCH SIMPLE, c6 INT REFERENCES table2 (c6_other) ON DELETE NO ACTION, c6 INT REFERENCES table2 (c6_other) ON UPDATE SET NULL, c6 INT REFERENCES table2 (c6_other) ON DELETE RESTRICT ON UPDATE CASCADE, c7 INT NOT NULL DEFAULT 1 UNIQUE REFERENCES table3 (c7_other), c8 INT NOT NULL DEFAULT 1::INT ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_column_constraints.yml000066400000000000000000000145631451700765000303740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d7769d9eb54abd09aa301054592b03b6258fb68d97f01f894f52b65d67cb820e file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: c2 - data_type: data_type_identifier: INT - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT numeric_literal: '1' - comma: ',' - column_definition: naked_identifier: c3 data_type: data_type_identifier: INT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: c4 data_type: data_type_identifier: INT column_constraint_segment: keyword: UNIQUE - comma: ',' - column_definition: naked_identifier: c5 data_type: data_type_identifier: INT column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table2 - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table2 bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH - keyword: FULL - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH 
- keyword: PARTIAL - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: MATCH - keyword: SIMPLE - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: 'NO' - keyword: ACTION - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: SET - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: c6 data_type: data_type_identifier: INT column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: c6_other end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - comma: ',' - column_definition: - naked_identifier: c7 - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT numeric_literal: '1' - column_constraint_segment: keyword: UNIQUE - column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: table3 bracketed: start_bracket: ( column_reference: naked_identifier: c7_other end_bracket: ) - comma: ',' - column_definition: - naked_identifier: c8 - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT cast_expression: numeric_literal: '1' casting_operator: '::' data_type: data_type_identifier: INT - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_pk_unique_fk_constraints.sql000066400000000000000000000002351451700765000315440ustar00rootroot00000000000000create table table1 ( c1 INT, c2 INT, c3 INT, PRIMARY KEY (c1), UNIQUE (c2, c3), FOREIGN KEY (c2, c3) REFERENCES table2 (c2_, c3_) ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_a_pk_unique_fk_constraints.yml000066400000000000000000000042011451700765000315430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
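# As a concrete sketch of that regeneration workflow (assuming a development
# checkout of sqlfluff with its test dependencies installed), a contributor who
# adds or edits the companion .sql fixture would rebuild this file with:
#     python test/generate_parse_fixture_yml.py
# which re-parses the SQL fixtures and recomputes the "_hash" field below.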
_hash: b3b030f95d03b4e596d31d6c865d1aa67ac01dd005d0a5929bd77ba56946c7d6 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c3 data_type: data_type_identifier: INT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c1 end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2_ - comma: ',' - column_reference: naked_identifier: c3_ - end_bracket: ) - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as.sql000066400000000000000000000000661451700765000245420ustar00rootroot00000000000000CREATE OR REPLACE TABLE t2 AS SELECT a, b FROM t1sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as.yml000066400000000000000000000021021451700765000245350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 74ba87761cac0f656c136be0907cf0c68088419fb3524ed5bdc65a4714ca4bf2 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as_select_cte_no_parentheses.sql000066400000000000000000000001551451700765000320300ustar00rootroot00000000000000CREATE TABLE final_rows AS WITH source_table AS ( SELECT * FROM source_data ) SELECT * FROM source_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as_select_cte_no_parentheses.yml000066400000000000000000000033131451700765000320310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a1659f3f5ab4f99e7621bf54f474960ca57dd88541f828ecc8f1d7fcfc88a175 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: final_rows - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: source_table keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_data end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as_select_cte_parentheses.sql000066400000000000000000000002051451700765000313300ustar00rootroot00000000000000CREATE TABLE final_rows AS ( WITH source_table AS ( SELECT * FROM source_data ) SELECT * FROM source_table ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_as_select_cte_parentheses.yml000066400000000000000000000035241451700765000313410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41cc71e536723cf73e74d0c075a76665a111a7de29762ce7ad9a1abba8d9a780 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: final_rows - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: source_table keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_data end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_auto_increment.sql000066400000000000000000000000551451700765000271510ustar00rootroot00000000000000CREATE TABLE a ( id INT AUTO_INCREMENT ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_auto_increment.yml000066400000000000000000000014201451700765000271500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c6defdfc61f69a5f0424a15eae97b42d22a0806c32c0e207417916c7d1370024 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: keyword: AUTO_INCREMENT end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_column_comment.sql000066400000000000000000000001001451700765000271430ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) COMMENT 'Column comment' ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_column_comment.yml000066400000000000000000000017751451700765000271700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b4f5a4ef29cb9b0822653e329beb20cb29f4b504edf94d688056a0039b317f4c file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'Column comment'" end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_column_constraint.sql000066400000000000000000000005471451700765000277040ustar00rootroot00000000000000CREATE TABLE users ( username TEXT, age INT CHECK(age > 18) ); CREATE TABLE users ( username TEXT, age INT CHECK(age IS NOT NULL) ); CREATE TABLE Persons ( ID int NOT NULL, LastName varchar(255) NOT NULL, FirstName varchar(255), Age int, City varchar(255), CONSTRAINT CHK_Person CHECK (Age>=18 AND City='Sandnes') ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_column_constraint.yml000066400000000000000000000105751451700765000277100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb5ab21c6366260a274d74adaa1ddd3577f6f3cfb09290ad9f5eb9f2560f0933 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: username data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '>' numeric_literal: '18' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: username data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: age data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: age - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Persons - bracketed: - start_bracket: ( - column_definition: naked_identifier: ID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: LastName data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: FirstName data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - comma: ',' - column_definition: naked_identifier: Age data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: City data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - comma: ',' - column_definition: naked_identifier: CONSTRAINT data_type: data_type_identifier: CHK_Person column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: Age - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '18' - binary_operator: AND - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Sandnes'" end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_constraint_default.sql000066400000000000000000000001641451700765000300260ustar00rootroot00000000000000BEGIN TRANSACTION; CREATE TABLE IF NOT EXISTS "tbl" ( "col" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); COMMIT; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_constraint_default.yml000066400000000000000000000022651451700765000300340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 149db629af0164d27dc37ef59df84f570101734b0f14157185888321791fddb1 file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '"tbl"' - bracketed: start_bracket: ( column_definition: - quoted_identifier: '"col"' - data_type: keyword: TIMESTAMP - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_constraint_reference_option.sql000066400000000000000000000005141451700765000317270ustar00rootroot00000000000000CREATE TABLE b ( b INT NOT NULL, c INT NOT NULL, d INT NOT NULL, CONSTRAINT c_b FOREIGN KEY (b) REFERENCES a(b) ON DELETE RESTRICT ON UPDATE NO ACTION, CONSTRAINT c_d FOREIGN KEY (d) REFERENCES a(d) ON UPDATE CASCADE ON DELETE SET NULL, CONSTRAINT c_c FOREIGN KEY (c) REFERENCES a(c) ON DELETE SET DEFAULT ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_constraint_reference_option.yml000066400000000000000000000064621451700765000317410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4dcc841678a21a7fe42a1604833bdc7089d1f015e3041884d035b203d3e11b32 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: b - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: d data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_b - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - keyword: 'ON' - keyword: UPDATE - keyword: 'NO' - keyword: ACTION - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_d - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: d end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: d end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c_c - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: c end_bracket: ) - keyword: REFERENCES - table_reference: 
naked_identifier: a - bracketed: start_bracket: ( column_reference: naked_identifier: c end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: SET - keyword: DEFAULT - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_default_function.sql000066400000000000000000000000651451700765000274670ustar00rootroot00000000000000CREATE TABLE a ( ts TIMESTAMP DEFAULT GETDATE() )sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_default_function.yml000066400000000000000000000017001451700765000274660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5fba003cbd9f65cfc9ec87ce57f3046b77c5dcbf6946999aacd4e59b49bd7313 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: ts data_type: keyword: TIMESTAMP column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_double_precision.sql000066400000000000000000000000561451700765000274630ustar00rootroot00000000000000CREATE TABLE test ( angle double precision ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_double_precision.yml000066400000000000000000000013741451700765000274710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 922fa2632db9df21e02a75ae5a704665060b2f2726e5c5c703b0550430da3250 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test - bracketed: start_bracket: ( column_definition: naked_identifier: angle data_type: - keyword: double - keyword: precision end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_table_comment.sql000066400000000000000000000000771451700765000267520ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) ) COMMENT 'Table comment' sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_table_comment.yml000066400000000000000000000017051451700765000267530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: da6f038cd44a46433364b3711dd06159873265fb4432c8c90053a97068b9b4b6 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) - comment_clause: keyword: COMMENT quoted_literal: "'Table comment'" sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_varchar.sql000066400000000000000000000000471451700765000255640ustar00rootroot00000000000000CREATE TABLE a ( id VARCHAR(100) ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_table_varchar.yml000066400000000000000000000015541451700765000255720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddbfbeee0227954476881c7b0236fdc0ca7fa47d1401c2911ed5508aff69f55f file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_trigger.sql000066400000000000000000000006621451700765000244350ustar00rootroot00000000000000CREATE TRIGGER foo BEFORE INSERT ON bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo BEFORE INSERT on bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo AFTER UPDATE OF bar, baz ON bar EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE proc(args); CREATE TRIGGER foo BEFORE INSERT ON bar WHEN (a=b) EXECUTE PROCEDURE proc(args); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_trigger.yml000066400000000000000000000070241451700765000244360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 77dfeed770d7d6ba167bd221b10d00dfcb9e2c78a9c3b38975352b733f9e00d3 file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'on' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - keyword: EXECUTE - keyword: PROCEDURE - function_name_identifier: proc - bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_user.sql000066400000000000000000000000251451700765000237410ustar00rootroot00000000000000CREATE USER foo_user sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_user.yml000066400000000000000000000010251451700765000237440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
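# For ad-hoc inspection of a single fixture (a hedged illustration, not a
# substitute for the generator above), the same parse tree can be printed with
# the sqlfluff CLI, for example:
#     sqlfluff parse --dialect ansi test/fixtures/dialects/ansi/create_user.sql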
_hash: 04fed1ca81b7b7e7a80eb72cffecba963fe5218190b64a4347770e44dd839146 file: statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: foo_user sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_view_a.sql000066400000000000000000000004611451700765000242410ustar00rootroot00000000000000CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); CREATE OR REPLACE VIEW vw_appt_latest AS ( WITH most_current as ( SELECT da.* FROM dim_appt da WHERE da.current_appt_id IS NULL ) SELECT * from most_current ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_view_a.yml000066400000000000000000000073641451700765000242540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 828d3386d75173425688561ed663eaed5b254a2b06a3288cafd28f329128f049 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: a - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vw_appt_latest - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: most_current keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: da dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dim_appt alias_expression: naked_identifier: da where_clause: keyword: WHERE expression: column_reference: - naked_identifier: da - dot: . - naked_identifier: current_appt_id keyword: IS null_literal: 'NULL' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: most_current end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_view_if_not_exists.sql000066400000000000000000000001651451700765000266770ustar00rootroot00000000000000CREATE VIEW IF NOT EXISTS a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_view_if_not_exists.yml000066400000000000000000000034341451700765000267030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6932759b92f812b45c0a8c035862e8dfaf8b3055b433de9b17e2e20604d7d93c file: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: a - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: id end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_zero_argument_function.sql000066400000000000000000000002321451700765000275510ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION a() RETURNS integer AS ' SELECT 1; ' LANGUAGE SQL; CREATE FUNCTION a() RETURNS integer AS ' SELECT 1; ' LANGUAGE SQL; sqlfluff-2.3.5/test/fixtures/dialects/ansi/create_zero_argument_function.yml000066400000000000000000000026371451700765000275660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a34b50baf434a3c5d92187e85a5fbecb00d8f1695deec76a38ce613ff964436b file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: a - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'\n SELECT 1;\n'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: a - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - quoted_literal: "'\n SELECT 1;\n'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/delete_from.sql000066400000000000000000000000441451700765000237260ustar00rootroot00000000000000DELETE FROM table_name WHERE a > 0; sqlfluff-2.3.5/test/fixtures/dialects/ansi/delete_from.yml000066400000000000000000000016161451700765000237360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9d32ff56cb5f997f61ef9026e4322d824482c3e189b70583070cb106cfeb6a1b file: statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/describe_table.sql000066400000000000000000000000331451700765000243660ustar00rootroot00000000000000describe table "my_table"; sqlfluff-2.3.5/test/fixtures/dialects/ansi/describe_table.yml000066400000000000000000000010771451700765000244010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c5ded8a79782ebe613a1e38b6e94c82aafd938c8f45edb5aa950939e7444626 file: statement: describe_statement: keyword: describe naked_identifier: table object_reference: quoted_identifier: '"my_table"' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/double_dot.sql000066400000000000000000000002731451700765000235650ustar00rootroot00000000000000-- Snowflake Double-Dot Notation -- https://docs.snowflake.com/en/sql-reference/name-resolution.html#resolution-when-schema-omitted-double-dot-notation SELECT * FROM my_database..my_tablesqlfluff-2.3.5/test/fixtures/dialects/ansi/double_dot.yml000066400000000000000000000015721451700765000235720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e87c7ed014c2b8ec15e4fe464f02b12cc5608766a1b970e657ce20b5977042f0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: my_database - dot: . - dot: . - naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_cast.sql000066400000000000000000000002401451700765000234150ustar00rootroot00000000000000DROP CAST (int AS bool); DROP CAST (int AS bool) RESTRICT; DROP CAST (int AS bool) CASCADE; DROP CAST (udt_1 AS udt_2); DROP CAST (sch.udt_1 AS sch.udt_2); sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_cast.yml000066400000000000000000000037751451700765000234370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9ab44c21d3a8f7594f50924ffe73aaf87716721475787ac5558e197378caf83c file: - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: int - keyword: AS - data_type: data_type_identifier: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_index_if_exists.sql000066400000000000000000000000521451700765000256500ustar00rootroot00000000000000DROP INDEX IF EXISTS transaction_updated; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_index_if_exists.yml000066400000000000000000000011411451700765000256520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3634f62eee0607dad56faa2de91f83077f514dd94ddbb22de9c6cb3e5e9cff0f file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: transaction_updated statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_index_simple.sql000066400000000000000000000000401451700765000251410ustar00rootroot00000000000000DROP INDEX transaction_updated; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_index_simple.yml000066400000000000000000000010711451700765000251500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 463c6a46c26e0890aca73e7444abf5f6ff40309a08d248cdb5d6deab9aac69ec file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: transaction_updated statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_model.sql000066400000000000000000000000341451700765000235640ustar00rootroot00000000000000DROP MODEL IF EXISTS model3 sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_model.yml000066400000000000000000000010721451700765000235710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2c02c6b91dd87146c7d0e271966beea6473289a4ae243637cd8742d72f5aa1ed file: statement: drop_MODELstatement: - keyword: DROP - keyword: MODEL - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: model3 sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_schema.sql000066400000000000000000000003121451700765000237230ustar00rootroot00000000000000drop schema my_schema; drop schema my_schema cascade; drop schema my_schema restrict; drop schema if exists my_schema; drop schema if exists my_schema cascade; drop schema if exists my_schema restrict; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_schema.yml000066400000000000000000000031201451700765000237250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54f1225c5247d92c341517d4b0626dd3b5b9f6df4fe91e4fb61e27c12f81b68a file: - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: cascade - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - schema_reference: naked_identifier: my_schema - keyword: restrict - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - keyword: cascade - statement_terminator: ; - statement: drop_schema_statement: - keyword: drop - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: my_schema - keyword: restrict - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_sequence.sql000066400000000000000000000000531451700765000242750ustar00rootroot00000000000000DROP SEQUENCE foo; DROP SEQUENCE foo.foo; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_sequence.yml000066400000000000000000000014071451700765000243030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad101c01a6b1de9948242d88c6100a9d96553b768c416aeabda25f24162545a2 file: - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: foo - dot: . 
- naked_identifier: foo - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a.sql000066400000000000000000000000151451700765000240520ustar00rootroot00000000000000drop table a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a.yml000066400000000000000000000010151451700765000240550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1cda24832a0538f65fc2459b57806a1e2212dfd66c59e3a338e297caa0e8ccd file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a_cascade.sql000066400000000000000000000000251451700765000255160ustar00rootroot00000000000000drop table a cascade sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a_cascade.yml000066400000000000000000000010441451700765000255220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e6c446ca08215c9e601ab3ab20521d1dadecdc90f059c8ad2d2063d835ae81ae file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a - keyword: cascade sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a_restrict.sql000066400000000000000000000000261451700765000257730ustar00rootroot00000000000000drop table a restrict sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_a_restrict.yml000066400000000000000000000010451451700765000257770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 181c25001520158bcef4025a06fdccd3400ec29e57a5f29667ed4f953bcbbf14 file: statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: a - keyword: restrict sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_if_exists_a.sql000066400000000000000000000000271451700765000261320ustar00rootroot00000000000000drop table if exists a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_table_if_exists_a.yml000066400000000000000000000010651451700765000261370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c0ff0ea789008c5781f680d2c2203bd6b17aa8e0467434a795d8fb2f96f56abe file: statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_trigger.sql000066400000000000000000000000221451700765000241240ustar00rootroot00000000000000DROP TRIGGER foo; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_trigger.yml000066400000000000000000000010451451700765000241340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5a6c840481533f56dbc53131a0b3635f48d417d2581a5971178265d9fc44768c file: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_type.sql000066400000000000000000000001561451700765000234520ustar00rootroot00000000000000DROP TYPE typename; DROP TYPE IF EXISTS typename; DROP TYPE typename CASCADE; DROP TYPE typename RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_type.yml000066400000000000000000000021551451700765000234550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: af0af8bb9b926a9826a74fd76cace2e475ea1c3747ccbf0245b2fc47180ed5a5 file: - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: typename - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - object_reference: naked_identifier: typename - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a.sql000066400000000000000000000000141451700765000237340ustar00rootroot00000000000000drop view a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a.yml000066400000000000000000000010131451700765000237360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
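# Once regenerated, fixtures like this one are exercised by the dialect test
# suite; a typical local run (a sketch, assuming a pytest-based development
# environment) would be:
#     pytest test/dialects/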
_hash: cc09d9bc6e480190a8c283a36082f8b11104b89247d2ed552a4e1768ce9b0cb6 file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a_cascade.sql000066400000000000000000000000241451700765000254000ustar00rootroot00000000000000drop view a cascade sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a_cascade.yml000066400000000000000000000010421451700765000254030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b0e62b90e366c4740da190f473bda08e67af21745183790557383213e3e9be01 file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a - keyword: cascade sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a_restrict.sql000066400000000000000000000000251451700765000256550ustar00rootroot00000000000000drop view a restrict sqlfluff-2.3.5/test/fixtures/dialects/ansi/drop_view_a_restrict.yml000066400000000000000000000010431451700765000256600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bd605eda6485ce2c878fd84d2fee268e9c78b528e1204357ba90dd575035ebcb file: statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: a - keyword: restrict sqlfluff-2.3.5/test/fixtures/dialects/ansi/empty_file.sql000066400000000000000000000000001451700765000235660ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansi/empty_file.yml000066400000000000000000000006171451700765000236060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fdda373cd9cd649f82a9c5cf7ba9e290375c0ceae29477b0bad5a25f24a52ae3 file: null sqlfluff-2.3.5/test/fixtures/dialects/ansi/escape.sql000066400000000000000000000000551451700765000227030ustar00rootroot00000000000000SELECT * FROM x WHERE z LIKE '^_f' ESCAPE '^'sqlfluff-2.3.5/test/fixtures/dialects/ansi/escape.yml000066400000000000000000000020061451700765000227030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6031d20b71f31d745d515287b9081a8b9bb89cf246fdcda13a3981bf3469dbc6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: WHERE expression: - column_reference: naked_identifier: z - keyword: LIKE - quoted_literal: "'^_f'" - keyword: ESCAPE - quoted_literal: "'^'" sqlfluff-2.3.5/test/fixtures/dialects/ansi/escaped_quotes.sql000066400000000000000000000001631451700765000244470ustar00rootroot00000000000000select case when "Spec\"s 23" like 'Spec\'s%' then 'boop' end as field; select 'This shouldn''t fail' as success; sqlfluff-2.3.5/test/fixtures/dialects/ansi/escaped_quotes.yml000066400000000000000000000025171451700765000244560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e9cde3cbfcab57f38d759e64d68ed11707b93e371783ea18ae937ef1b45492a1 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: quoted_identifier: '"Spec\"s 23"' keyword: like quoted_literal: "'Spec\\'s%'" - keyword: then - expression: quoted_literal: "'boop'" - keyword: end alias_expression: keyword: as naked_identifier: field - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'This shouldn''t fail'" alias_expression: keyword: as naked_identifier: success - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/expression_recursion.sql000066400000000000000000000046761451700765000257500ustar00rootroot00000000000000 -- This test checks for recursion errors. If the expression -- is not parsed correctly it can lead to very deep recursion. -- If this test is failing, then check the structure of expression -- parsing. 
select 1 from test_table where test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' --5 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 10 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 15 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 20 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' --30 or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' or test_table.string_field like 'some string%' -- 40 sqlfluff-2.3.5/test/fixtures/dialects/ansi/expression_recursion.yml000066400000000000000000000232501451700765000257370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ba286d89ab754541c9bbd2d1b81aa8e3895ca103e075a61b38f01db1667f7dc1 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table where_clause: keyword: where expression: - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . 
- naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . 
- naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" - binary_operator: or - column_reference: - naked_identifier: test_table - dot: . - naked_identifier: string_field - keyword: like - quoted_literal: "'some string%'" sqlfluff-2.3.5/test/fixtures/dialects/ansi/expression_recursion_2.sql000066400000000000000000000010671451700765000261600ustar00rootroot00000000000000 -- This test checks for recursion errors. If the expression -- is not parsed correctly it can lead to very deep recursion. -- If this test is failing, then check the structure of expression -- parsing. SELECT * FROM t WHERE a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b AND a < b sqlfluff-2.3.5/test/fixtures/dialects/ansi/expression_recursion_2.yml000066400000000000000000000202211451700765000261530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ec768aae77d8a2ecce149b275de13c7ca322a6d1fc8a2d249ca6ed00a7445d15 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - 
column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b sqlfluff-2.3.5/test/fixtures/dialects/ansi/from_fetch.sql000066400000000000000000000000571451700765000235610ustar00rootroot00000000000000SELECT * FROM counter FETCH FIRST 10 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/from_fetch.yml000066400000000000000000000016511451700765000235640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dc15eef87b4a1e04131c0c5059abe5c561cb4e8a656934f74aec336cfe33f1e0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '10' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/functions_a.sql000066400000000000000000000002071451700765000237520ustar00rootroot00000000000000SELECT DATE(t), ROUND(b, 2), LEFT(right(s, 5), LEN(s + 6)) as compound FROM tbl_b; SELECT _custom_function(5) as test_column; sqlfluff-2.3.5/test/fixtures/dialects/ansi/functions_a.yml000066400000000000000000000061001451700765000237520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cddea2ed2c29465ac50497915f3647aa81be4ffb94314bf52367a5bfaee996d7 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: t end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROUND bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEFT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: right bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: s - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: LEN bracketed: start_bracket: ( expression: column_reference: naked_identifier: s binary_operator: + numeric_literal: '6' end_bracket: ) - end_bracket: ) alias_expression: keyword: as naked_identifier: compound from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: _custom_function bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) alias_expression: keyword: as naked_identifier: test_column - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/functions_b.sql000066400000000000000000000002051451700765000237510ustar00rootroot00000000000000-- Thanks @mrshu for this query, it tests nested functions SELECT SPLIT(LOWER(text), ' ') AS text FROM "database"."sample_table" sqlfluff-2.3.5/test/fixtures/dialects/ansi/functions_b.yml000066400000000000000000000027331451700765000237630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e4bdc23e403b479428b3dc9824cd2177d8b78d1f5ecef106c49daf64513e0c7d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SPLIT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER bracketed: start_bracket: ( expression: column_reference: naked_identifier: text end_bracket: ) - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) alias_expression: keyword: AS naked_identifier: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"database"' - dot: . - quoted_identifier: '"sample_table"' sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_on_mytable_to_role.sql000066400000000000000000000000371451700765000273420ustar00rootroot00000000000000grant all on mytable to public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_on_mytable_to_role.yml000066400000000000000000000011541451700765000273450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8305d724d1da5414030fc847f2dd138ab8af97040c8366e510a96d6f1f77735 file: statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_on_table_mytable_to_role.sql000066400000000000000000000000451451700765000305100ustar00rootroot00000000000000grant all on table mytable to myrole sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_on_table_mytable_to_role.yml000066400000000000000000000012011451700765000305050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 97e7798a4e3aac6fbb05d2025e8ac0ae6be6be806ce036119eedd9103ca3b26a file: statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: myrole sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_privileges_on_mytable_to_role.sql000066400000000000000000000000521451700765000315700ustar00rootroot00000000000000grant all privileges on mytable to myrole sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_all_privileges_on_mytable_to_role.yml000066400000000000000000000012061451700765000315740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 722e222a729b789a73aa7a7c29b5181a7f048377b5741b29810476a4b2f341ea file: statement: access_statement: - keyword: grant - keyword: all - keyword: privileges - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: myrole sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_multiple_tables.sql000066400000000000000000000007401451700765000260240ustar00rootroot00000000000000GRANT INSERT ON my_table, my_table2 TO public; GRANT INSERT ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE ON my_table, my_table2 TO public; GRANT INSERT, UPDATE ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE, DELETE ON my_table, my_table2 TO public; GRANT INSERT, UPDATE, DELETE ON my_table, my_table2 TO "public"; GRANT INSERT, UPDATE, DELETE, SELECT ON my_table, my_table2 TO public; GRANT INSERT, UPDATE, DELETE, SELECT ON my_table, my_table2 TO "public"; sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_multiple_tables.yml000066400000000000000000000067321451700765000260350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1c12c3673e61617399ed12ec453a59f43e5bf00d2b38e6117f3ef08efb25ddc3 file: - statement: access_statement: - keyword: GRANT - keyword: INSERT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: 
naked_identifier: my_table2 - keyword: TO - role_reference: naked_identifier: public - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: my_table - comma: ',' - object_reference: naked_identifier: my_table2 - keyword: TO - role_reference: quoted_identifier: '"public"' - statement_terminator: ; grant_select_col1_col2_update_col1_on_mytable_to_public.sql000066400000000000000000000000761451700765000344270ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansigrant select (col1, col2), update (col1) on mytable to public grant_select_col1_col2_update_col1_on_mytable_to_public.yml000066400000000000000000000017341451700765000344330ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/ansi# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7723482a8f471de4ce6b952aead403d556fd48795cad2d947d68d5d7baeee290 file: statement: access_statement: - keyword: grant - keyword: select - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: update - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public.sql000066400000000000000000000000421451700765000303620ustar00rootroot00000000000000grant select on mytable to public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public.yml000066400000000000000000000011571451700765000303740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 29c7fafdbdad2321a36d17f804f86e5b3fd472049786028fd1310097c14c6c45 file: statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public_with_grant_option.sql000066400000000000000000000000641451700765000342040ustar00rootroot00000000000000grant select on mytable to public with grant option sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_on_mytable_to_public_with_grant_option.yml000066400000000000000000000012561451700765000342120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 74f8012e4befe77e9c842c77b0dbf29035a7f04c5dedd0eba48c352539d91932 file: statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public - keyword: with - keyword: grant - keyword: option sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_update_insert_on_mytable_to_public.sql000066400000000000000000000000621451700765000333120ustar00rootroot00000000000000grant select, update, insert on mytable to public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_select_update_insert_on_mytable_to_public.yml000066400000000000000000000012751451700765000333230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efa558aa4843d6c812b49728b5c5644ffb6f79a126233ff013fc6a8b30b89ebe file: statement: access_statement: - keyword: grant - keyword: select - comma: ',' - keyword: update - comma: ',' - keyword: insert - keyword: 'on' - object_reference: naked_identifier: mytable - keyword: to - role_reference: naked_identifier: public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_update_on_all_tables_in_schema_a_to_public.sql000066400000000000000000000000611451700765000333410ustar00rootroot00000000000000grant update on all tables in schema a to public sqlfluff-2.3.5/test/fixtures/dialects/ansi/grant_update_on_all_tables_in_schema_a_to_public.yml000066400000000000000000000012721451700765000333500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d44ae0355d61b36f7b20519f4b7c29abba67ad1c63d2a25cec001c882ba3c0f file: statement: access_statement: - keyword: grant - keyword: update - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: naked_identifier: a - keyword: to - role_reference: naked_identifier: public sqlfluff-2.3.5/test/fixtures/dialects/ansi/group_by_fetch.sql000066400000000000000000000001161451700765000244400ustar00rootroot00000000000000SELECT status FROM orders GROUP BY status FETCH FIRST 3 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/group_by_fetch.yml000066400000000000000000000020161451700765000244430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 379ba05fd8fd1bd3e4c5030df27c24869bb5ac75ece660e7da1a45dc715634c6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: status from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: status fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/having_fetch.sql000066400000000000000000000001761451700765000240740ustar00rootroot00000000000000SELECT house_id, COUNT (person_id) FROM persons GROUP BY house_id HAVING COUNT (person_id) > 10 FETCH FIRST 30 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/having_fetch.yml000066400000000000000000000034431451700765000240760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5eabc81d543256d133d52eba8e50015bdbc30e9b8bac8f19202393dc712ed6f6 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: house_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: person_id end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: persons groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: house_id having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: person_id end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '30' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_a.sql000066400000000000000000000001341451700765000232450ustar00rootroot00000000000000INSERT into tbl_b (col1) values (123); INSERT INTO tbl_c ( SELECT * FROM table1 ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_a.yml000066400000000000000000000027251451700765000232570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 039eff37d4ce25fec8d2a99a5e11fc8145d03925916ccfa9ef22203887299592 file: - statement: insert_statement: - keyword: INSERT - keyword: into - table_reference: naked_identifier: tbl_b - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl_c - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_default_values.sql000066400000000000000000000000441451700765000260300ustar00rootroot00000000000000INSERT INTO mytable DEFAULT VALUES; sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_default_values.yml000066400000000000000000000011271451700765000260350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5635732edc5f38069bfe20809107f8b9f1d88a8415966d1daf88ee0afcf8572 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - keyword: DEFAULT - keyword: VALUES statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_using_subquery.sql000066400000000000000000000001531451700765000261120ustar00rootroot00000000000000INSERT INTO foo SELECT 0 AS bar; INSERT INTO foo (SELECT 1 AS bar); INSERT INTO foo ((SELECT 1 AS bar)); sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_using_subquery.yml000066400000000000000000000034141451700765000261170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a385cd32bc29fe03941e9b42b6ac62fb8b7d5760272ed88f2a543a7508b6b927 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' alias_expression: keyword: AS naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: bar end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_with_statement.sql000066400000000000000000000002451451700765000260670ustar00rootroot00000000000000INSERT INTO table2 (column1, column2, column3) WITH mycte AS ( SELECT foo, bar FROM mytable1 ) SELECT foo, bar, baz FROM mycte; sqlfluff-2.3.5/test/fixtures/dialects/ansi/insert_with_statement.yml000066400000000000000000000044501451700765000260730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aa63e81316e7735b7b77672357284c7905d78729e2ec8fec5c205a1544fd459b file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/like_operators.sql000066400000000000000000000004561451700765000244720ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/828 -- https://github.com/sqlfluff/sqlfluff/issues/842 -- https://www.postgresql.org/docs/9.0/functions-matching.html#FUNCTIONS-LIKE SELECT * FROM my_tbl WHERE a !~ '[a-z]' AND d !~~* '[a-z]' AND b LIKE 'Spec\'s%' AND c !~* '^([0-9]){1,}(\.)([0-9]{1,})$' sqlfluff-2.3.5/test/fixtures/dialects/ansi/like_operators.yml000066400000000000000000000027111451700765000244700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f9a948444cc206b11e887205e197e2486899b67c235db0be229db5700c5fa4d9 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - like_operator: '!~' - quoted_literal: "'[a-z]'" - binary_operator: AND - column_reference: naked_identifier: d - like_operator: '!~~*' - quoted_literal: "'[a-z]'" - binary_operator: AND - column_reference: naked_identifier: b - keyword: LIKE - quoted_literal: "'Spec\\'s%'" - binary_operator: AND - column_reference: naked_identifier: c - like_operator: '!~*' - quoted_literal: "'^([0-9]){1,}(\\.)([0-9]{1,})$'" sqlfluff-2.3.5/test/fixtures/dialects/ansi/merge_into.sql000066400000000000000000000011521451700765000235720ustar00rootroot00000000000000-- Merge using Table MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Select MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Delete MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN MATCHED THEN DELETE; -- Merge using multiple operations MERGE INTO t USING u ON (a = b) WHEN MATCHED AND a > b THEN UPDATE SET a = b WHEN MATCHED AND ( a < b AND c < d ) THEN DELETE WHEN NOT MATCHED THEN INSERT (a, c) VALUES (b, d); sqlfluff-2.3.5/test/fixtures/dialects/ansi/merge_into.yml000066400000000000000000000203551451700765000236020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 12b2866dcee4e144701a860bb381a3c8d3106abf8b3fa1a961543dd1fc0a4c95 file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - alias_expression: keyword: AS naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( 
expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: b - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: d - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/modulo.sql000066400000000000000000000002731451700765000227440ustar00rootroot00000000000000SELECT CASE WHEN (year_number % 400 = 0) OR (year_number % 4 = 0 AND year_number % 100 != 0) THEN TRUE ELSE FALSE END AS is_leap_year FROM mytable sqlfluff-2.3.5/test/fixtures/dialects/ansi/modulo.yml000066400000000000000000000050301451700765000227420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 46ba166cadf9fa6b22ec050742dd5cb4a9e61424167441a19a7fc1be1315bba5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '400' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' end_bracket: ) - binary_operator: OR - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '4' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - binary_operator: AND - column_reference: naked_identifier: year_number - binary_operator: '%' - numeric_literal: '100' - comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' - numeric_literal: '0' end_bracket: ) - keyword: THEN - expression: boolean_literal: 'TRUE' - else_clause: keyword: ELSE expression: boolean_literal: 'FALSE' - keyword: END alias_expression: keyword: AS naked_identifier: is_leap_year from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable sqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_a.sql000066400000000000000000000001521451700765000251570ustar00rootroot00000000000000select a from tbl1 /*comment here*/ ; /*and here*/ select b from tbl2; -- trailing ending comment sqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_a.yml000066400000000000000000000022461451700765000251670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2fa662a2e8867bec6a62bd9077d5d583802675ef5ea6c2bd07acc0bedacbe7f3 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_b.sql000066400000000000000000000004451451700765000251650ustar00rootroot00000000000000# COMMENT -- Another Comment Select A from Sys.dual where a -- inline comment in ('RED', /* Inline */ 'GREEN','BLUE'); select * from tbl_b; # as another comment insert into sch.tbl_b (col1) values (123); with tmp as ( select * from blah ) select a, b from tmp; # And that's the endsqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_b.yml000066400000000000000000000064241451700765000251700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 938b6683f1b9f0f7851bf9bcdfaf2bb51e51191501dfadde73d7dd881c3f7182 file: - statement: select_statement: select_clause: keyword: Select select_clause_element: column_reference: naked_identifier: A from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sys - dot: .
- naked_identifier: dual where_clause: keyword: where expression: column_reference: naked_identifier: a keyword: in bracketed: - start_bracket: ( - quoted_literal: "'RED'" - comma: ',' - quoted_literal: "'GREEN'" - comma: ',' - quoted_literal: "'BLUE'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl_b - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: tmp keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_c.sql000066400000000000000000000001311451700765000251560ustar00rootroot00000000000000select * from boo; WITH blah AS (select x,y,4.567 FROM foo) select z, y, x from blah;sqlfluff-2.3.5/test/fixtures/dialects/ansi/multi_statement_c.yml000066400000000000000000000044621451700765000251730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8b80ac837c1d2f5e4dede56754cabd62664e3d88e0359515f5f0e4ae7a5f4431 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: blah keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: numeric_literal: '4.567' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: z - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/naked_identifiers.sql000066400000000000000000000005311451700765000251110ustar00rootroot00000000000000-- A valid identifier is alphanumeric and contains at least one letter. select "a" as 0_is_a_legal_identifier; select "a" as 00_is_a_legal_identifier; select 123_is_a_legal_identifier.456_is_a_legal_identifier; select "a" as 0is_a_legal_identifier; select _s00.45_is_a_legal_identifier from sdf9_._234awdf; select "a" as is_a_legal_identifier0; sqlfluff-2.3.5/test/fixtures/dialects/ansi/naked_identifiers.yml000066400000000000000000000046371451700765000251260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ef29a36b737d98354dc3a7c87b1ffae25a39700e7ca832ad716f3b85f882359a file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: keyword: as naked_identifier: 0_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: keyword: as naked_identifier: 00_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: 123_is_a_legal_identifier - dot: . - naked_identifier: 456_is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: keyword: as naked_identifier: 0is_a_legal_identifier - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: _s00 - dot: . 
- naked_identifier: 45_is_a_legal_identifier from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sdf9_ - dot: . - naked_identifier: _234awdf - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '"a"' alias_expression: keyword: as naked_identifier: is_a_legal_identifier0 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/non_breaking_space.sql000066400000000000000000000001101451700765000252420ustar00rootroot00000000000000#space before from is non-breaking space SELECT a,b, c from sch."blah" sqlfluff-2.3.5/test/fixtures/dialects/ansi/non_breaking_space.yml000066400000000000000000000020341451700765000252530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41944fdfe4253c4020bed83e05d0bd14c2539fd2155859af01458fda50d4a7bb file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sch dot: . quoted_identifier: '"blah"' sqlfluff-2.3.5/test/fixtures/dialects/ansi/numeric_literal.sql000066400000000000000000000001741451700765000246230ustar00rootroot00000000000000SELECT 1, 1.2, 1., .2, 1e3, 1E3, 1.2e+3, 1.2E+3, 1.e-3, 1.E-3, .2e3, .2E3 ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/numeric_literal.yml000066400000000000000000000027051451700765000246270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0533292dcf978bb1a20080d5df81ca71fdebc7a769b09e9b9f8c3b72f8e030e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '1.2' - comma: ',' - select_clause_element: numeric_literal: '1.' 
- comma: ',' - select_clause_element: numeric_literal: '.2' - comma: ',' - select_clause_element: numeric_literal: 1e3 - comma: ',' - select_clause_element: numeric_literal: 1E3 - comma: ',' - select_clause_element: numeric_literal: '1.2e+3' - comma: ',' - select_clause_element: numeric_literal: '1.2E+3' - comma: ',' - select_clause_element: numeric_literal: '1.e-3' - comma: ',' - select_clause_element: numeric_literal: '1.E-3' - comma: ',' - select_clause_element: numeric_literal: .2e3 - comma: ',' - select_clause_element: numeric_literal: .2E3 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/revoke_select_on_table_a_from_group_b.sql000066400000000000000000000000461451700765000312000ustar00rootroot00000000000000revoke select on table a from group b sqlfluff-2.3.5/test/fixtures/dialects/ansi/revoke_select_on_table_a_from_group_b.yml000066400000000000000000000012231451700765000312000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8f0fcfb3080b3ecc20c562740c3f682929cf8be7ea60f2b4b54fc46c1fdd371f file: statement: access_statement: - keyword: revoke - keyword: select - keyword: 'on' - keyword: table - object_reference: naked_identifier: a - keyword: from - keyword: group - object_reference: naked_identifier: b sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback.sql000066400000000000000000000000111451700765000232240ustar00rootroot00000000000000rollback sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback.yml000066400000000000000000000007121451700765000232360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6935c614cdd8c2ff4f31d203e1c6c3e810992659d1780aae96bd4969b6457d24 file: statement: transaction_statement: keyword: rollback sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_and_no_chain.sql000066400000000000000000000000261451700765000257120ustar00rootroot00000000000000rollback and no chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_and_no_chain.yml000066400000000000000000000010061451700765000257130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 86784a218718ec1870247ec1a3b586b7bdef8ba4fad2f464d3e89da0e455e594 file: statement: transaction_statement: - keyword: rollback - keyword: and - keyword: 'no' - keyword: chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_work.sql000066400000000000000000000000161451700765000242730ustar00rootroot00000000000000rollback work sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_work.yml000066400000000000000000000007361451700765000243060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5ca7dcd8037e150a8f4b5e125f52ea9a337ada16beabff610feb32d92a02139 file: statement: transaction_statement: - keyword: rollback - keyword: work sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_work_and_no_chain.sql000066400000000000000000000000331451700765000267520ustar00rootroot00000000000000rollback work and no chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/rollback_work_and_no_chain.yml000066400000000000000000000010321451700765000267540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c14181da00acf0aadc77c0de0522c7c49123f94fd96f2d0aa0283289b15d2c9 file: statement: transaction_statement: - keyword: rollback - keyword: work - keyword: and - keyword: 'no' - keyword: chain sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_a.sql000066400000000000000000000000351451700765000232200ustar00rootroot00000000000000select a,b, c from sch."blah"sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_a.yml000066400000000000000000000020341451700765000232230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 264966256d79237298c65904f5e9b688beb292b2a0c39d20a879467f4fac2efe file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sch dot: . quoted_identifier: '"blah"' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_a_and_not_b.sql000066400000000000000000000001211451700765000252170ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/827 SELECT a AND NOT i.b FROM isqlfluff-2.3.5/test/fixtures/dialects/ansi/select_a_and_not_b.yml000066400000000000000000000017061451700765000252330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b4ece8e4414be9539e889fbb3876c9e1046c7ea8e4dbbef100ed3ffb536cea38 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - binary_operator: AND - keyword: NOT - column_reference: - naked_identifier: i - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: i sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_b.sql000066400000000000000000000000551451700765000232230ustar00rootroot00000000000000select * from foo JOIN bar ON (foo.a = bar.a)sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_b.yml000066400000000000000000000030021451700765000232200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0df936d2bcdda936bef477103ec7616d96829041deedb0f20cee737779892555 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: bar join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: foo - dot: . - naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bar - dot: . - naked_identifier: a end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_bitwise_operators.sql000066400000000000000000000001121451700765000265400ustar00rootroot00000000000000select c1 & c2, c3 | c4, c5 ^ c6, c7 << c8, c9 >> c10 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_bitwise_operators.yml000066400000000000000000000034521451700765000265540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
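#
# As a rough guide to reading this fixture: given SQL such as
#
#   select c1 & c2, c3 | c4, c5 ^ c6, c7 << c8, c9 >> c10
#
# `&` parses below as `binary_operator: ampersand`, `|` as `pipe`, `^`
# directly, and the shift operators `<<` and `>>` as a `binary_operator`
# built from two consecutive `raw_comparison_operator` tokens.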
_hash: 7c5075062f6d6da74e8109008486152d5135e26ba31c9d725ad4bac17ee3dfeb file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - column_reference: naked_identifier: c1 - binary_operator: ampersand: '&' - column_reference: naked_identifier: c2 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c3 - binary_operator: pipe: '|' - column_reference: naked_identifier: c4 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c5 - binary_operator: ^ - column_reference: naked_identifier: c6 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c7 - binary_operator: - raw_comparison_operator: < - raw_comparison_operator: < - column_reference: naked_identifier: c8 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c9 - binary_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '>' - column_reference: naked_identifier: c10 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_c.sql000066400000000000000000000004051451700765000232230ustar00rootroot00000000000000-- Thanks @mrshu for this query, it tests functions and order by SELECT col_a, col_b, date_col_a, date_col_b FROM "database"."sample_table" WHERE DATE(date_col_b) >= current_date AND length(col_a) = 4 ORDER BY date_col_a DESC NULLS LAST sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_c.yml000066400000000000000000000044101451700765000232250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b60f1ebfcd8254f2a20c4714da4d12aa00ba2d83045dc50cb84ff86645e41b68 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: col_b - comma: ',' - select_clause_element: column_reference: naked_identifier: date_col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: date_col_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"database"' - dot: . 
- quoted_identifier: '"sample_table"' where_clause: keyword: WHERE expression: - function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: date_col_b end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - bare_function: current_date - binary_operator: AND - function: function_name: function_name_identifier: length bracketed: start_bracket: ( expression: column_reference: naked_identifier: col_a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: date_col_a - keyword: DESC - keyword: NULLS - keyword: LAST sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_a.sql000066400000000000000000000002341451700765000242140ustar00rootroot00000000000000SELECT CASE WHEN 1 = 2 THEN 3 WHEN 4 > 3 THEN 5 + 2 WHEN some_var IN (1,2,3) then "nothing" ELSE "boo" END as a_case_statement FROM boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_a.yml000066400000000000000000000046571451700765000242330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 963649156afb03054773a1e4826722fbe7066f3962098f7086d70f6e4376807f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - keyword: THEN - expression: numeric_literal: '3' - when_clause: - keyword: WHEN - expression: - numeric_literal: '4' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '3' - keyword: THEN - expression: - numeric_literal: '5' - binary_operator: + - numeric_literal: '2' - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: some_var keyword: IN bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - keyword: then - expression: column_reference: quoted_identifier: '"nothing"' - else_clause: keyword: ELSE expression: column_reference: quoted_identifier: '"boo"' - keyword: END alias_expression: keyword: as naked_identifier: a_case_statement from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_b.sql000066400000000000000000000001671451700765000242220ustar00rootroot00000000000000SELECT CASE some_var WHEN 'hello' THEN 3 WHEN 'hi' THEN 12 ELSE 0 END as a_case_statement FROM boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_b.yml000066400000000000000000000030651451700765000242240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 18a3be314cec3c454e5681171b9a5b224faf1176377a018a6a1909f7898be0f6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - expression: column_reference: naked_identifier: some_var - when_clause: - keyword: WHEN - expression: quoted_literal: "'hello'" - keyword: THEN - expression: numeric_literal: '3' - when_clause: - keyword: WHEN - expression: quoted_literal: "'hi'" - keyword: THEN - expression: numeric_literal: '12' - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END alias_expression: keyword: as naked_identifier: a_case_statement from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_c.sql000066400000000000000000000005541451700765000242230ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::text as mycol from table1; select col0, case when col1 then col2 else col3 end::int::float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as text) as mycol from table1; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_case_c.yml000066400000000000000000000106401451700765000242220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ea171e2218df3a613023d2a683870954ca0b9e3d75cc3dc5b42592f7fcf71db0 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end casting_operator: '::' data_type: data_type_identifier: text alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: float alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: 
case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: data_type_identifier: text end_bracket: ) alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_cross_join.sql000066400000000000000000000004141451700765000251510ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/871 WITH constants AS ( SELECT 8760 AS hours_per_year ) SELECT table1.name, foo.name, foo.value * constants.hours_per_year AS some_value FROM table1 CROSS JOIN constants JOIN table2 AS foo USING (id) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_cross_join.yml000066400000000000000000000052331451700765000251570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 88b3ab39535c542376342c5f2fbe15fd9002aa56a3c54335755af5a0df4a36d6 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: constants keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '8760' alias_expression: keyword: AS naked_identifier: hours_per_year end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: foo - dot: . - naked_identifier: name - comma: ',' - select_clause_element: expression: - column_reference: - naked_identifier: foo - dot: . - naked_identifier: value - binary_operator: '*' - column_reference: - naked_identifier: constants - dot: . - naked_identifier: hours_per_year alias_expression: keyword: AS naked_identifier: some_value from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table1 - join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: constants - join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: keyword: AS naked_identifier: foo - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_d.sql000066400000000000000000000001631451700765000232250ustar00rootroot00000000000000SELECT col_a, col_b FROM some_table WHERE col_a IS NOT NULL AND col_b NOT IN (SELECT c FROM another_table) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_d.yml000066400000000000000000000033661451700765000232370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d954262ab58fdf0d7eaec4c6fcec951e8ff789a701dc1acaabe5e7f8ac66d675 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_a - comma: ',' - select_clause_element: column_reference: naked_identifier: col_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: col_a - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: col_b - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_e.sql000066400000000000000000000003421451700765000232250ustar00rootroot00000000000000-- Union expressions SELECT col_a as foo FROM some_table UNION SELECT col_b as foo FROM another_table UNION ALL SELECT col_c as foo FROM this_other_table INTERSECT SELECT col_d as foo FROM the_last_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_e.yml000066400000000000000000000047561451700765000232440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
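#
# As a rough guide to reading this fixture: a chain of set operations,
#
#   SELECT col_a as foo FROM some_table UNION
#   SELECT col_b as foo FROM another_table UNION ALL
#   SELECT col_c as foo FROM this_other_table INTERSECT
#   SELECT col_d as foo FROM the_last_table
#
# parses below as a single `set_expression` whose children alternate
# between `select_statement` and `set_operator` nodes; `UNION ALL` is two
# keywords inside one `set_operator`.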
_hash: d3da742b9a33497266a15d531ad2678a0dcca95800aa45529fcfdfe7ba9e360f file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a alias_expression: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_b alias_expression: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_c alias_expression: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_other_table - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_d alias_expression: keyword: as naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: the_last_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_except.sql000066400000000000000000000000731451700765000242720ustar00rootroot00000000000000SELECT 1 EXCEPT SELECT 2 ; SELECT 1 EXCEPT ALL SELECT 2 ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_except.yml000066400000000000000000000022311451700765000242720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 164b93288c1ad3ddb071071686b3c34e1fe2e93f29f6feaa4c068e458d3701b5 file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: EXCEPT - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_f.sql000066400000000000000000000001271451700765000232270ustar00rootroot00000000000000-- Test the parser on complex maths SELECT COS(2*ACOS(-1)*2*y/53) AS c2 FROM t sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_f.yml000066400000000000000000000032311451700765000232300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
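#
# As a rough guide to reading this fixture: nested function calls and
# arithmetic,
#
#   SELECT COS(2*ACOS(-1)*2*y/53) AS c2 FROM t
#
# flatten below into a single `expression` alternating operands with
# `binary_operator` nodes, and the unary minus in ACOS(-1) parses as a
# `numeric_literal` carrying a `sign_indicator`.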
_hash: 8007c4b0d5c5943b92df867ae12da9dbba41cae3df98087faa17dcf4a7fb9ca8 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COS bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - function: function_name: function_name_identifier: ACOS bracketed: start_bracket: ( expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' end_bracket: ) - binary_operator: '*' - numeric_literal: '2' - binary_operator: '*' - column_reference: naked_identifier: y - binary_operator: / - numeric_literal: '53' end_bracket: ) alias_expression: keyword: AS naked_identifier: c2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_fetch.sql000066400000000000000000000001041451700765000240660ustar00rootroot00000000000000SELECT EMPLOYEE.EMPNO FROM EMPLOYEE FETCH FIRST 3 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_fetch.yml000066400000000000000000000017131451700765000240770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8019caf6295be540dd5bef757766ba8f960d1425d6cb0d1db4779b1816c11f2e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: EMPLOYEE - dot: . - naked_identifier: EMPNO from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EMPLOYEE fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_fn_square_bracket_array_parameter.sql000066400000000000000000000000271451700765000317150ustar00rootroot00000000000000SELECT myfn([1, 2, 3]) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_fn_square_bracket_array_parameter.yml000066400000000000000000000017461451700765000317300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b62cad35e7e521f55894a70b88b5dc1d32b335dd6f98784a28e25585c4287841 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: myfn bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_from_where_overlaps.sql000066400000000000000000000001031451700765000270440ustar00rootroot00000000000000SELECT column_name FROM table_name WHERE period1 OVERLAPS period2; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_from_where_overlaps.yml000066400000000000000000000020301451700765000270470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9cedfe94bf4b39aa6ab3800f1ff478ea2e253d375df4aea21e254523c2923827 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: WHERE expression: column_reference: naked_identifier: period1 overlaps_clause: keyword: OVERLAPS column_reference: naked_identifier: period2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_function_in_group_by.sql000066400000000000000000000001041451700765000272160ustar00rootroot00000000000000SELECT COALESCE(id, 1) FROM some_table GROUP BY COALESCE(id, 1) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_function_in_group_by.yml000066400000000000000000000027711451700765000272340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 231dc2c88d71e4bffab5c38b88e9c477da4ffdb3d3420fbd003423bab59b7aaa file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table groupby_clause: - keyword: GROUP - keyword: BY - expression: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_g.sql000066400000000000000000000001641451700765000232310ustar00rootroot00000000000000-- More complex select clause without from clause SELECT NULL::INT AS user_id, NULL::INT AS is_paid LIMIT 0 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_g.yml000066400000000000000000000022431451700765000232330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
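#
# As a rough guide to reading this fixture: the shorthand cast syntax,
#
#   SELECT NULL::INT AS user_id, NULL::INT AS is_paid LIMIT 0
#
# parses below as a `cast_expression` holding a `casting_operator` ('::')
# and a `data_type`; note there is no FROM clause, and `LIMIT 0` becomes a
# `limit_clause`.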
_hash: eeb5e85a03436e111c18455cebbb29ce7d010c7edd5e6d81fa7469ff79e359c3 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: keyword: AS naked_identifier: user_id - comma: ',' - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: keyword: AS naked_identifier: is_paid limit_clause: keyword: LIMIT numeric_literal: '0' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_g_fetch.sql000066400000000000000000000002041451700765000243750ustar00rootroot00000000000000-- More complex select clause without from clause SELECT NULL::INT AS user_id, NULL::INT AS is_paid FETCH FIRST 0 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_g_fetch.yml000066400000000000000000000023461451700765000244100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43986e018fd3b9246c7ee47a06aab2694d44e695d5da72dc0ccae734c19b6985 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: keyword: AS naked_identifier: user_id - comma: ',' - select_clause_element: expression: cast_expression: null_literal: 'NULL' casting_operator: '::' data_type: data_type_identifier: INT alias_expression: keyword: AS naked_identifier: is_paid fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '0' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_h.sql000066400000000000000000000003311451700765000232260ustar00rootroot00000000000000SELECT DATE(zendesk.created_at, 'America/New_York') AS date, COUNT( CASE WHEN zendesk.support_team IN ('tech support', 'taskus', 'onc') THEN 1 END ) AS tech_support FROM zendesksqlfluff-2.3.5/test/fixtures/dialects/ansi/select_h.yml000066400000000000000000000046211451700765000232360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9e5c3e91429466790bf4c4d3c4b401d13c502e5383a2a24e2b9d7bf14a7a95ea file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: zendesk - dot: . - naked_identifier: created_at - comma: ',' - expression: quoted_literal: "'America/New_York'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: date - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: zendesk - dot: . 
- naked_identifier: support_team keyword: IN bracketed: - start_bracket: ( - quoted_literal: "'tech support'" - comma: ',' - quoted_literal: "'taskus'" - comma: ',' - quoted_literal: "'onc'" - end_bracket: ) - keyword: THEN - expression: numeric_literal: '1' - keyword: END end_bracket: ) alias_expression: keyword: AS naked_identifier: tech_support from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: zendesk sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_in_multiline_comment.sql000066400000000000000000000001651451700765000272160ustar00rootroot00000000000000with a as ( select * from table_a ), /* select */ b as ( select * from a ) select * from bsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_in_multiline_comment.yml000066400000000000000000000041171451700765000272210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 63dd7e7ed5683b256493aa01bcebb3091cb0058c3c4246230200a506d4b5eecb file: statement: with_compound_statement: - keyword: with - common_table_expression: naked_identifier: a keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: b keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a end_bracket: ) - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_intersect.sql000066400000000000000000000001011451700765000247720ustar00rootroot00000000000000SELECT 1 INTERSECT SELECT 2 ; SELECT 1 INTERSECT ALL SELECT 2 ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_intersect.yml000066400000000000000000000022371451700765000250100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
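#
# As a rough guide to reading this fixture: like the UNION fixtures,
#
#   SELECT 1 INTERSECT SELECT 2 ;
#   SELECT 1 INTERSECT ALL SELECT 2 ;
#
# each statement parses below as a `set_expression`, with `INTERSECT ALL`
# captured as two keywords inside a single `set_operator`.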
_hash: a2de94a03cd14dd4823bc2a5043a3418e8994b289c1013ec32aa4df7a832cc65 file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: INTERSECT - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: INTERSECT - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_j.sql000066400000000000000000000004211451700765000232300ustar00rootroot00000000000000-- Aliasing without AS -- https://github.com/sqlfluff/sqlfluff/issues/149 SELECT (POW(sd2,2) + POW(sd3,2) + POW(sd4,2) + POW(sd4,2)) w1 FROM dat; -- Another Aliasing without AS SELECT CASE WHEN order_month = max_month THEN 1 ELSE 0 END churnsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_j.yml000066400000000000000000000070311451700765000232360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ffec6e9074ce8a42448d7c3d36e8a74fc167e756673f39e2140078afcc746d79 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - function: function_name: function_name_identifier: POW bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: sd2 - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - binary_operator: + - function: function_name: function_name_identifier: POW bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: sd3 - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - binary_operator: + - function: function_name: function_name_identifier: POW bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: sd4 - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - binary_operator: + - function: function_name: function_name_identifier: POW bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: sd4 - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: w1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dat - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: order_month - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: max_month - keyword: THEN - expression: numeric_literal: '1' - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END alias_expression: naked_identifier: churn sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_l.sql000066400000000000000000000001671451700765000232410ustar00rootroot00000000000000-- Nested scalar query -- https://github.com/sqlfluff/sqlfluff/issues/147 SELECT a FROM dat WHERE c >= (SELECT 
1)sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_l.yml000066400000000000000000000024011451700765000232340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d73dada9ab50d1658f9be0779284a5152fe088c3625647d3d44a4ec701730684 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dat where_clause: keyword: WHERE expression: column_reference: naked_identifier: c comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_m.sql000066400000000000000000000005671451700765000232460ustar00rootroot00000000000000-- On clause without brackets -- https://github.com/sqlfluff/sqlfluff/issues/146 SELECT a FROM zendesk LEFT JOIN ticket ON zendesk.ticket_id = ticket.id; SELECT low_user_counts FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type) AND (business_type = low_business_type OR size_label = low_size_label);sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_m.yml000066400000000000000000000064231451700765000232450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2eea1fa41db10c8151633f3fd9f63f4bd1c2849ca484e69fc3eefaccdfcd08b2 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: zendesk join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: ticket - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: zendesk - dot: . - naked_identifier: ticket_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ticket - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: low_user_counts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: acceptable_buckets join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: small_buckets join_on_condition: keyword: 'ON' expression: - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_business_type end_bracket: ) - binary_operator: AND - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_business_type - binary_operator: OR - column_reference: naked_identifier: size_label - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: low_size_label end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_many_join.sql000066400000000000000000000001651451700765000247670ustar00rootroot00000000000000SELECT * FROM a as foo JOIN b JOIN c as foobar JOIN d, e as bar JOIN f JOIN g('blah') as tbl_func JOIN h, baz as buzzsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_many_join.yml000066400000000000000000000056401451700765000247740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
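#
# As a rough guide to reading this fixture: in a FROM clause mixing commas
# and joins,
#
#   SELECT * FROM a as foo JOIN b JOIN c as foobar JOIN d, e as bar JOIN f
#   JOIN g('blah') as tbl_func JOIN h, baz as buzz
#
# each comma-separated item parses below as its own `from_expression`,
# consecutive joins attach to it as sibling `join_clause` nodes, and the
# table function g('blah') appears as a `function` inside
# `table_expression`.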
_hash: e793cae121c70c3c4b4f8a603ab769fbcffdc6d8caa624287a1d3566bff724d0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: a alias_expression: keyword: as naked_identifier: foo - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: b - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: c alias_expression: keyword: as naked_identifier: foobar - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: d - comma: ',' - from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: e alias_expression: keyword: as naked_identifier: bar - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: f - join_clause: keyword: JOIN from_expression_element: table_expression: function: function_name: function_name_identifier: g bracketed: start_bracket: ( expression: quoted_literal: "'blah'" end_bracket: ) alias_expression: keyword: as naked_identifier: tbl_func - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: h - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz alias_expression: keyword: as naked_identifier: buzz sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_multiple_named_windows.sql000066400000000000000000000003411451700765000275510ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (d) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_multiple_named_windows.yml000066400000000000000000000063201451700765000275560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
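#
# As a rough guide to reading this fixture: window definitions may chain,
#
#   WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases),
#          c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c)
#
# Each definition parses below as a `named_window_expression`, and a
# reference to an earlier window shows up as a bare `naked_identifier` at
# the start of the enclosed `window_specification`.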
_hash: 6d6e864ab20eaec92e75a3099486029ce9c0cfba4be61fc49746d85ea2e7e38f file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: d end_bracket: ) alias_expression: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: b frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: d keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_n.sql000066400000000000000000000005101451700765000232330ustar00rootroot00000000000000-- Full Join -- https://github.com/sqlfluff/sqlfluff/issues/144 SELECT exists_left.business_type AS business_type_left, exists_right.business_type AS business_type_right FROM benchmark_summaries AS exists_left FULL JOIN business_types AS exists_right ON exists_left.business_type = exists_right.business_typesqlfluff-2.3.5/test/fixtures/dialects/ansi/select_n.yml000066400000000000000000000041601451700765000232420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d7e87d353954c6cf8cb521155d461614e6034ccc99b3ad3297b608fd6bf0c677 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: exists_left - dot: . - naked_identifier: business_type alias_expression: keyword: AS naked_identifier: business_type_left - comma: ',' - select_clause_element: column_reference: - naked_identifier: exists_right - dot: . 
- naked_identifier: business_type alias_expression: keyword: AS naked_identifier: business_type_right from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_summaries alias_expression: keyword: AS naked_identifier: exists_left join_clause: - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: business_types alias_expression: keyword: AS naked_identifier: exists_right - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: exists_left - dot: . - naked_identifier: business_type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: exists_right - dot: . - naked_identifier: business_type sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window.sql000066400000000000000000000001071451700765000254530ustar00rootroot00000000000000SELECT AVG(col) OVER(win) FROM table WINDOW win AS (PARTITION BY date) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window.yml000066400000000000000000000031571451700765000254650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b48d50f7b6fc41c925c24c63cee0e745a1543b6431fd0de796ca38e1ced325f1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: col end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: win end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table named_window: keyword: WINDOW named_window_expression: naked_identifier: win keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: date end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window_no_parentheses.sql000066400000000000000000000002131451700765000305460ustar00rootroot00000000000000SELECT NTH_VALUE(bar, 1) OVER w1 AS baz FROM t WINDOW w1 AS ( PARTITION BY x, y, z ORDER BY abc DESC ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window_no_parentheses.yml000066400000000000000000000041361451700765000305600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
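#
# As a rough guide to reading this fixture: when OVER references a named
# window without parentheses,
#
#   SELECT NTH_VALUE(bar, 1) OVER w1 AS baz FROM t
#
# the `over_clause` below holds the window name as a `naked_identifier`
# directly, with no `bracketed` `window_specification` wrapper.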
_hash: 630d7f9e734a4e8b76d8d830ac098842e3fa78c42b6354f3fadae513fd472806 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER naked_identifier: w1 alias_expression: keyword: AS naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t named_window: keyword: WINDOW named_window_expression: naked_identifier: w1 keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - comma: ',' - expression: column_reference: naked_identifier: z orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: abc - keyword: DESC end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window_with_parentheses.sql000066400000000000000000000002611451700765000311100ustar00rootroot00000000000000SELECT NTH_VALUE(bar, 1) OVER(w1) AS baz, NTH_VALUE(bar, 1) OVER() AS foo FROM t WINDOW w1 AS ( PARTITION BY x, y, z ORDER BY abc DESC ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_window_with_parentheses.yml000066400000000000000000000055471451700765000311260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
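# This fixture covers the bracketed variants: a named window wrapped in brackets (OVER (w1)) and an empty inline specification (OVER ()), both of which parse to a bracketed node under over_clause. # Illustrative expressions of the same shape (assuming the ANSI dialect): # SELECT NTH_VALUE(bar, 1) OVER (w1) AS baz, NTH_VALUE(bar, 1) OVER () AS foo FROM t;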
_hash: e8e8673b1c2cfd9a8d10aed2e4584b1e967d309e6c23d23c14014e6e90ada267 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: w1 end_bracket: ) alias_expression: keyword: AS naked_identifier: baz - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: bar - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: AS naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t named_window: keyword: WINDOW named_window_expression: naked_identifier: w1 keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - comma: ',' - expression: column_reference: naked_identifier: z orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: abc - keyword: DESC end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_windows_each_window_specification.sql000066400000000000000000000003201451700765000331020ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (c) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (ORDER BY purchases), c AS (ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_named_windows_each_window_specification.yml000066400000000000000000000056231451700765000331170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
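# This fixture gives each named window exactly one clause so that every window_specification variant is parsed in isolation: a partitionby_clause (a), an orderby_clause (b) and a frame_clause (c). # An illustrative WINDOW list of the same shape (assuming the ANSI dialect): # WINDOW a AS (PARTITION BY category), b AS (ORDER BY purchases), c AS (ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)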
_hash: b35baee4afa7aea4adf039229968c415ba614894fec3e02a001aa1ecfcddf7cb file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) alias_expression: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_natural_join.sql000066400000000000000000000006171451700765000254730ustar00rootroot00000000000000SELECT * FROM table1 NATURAL JOIN table2; SELECT * FROM table1 NATURAL INNER JOIN table2; SELECT * FROM table1 NATURAL LEFT JOIN table2; SELECT * FROM table1 NATURAL LEFT OUTER JOIN table2; SELECT * FROM table1 NATURAL RIGHT JOIN table2; SELECT * FROM table1 NATURAL RIGHT OUTER JOIN table2; SELECT * FROM table1 NATURAL FULL JOIN table2; SELECT * FROM table1 NATURAL FULL OUTER JOIN table2; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_natural_join.yml000066400000000000000000000133541451700765000254770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
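# NATURAL joins carry no ON or USING clause because the join columns are implied by the tables' shared column names, so each variant below parses as a bare keyword sequence in the join_clause. # Illustrative forms (assuming the ANSI dialect): # SELECT * FROM table1 NATURAL JOIN table2; SELECT * FROM table1 NATURAL LEFT OUTER JOIN table2;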
_hash: 83e3312a773ee3e3dee22bd7341b922507c1aaee51ef41c02c3ea414ef6b8c82 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: NATURAL - keyword: FULL - keyword: OUTER - keyword: JOIN - 
from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_nested_join.sql000066400000000000000000000036161451700765000253070ustar00rootroot00000000000000-- query with no brackets select orders.order_id AS order_id, customers.email AS email from orders join customers on( ( customers.customer_id = orders.customer_id ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 1 level select orders.order_id AS order_id, customers.email AS email from ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 2 levels select orders.order_id AS order_id, customers.email AS email from ( ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) join products on( (products.product_id = orders.product_id) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id; -- nested bracketed up to 3 levels select orders.order_id AS order_id, customers.email AS email from ( ( ( orders join customers on( ( customers.customer_id = orders.customer_id ) ) ) join products on( (products.product_id = orders.product_id) ) ) join random on( (random.product_id = products.product_id) ) ) where (customers.email = 'sample@gmail.com') group by orders.order_id, customers.email order by orders.order_id;sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_nested_join.yml000066400000000000000000000356141451700765000253160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54f9a4ef99ecbd8dbf774f944026394cf359e85da31d734032e587af7c720353 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: .
- naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: products join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . 
- naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id alias_expression: keyword: AS naked_identifier: order_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email alias_expression: keyword: AS naked_identifier: email from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: customers join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: products join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: random join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: random - dot: . - naked_identifier: product_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: products - dot: . - naked_identifier: product_id end_bracket: ) end_bracket: ) end_bracket: ) where_clause: keyword: where bracketed: start_bracket: ( expression: column_reference: - naked_identifier: customers - dot: . - naked_identifier: email comparison_operator: raw_comparison_operator: '=' quoted_literal: "'sample@gmail.com'" end_bracket: ) groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_id - comma: ',' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: email orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: orders - dot: . 
- naked_identifier: order_id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_numeric_literal_exponential_format.sql000066400000000000000000000001761451700765000321420ustar00rootroot00000000000000SELECT 1e-9, -12.345e12, .0123E-6, 25, +6.34, 0.5, 25e-03, -1, 25, -col1, -+-col2 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_numeric_literal_exponential_format.yml000066400000000000000000000034011451700765000321360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e84987da703b2565d55268ce089edd8ec71b9e94963f4e7216f0c82dfe3d7107 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: 1e-9 - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: 12.345e12 - comma: ',' - select_clause_element: numeric_literal: '.0123E-6' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: numeric_literal: sign_indicator: + numeric_literal: '6.34' - comma: ',' - select_clause_element: numeric_literal: '0.5' - comma: ',' - select_clause_element: numeric_literal: 25e-03 - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: expression: sign_indicator: '-' column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: expression: - sign_indicator: '-' - sign_indicator: + - sign_indicator: '-' - column_reference: naked_identifier: col2 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_o.sql000066400000000000000000000006311451700765000232400ustar00rootroot00000000000000-- Between and Not Between -- https://github.com/sqlfluff/sqlfluff/issues/142 -- https://github.com/sqlfluff/sqlfluff/issues/478 -- https://github.com/sqlfluff/sqlfluff/issues/2845 SELECT business_type FROM benchmark_summaries WHERE avg_click_rate NOT BETWEEN 0 and 1 + 1 + some_value AND some_other_thing BETWEEN 0 - 1 * another_value and 1 AND another_thing BETWEEN -another_value and 0 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_o.yml000066400000000000000000000035431451700765000232470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
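# The bounds of a BETWEEN predicate may themselves be arithmetic expressions, so the grammar must keep the closing 'and' of each predicate distinct from a binary AND joining two predicates, as the tree below shows. # An illustrative predicate of the same shape (assuming the ANSI dialect): # WHERE avg_click_rate NOT BETWEEN 0 AND 1 + 1 + some_value AND another_thing BETWEEN -another_value AND 0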
_hash: 27f7510e3029d7c72b72d88565fe738581add837efc8e0a16900e17670aec6f3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: business_type from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_summaries where_clause: keyword: WHERE expression: - column_reference: naked_identifier: avg_click_rate - keyword: NOT - keyword: BETWEEN - numeric_literal: '0' - keyword: and - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - binary_operator: + - column_reference: naked_identifier: some_value - binary_operator: AND - column_reference: naked_identifier: some_other_thing - keyword: BETWEEN - numeric_literal: '0' - binary_operator: '-' - numeric_literal: '1' - binary_operator: '*' - column_reference: naked_identifier: another_value - keyword: and - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: another_thing - keyword: BETWEEN - sign_indicator: '-' - column_reference: naked_identifier: another_value - keyword: and - numeric_literal: '0' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_order_fetch.sql000066400000000000000000000001351451700765000252650ustar00rootroot00000000000000SELECT EMPLOYEE.EMPNO FROM EMPLOYEE ORDER BY SALARY DESC FETCH FIRST 3 ROWS ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_order_fetch.yml000066400000000000000000000021371451700765000252730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2f9ffd79819b655896c0464ea8f489882038a7ca53e79331662bb85d9a830cd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: EMPLOYEE - dot: . - naked_identifier: EMPNO from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EMPLOYEE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: SALARY - keyword: DESC fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '3' - keyword: ROWS - keyword: ONLY sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_overlaps.sql000066400000000000000000000001361451700765000246350ustar00rootroot00000000000000SELECT (DATE '2001-02-16', DATE '2001-12-21') OVERLAPS (DATE '2001-10-30', DATE '2002-10-30');sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_overlaps.yml000066400000000000000000000021361451700765000246410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
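# OVERLAPS is a row-value predicate comparing two periods, each written as a bracketed pair of datetime expressions; the second period parses into a dedicated overlaps_clause. # An illustrative predicate of the same shape (assuming the ANSI dialect): # SELECT (DATE '2001-02-16', DATE '2001-12-21') OVERLAPS (DATE '2001-10-30', DATE '2002-10-30');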
_hash: a2612d1a58bb48dbe2540541e2caf23f7e80ddba8c415a4ba5c4c132e24592f0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: - start_bracket: ( - keyword: DATE - date_constructor_literal: "'2001-02-16'" - comma: ',' - keyword: DATE - date_constructor_literal: "'2001-12-21'" - end_bracket: ) overlaps_clause: keyword: OVERLAPS bracketed: - start_bracket: ( - keyword: DATE - date_constructor_literal: "'2001-10-30'" - comma: ',' - keyword: DATE - date_constructor_literal: "'2002-10-30'" - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_p.sql000066400000000000000000000003471451700765000232450ustar00rootroot00000000000000-- Case and Extract Expressions -- https://github.com/sqlfluff/sqlfluff/issues/143 SELECT CAST(25.65 AS int), SAFE_CAST(NULL AS STRING) AS age_label, EXTRACT(day FROM end_time) AS day FROM benchmark_with_performancesqlfluff-2.3.5/test/fixtures/dialects/ansi/select_p.yml000066400000000000000000000037441451700765000232530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 52a429dfcb1863cb70cd11d53dae4b271d435e7e754faf00e6c75858ecfe635c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: numeric_literal: '25.65' keyword: AS data_type: data_type_identifier: int end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SAFE_CAST bracketed: start_bracket: ( expression: null_literal: 'NULL' keyword: AS data_type: data_type_identifier: STRING end_bracket: ) alias_expression: keyword: AS naked_identifier: age_label - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: day keyword: FROM expression: column_reference: naked_identifier: end_time end_bracket: ) alias_expression: keyword: AS naked_identifier: day from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: benchmark_with_performance sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_q.sql000066400000000000000000000003571451700765000232470ustar00rootroot00000000000000-- Unexpected Join Fail -- https://github.com/sqlfluff/sqlfluff/issues/163 SELECT data.id FROM data JOIN data_max ON data.event_id = data_max.event_id LEFT JOIN "other_db"."other_data" AS od ON od.fid = data_max.fidsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_q.yml000066400000000000000000000044341451700765000232510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 191b67b4a58bf430fb73e862f208c904da17506b99b96cebed432d2dabbfb1f1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: data - dot: . 
- naked_identifier: id from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: data - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: data_max join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: data - dot: . - naked_identifier: event_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: data_max - dot: . - naked_identifier: event_id - join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: - quoted_identifier: '"other_db"' - dot: . - quoted_identifier: '"other_data"' alias_expression: keyword: AS naked_identifier: od - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: od - dot: . - naked_identifier: fid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: data_max - dot: . - naked_identifier: fid sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_r.sql000066400000000000000000000003471451700765000232470ustar00rootroot00000000000000-- Union All in a With -- https://github.com/sqlfluff/sqlfluff/issues/162 WITH result AS ( SELECT customer FROM sales_eu AS s UNION ALL SELECT customer FROM sales_us AS s2 ) SELECT * FROM resultsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_r.yml000066400000000000000000000044111451700765000232450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5c18ab0f9d16ff117431a97e8a940fe2372692deac372efdb9d6b6b190e6ea72 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: result keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_eu alias_expression: keyword: AS naked_identifier: s - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales_us alias_expression: keyword: AS naked_identifier: s2 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: result sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_right.sql000066400000000000000000000001331451700765000241140ustar00rootroot00000000000000SELECT column_name FROM table1 RIGHT JOIN table2 ON table1.column_name = table2.column_namesqlfluff-2.3.5/test/fixtures/dialects/ansi/select_right.yml000066400000000000000000000026741451700765000241320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3fc1e70bdaf4d47492cf7f44f0786aff1532c440338f40f26d7b9611b3e63d0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: column_name sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_s.sql000066400000000000000000000006411451700765000232450ustar00rootroot00000000000000-- Array notation (BigQuery and Postgres) -- https://github.com/sqlfluff/sqlfluff/issues/59 SELECT user_id, list_id, (count_18_24 * bits[OFFSET(0)] + count_25_34 * bits[OFFSET(1)] + count_35_44 * bits[OFFSET(2)] + count_45_54 * bits[OFFSET(3)] + count_55_64 * bits[OFFSET(4)] + count_65_plus * bits[OFFSET(5)]) / audience_size AS relative_abundance FROM gcp_project.dataset.audience_counts_gender_age sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_s.yml000066400000000000000000000127161451700765000232550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
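# A subscript such as bits[OFFSET(0)] parses as an array_accessor (the square brackets) wrapping an ordinary function call, so array indexing composes freely with the surrounding arithmetic. # An illustrative element access of the same shape (table name arbitrary, BigQuery-style as in the fixture): # SELECT count_18_24 * bits[OFFSET(0)] + count_25_34 * bits[OFFSET(1)] FROM some_table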
_hash: 528835729e7c059d03d13e3c3aacd0887f9390c572fa9e8f764cedb66972a37c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: user_id - comma: ',' - select_clause_element: column_reference: naked_identifier: list_id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: count_18_24 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_25_34 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_35_44 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_45_54 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_55_64 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_65_plus - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) end_square_bracket: ']' end_bracket: ) binary_operator: / column_reference: naked_identifier: audience_size alias_expression: keyword: AS naked_identifier: relative_abundance from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: gcp_project - dot: . - naked_identifier: dataset - dot: . - naked_identifier: audience_counts_gender_age sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10.sql000066400000000000000000000000551451700765000266360ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE BERNOULLI (10) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10.yml000066400000000000000000000017731451700765000266500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
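# TABLESAMPLE takes a sampling method and a bracketed percentage; BERNOULLI typically samples individual rows while SYSTEM typically samples storage pages, though the two forms parse identically here. # An illustrative clause (assuming the ANSI dialect): # SELECT * FROM foo TABLESAMPLE BERNOULLI (10)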
_hash: 7f43ddfc65bea81777b3409c1dfbc58879c76dd5c10217ac854b6315fd23fa6e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10_aliased.sql000066400000000000000000000000721451700765000303170ustar00rootroot00000000000000SELECT f.colname FROM foo AS f TABLESAMPLE BERNOULLI (10) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10_aliased.yml000066400000000000000000000021621451700765000303230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1c54f754a3d6727cc3a9a3757f484858b4557f276c35b2f230ae96ee56fdfcde file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: f - dot: . - naked_identifier: colname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo alias_expression: keyword: AS naked_identifier: f sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10_repeatable.sql000066400000000000000000000000761451700765000310250ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE BERNOULLI (10) REPEATABLE (100) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_10_repeatable.yml000066400000000000000000000022351451700765000310260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 91865cea4612ace8ed9777150a6122d2cf5c307fa5a15b4a8e55048c16b7d74d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_order_limit.sql000066400000000000000000000001161451700765000307250ustar00rootroot00000000000000SELECT col1, col2 FROM foo TABLESAMPLE BERNOULLI (10) ORDER BY col1 LIMIT 100 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_bernoulli_order_limit.yml000066400000000000000000000024311451700765000307310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f21525a5906c37fc87da1b4790b90701a283c29277b569a3f542beaa477d661e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 limit_clause: keyword: LIMIT numeric_literal: '100' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10.sql000066400000000000000000000000521451700765000261640ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE SYSTEM (10) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10.yml000066400000000000000000000017701451700765000261760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7478fb95270d15dd89cfcbed39cfc04de7a90854b40acdc7d240910dbf0d322d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10_aliased.sql000066400000000000000000000000671451700765000276540ustar00rootroot00000000000000SELECT f.colname FROM foo AS f TABLESAMPLE SYSTEM (10) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10_aliased.yml000066400000000000000000000021571451700765000276600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f70b2c953321552da2425ae6861cb3f0c997202b89778bbc53f17259ce333fb file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: f - dot: . 
- naked_identifier: colname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo alias_expression: keyword: AS naked_identifier: f sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10_repeatable.sql000066400000000000000000000000731451700765000303530ustar00rootroot00000000000000SELECT * FROM foo TABLESAMPLE SYSTEM (10) REPEATABLE (100) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_sample_system_10_repeatable.yml000066400000000000000000000022321451700765000303540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7da96fb9cdc292e207aaf5fb2ca24010332d4aae5187fb19abb6e55dc4b7ed97 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_a.sql000066400000000000000000000000101451700765000245620ustar00rootroot00000000000000select 1sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_a.yml000066400000000000000000000010301451700765000245670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2d5b2a76d63773e6faa9f682b2b616e05a1cead7f3ae1232e86cbcf87e7c1ce1 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_b.sql000066400000000000000000000000221451700765000245660ustar00rootroot00000000000000select * from blahsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_b.yml000066400000000000000000000014321451700765000245760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 322fb1e6da9414c65b760e1c0d87ea0b579ba73cdbca3365afa79b1eba267475 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_c.sql000066400000000000000000000000261451700765000245730ustar00rootroot00000000000000select * from foo, barsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_c.yml000066400000000000000000000017131451700765000246010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4173e890171054d33cb16b159a27866bc73734bedeea1605b2023aab0ce13fea file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_d.sql000066400000000000000000000000371451700765000245760ustar00rootroot00000000000000 select 12 -- ends with commentsqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_d.yml000066400000000000000000000010311451700765000245730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b45317c5e006ec28acb8a50d078c16d3036b90f0e8c266793d85ee615368f21e file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '12' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_e.sql000066400000000000000000000001361451700765000245770ustar00rootroot00000000000000SELECT my_var::date as casted_variable, 123::bigint as another_casted_number FROM boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_e.yml000066400000000000000000000025451451700765000246070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
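# The :: shorthand parses as a cast_expression with a casting_operator, equivalent in intent to the functional CAST form used elsewhere in these fixtures. # Illustrative equivalents (assuming the ANSI dialect): # SELECT my_var::date AS casted_variable FROM boo; SELECT CAST(my_var AS date) AS casted_variable FROM boo;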
_hash: b1e92fc48188489244018689fb653d596842ae99efe4798bbdb7c64c606e1e8e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: my_var casting_operator: '::' data_type: data_type_identifier: date alias_expression: keyword: as naked_identifier: casted_variable - comma: ',' - select_clause_element: expression: cast_expression: numeric_literal: '123' casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: keyword: as naked_identifier: another_casted_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_f.sql000066400000000000000000000001401451700765000245730ustar00rootroot00000000000000-- test some more advanced constructs SELECT a, b FROM boo GROUP BY 1 ORDER BY b, 1 LIMIT 5 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_f.yml000066400000000000000000000022721451700765000246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dfaa42a3cdec07d3ade8545214ad52704e0ff48d62b8ae1d85f48dece573cc9b file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - comma: ',' - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '5' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_g.sql000066400000000000000000000001251451700765000245770ustar00rootroot00000000000000-- Having Clause SELECT id FROM test WHERE id >= 4 GROUP BY id HAVING id < 5 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_g.yml000066400000000000000000000025501451700765000246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
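# WHERE filters rows before grouping while HAVING filters the resulting groups afterwards, so both clauses can constrain the same column with different conditions, as this fixture does. # An illustrative query of the same shape (assuming the ANSI dialect): # SELECT id FROM test WHERE id >= 4 GROUP BY id HAVING id < 5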
_hash: 046e4b32715f69c01155d2d5909f6322d3ed4ec353ef7cff62f341c194733540 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '4' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id having_clause: keyword: HAVING expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: < numeric_literal: '5' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_h.sql000066400000000000000000000002211451700765000245750ustar00rootroot00000000000000-- test window functions in functions with casting SELECT DATEADD(DAY, ROW_NUMBER() OVER (ORDER BY DateCD ASC), '2014-01-01') AS dt FROM boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_h.yml000066400000000000000000000034521451700765000246100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f56963da3a216de70b934ee167d8db50a006582417d4a0767b305ed3f3d02a2 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: DAY - comma: ',' - expression: function: function_name: function_name_identifier: ROW_NUMBER bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DateCD - keyword: ASC end_bracket: ) - comma: ',' - expression: quoted_literal: "'2014-01-01'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: dt from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: boo sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_i.sql000066400000000000000000000006751451700765000246130ustar00rootroot00000000000000-- test aliasing SELECT raw_column, raw_sch.raw_col, simple_explicit as aliased_column_1, simple_implicit aliased_column_2, an_unaliased + calculation, -- We know that the following one doesn't parse... -- an_implicitly + aliased calculation, an_explicitly - aliased as calculation, 'an unalised string', 123.6, -786 as aliased_column3 FROM unaliased JOIN aliased_1 as foo JOIN aliased_2 bar USING(b) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_i.yml000066400000000000000000000060401451700765000246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
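# An alias_expression is parsed both with and without the AS keyword: the explicit form keeps the keyword, while the implicit form is just a trailing naked_identifier. # Illustrative select items of the same shape (assuming the ANSI dialect): # SELECT simple_explicit AS aliased_column_1, simple_implicit aliased_column_2 FROM unaliased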
_hash: 439777acce2a03abf4e6d38e91bcb975783d146ca114877e07030c501bf8ce2f file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: raw_column - comma: ',' - select_clause_element: column_reference: - naked_identifier: raw_sch - dot: . - naked_identifier: raw_col - comma: ',' - select_clause_element: column_reference: naked_identifier: simple_explicit alias_expression: keyword: as naked_identifier: aliased_column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: simple_implicit alias_expression: naked_identifier: aliased_column_2 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: an_unaliased - binary_operator: + - column_reference: naked_identifier: calculation - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: an_explicitly - binary_operator: '-' - column_reference: naked_identifier: aliased alias_expression: keyword: as naked_identifier: calculation - comma: ',' - select_clause_element: quoted_literal: "'an unalised string'" - comma: ',' - select_clause_element: numeric_literal: '123.6' - comma: ',' - select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '786' alias_expression: keyword: as naked_identifier: aliased_column3 from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: unaliased - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: aliased_1 alias_expression: keyword: as naked_identifier: foo - join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: aliased_2 alias_expression: naked_identifier: bar - keyword: USING - bracketed: start_bracket: ( naked_identifier: b end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_j.sql000066400000000000000000000002721451700765000246050ustar00rootroot00000000000000-- test parsing of cross join and outer join SELECT count_correctly_substituted FROM correctly_substituted CROSS JOIN needs_substitution LEFT OUTER JOIN some_other_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_simple_j.yml000066400000000000000000000024231451700765000246070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cda21556c8c2d93b02b3817f5e0eb3616d69740b706d026b8f7f6fb2a4477da5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: count_correctly_substituted from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: correctly_substituted - join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: needs_substitution - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: some_other_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_t.sql000066400000000000000000000004301451700765000232420ustar00rootroot00000000000000SELECT * FROM TABLE_1 FULL OUTER JOIN -- comment1 ( SELECT * FROM Table_B WHERE COL_2 = 'B' UNION ALL SELECT * FROM TABLE_C WHERE COL_1 = 0 ) ON TABLE_1.A = TABLE_2.A sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_t.yml000066400000000000000000000070171451700765000232540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad320f80888d19c93bf2a8e5dadcd51ae14bd9089f4161ea96f6718bf5a0e6cb file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE_1 join_clause: - keyword: FULL - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Table_B where_clause: keyword: WHERE expression: column_reference: naked_identifier: COL_2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'B'" - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TABLE_C where_clause: keyword: WHERE expression: column_reference: naked_identifier: COL_1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: TABLE_1 - dot: . - naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: TABLE_2 - dot: . - naked_identifier: A sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_table_named_group.sql000066400000000000000000000000531451700765000264470ustar00rootroot00000000000000select 1 from group; select 1 from groups; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_table_named_group.yml000066400000000000000000000021571451700765000264600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a290676e6fee30d9f4fb94419484ce3a2c078833f421d3db14f26e776593a5d4 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: group - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: groups - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_trailing_comma_column_list.sql000066400000000000000000000000431451700765000303740ustar00rootroot00000000000000SELECT user_id, FROM table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_trailing_comma_column_list.yml000066400000000000000000000014301451700765000303770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 000c55713505ad198a4c74a474808e536cdc1ae9010c0ba276ac0a524f82d63c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_true_and_not_false.sql000066400000000000000000000002351451700765000266350ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/874 SELECT TRUE AND NOT FALSE; SELECT TRUE; SELECT TRUE AND FALSE; SELECT NOT TRUE; SELECT NOT TRUE AND FALSE;sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_true_and_not_false.yml000066400000000000000000000031361451700765000266420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1eab200cd987526c057e1c1be42cf2bfdba7609dadf9e6b155fbd91b01860942 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'TRUE' - binary_operator: AND - keyword: NOT - boolean_literal: 'FALSE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: boolean_literal: 'TRUE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'TRUE' - binary_operator: AND - boolean_literal: 'FALSE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: keyword: NOT boolean_literal: 'TRUE' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: NOT - boolean_literal: 'TRUE' - binary_operator: AND - boolean_literal: 'FALSE' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_u.sql000066400000000000000000000000661451700765000232500ustar00rootroot00000000000000select substring(a from 'abc') as b from my_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_u.yml000066400000000000000000000022161451700765000232510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 52df4200de8c7ed290a1b2fd819ff42afa28560198fc95213930c6a3e680919f file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: substring bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - keyword: from - expression: quoted_literal: "'abc'" - end_bracket: ) alias_expression: keyword: as naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union.sql000066400000000000000000000000341451700765000241270ustar00rootroot00000000000000SELECT 0 UNION SELECT 1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union.yml000066400000000000000000000013541451700765000241370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 763e7cd57e8af9de1c189ad32fe63f08dae1344a36e08ba4166fc9b61b549427 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_all.sql000066400000000000000000000000401451700765000247540ustar00rootroot00000000000000SELECT 0 UNION ALL SELECT 1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_all.yml000066400000000000000000000014011451700765000247600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d8c79f2b1ea2ff055e8214c02fedae7d34c5d1a726a51fdfd37a7a571cc182d4 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_bracketed.sql000066400000000000000000000000771451700765000261420ustar00rootroot00000000000000(SELECT 0) UNION (SELECT 1); ((SELECT 0)) UNION ((SELECT 1)); sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_bracketed.yml000066400000000000000000000031201451700765000261340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ab415fa5d23ce0bdcac9d878f877f9bf64ec2975194120cc86daf2d250457660 file: - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' end_bracket: ) end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_distinct.sql000066400000000000000000000000451451700765000260320ustar00rootroot00000000000000SELECT 0 UNION DISTINCT SELECT 1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_union_distinct.yml000066400000000000000000000014061451700765000260360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 795fef86836ffa436be22add911aa3cbeeb458ef7b309514ce28639f926f0ef0 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0' - set_operator: - keyword: UNION - keyword: DISTINCT - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_v.sql000066400000000000000000000002101451700765000232400ustar00rootroot00000000000000-- Test Nested WITH WITH counter AS ( WITH ladder AS ( SELECT 1 ) SELECT * FROM ladder ORDER BY 1 ) SELECT * FROM counter sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_v.yml000066400000000000000000000040621451700765000232530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 80d0bbf478f03f9f206c631fc99854a4f644a68936073e9e5744ecee82394cec file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: counter keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: ladder keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ladder orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_where_in_unnest.sql000066400000000000000000000000711451700765000261740ustar00rootroot00000000000000SELECT user_id FROM t WHERE 1 IN UNNEST(t.c) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_where_in_unnest.yml000066400000000000000000000023071451700765000262020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2f7c7fbea97f596f00fbfad9866143b45ed3da0c51215624d8aaab358f0dde42 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: numeric_literal: '1' keyword: IN function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: c end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_a.sql000066400000000000000000000000621451700765000242530ustar00rootroot00000000000000WITH cte as (select a from tbla) select a from ctesqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_a.yml000066400000000000000000000026251451700765000242640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1298ca21b1fa3bc9eedfd815d67ba202ec5342bbc767d995aac381df569fb4a9 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbla end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_b.sql000066400000000000000000000000761451700765000242610ustar00rootroot00000000000000WITH blah AS (select x,y,z FROM foo) select z, y, x from blah;sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_b.yml000066400000000000000000000036501451700765000242640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d30f0d1b4b6f309151e12d5aa38d0a47c9cd50518fa65e4f4634e17c573ad731 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: blah keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: z from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: z - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: blah statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_brackets.sql000066400000000000000000000001721451700765000256330ustar00rootroot00000000000000select * from (my_table); select * from (my_table tt); select * from ((my_table tt)); select * from (((my_table tt))); sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_brackets.yml000066400000000000000000000057441451700765000256470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 70c5e3e63b30f283c89a64ba05a0e4dde05e2ce93552e936260796a173afbe23 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( bracketed: start_bracket: ( from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: naked_identifier: tt end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_limit_and_offset.sql000066400000000000000000000000501451700765000273360ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10 OFFSET 10sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_limit_and_offset.yml000066400000000000000000000016341451700765000273510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e87a966441c7e2091e72dcbfec0b7999c9a79b610855befa30fd945e874126bd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: - keyword: LIMIT - numeric_literal: '10' - keyword: OFFSET - numeric_literal: '10' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_offset_limit.sql000066400000000000000000000000421451700765000265150ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10, 10sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_offset_limit.yml000066400000000000000000000016271451700765000265310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bc4f9329c02c258f49e16d548f61579b65ad37eb64b34da9b1ca062aef44b5ff file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: - keyword: LIMIT - numeric_literal: '10' - comma: ',' - numeric_literal: '10' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_recursive.sql000066400000000000000000000001411451700765000260400ustar00rootroot00000000000000WITH RECURSIVE cte(a) AS (SELECT 1 UNION ALL SELECT a+1 FROM cte WHERE a < 5) SELECT a FROM cte; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_recursive.yml000066400000000000000000000044551451700765000260560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f38848372150f3c627659cbd7c834658cfb3b43acd04f3bde9143764e21d5944 file: statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < numeric_literal: '5' end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_simple_limit.sql000066400000000000000000000000361451700765000265230ustar00rootroot00000000000000SELECT * FROM counter LIMIT 10sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_simple_limit.yml000066400000000000000000000015461451700765000265340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a5003ef6c80948fdbb87c1249b0f4265d93037606edf5722d991576dcd822075 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: counter limit_clause: keyword: LIMIT numeric_literal: '10' sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_where_clause_functions.sql000066400000000000000000000001441451700765000305720ustar00rootroot00000000000000select t.column1 from sch.table1 as t where t.b_year in (year(getdate()) , year(getdate()) - 1); sqlfluff-2.3.5/test/fixtures/dialects/ansi/select_with_where_clause_functions.yml000066400000000000000000000043131451700765000305760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7455cf4e932a1f45055e2b98d219cf6227ef6e37023639a31acd3ba0422a716a file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: table1 alias_expression: keyword: as naked_identifier: t where_clause: keyword: where expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: b_year keyword: in bracketed: - start_bracket: ( - function: function_name: function_name_identifier: year bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: getdate bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: year bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: getdate bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - binary_operator: '-' - numeric_literal: '1' - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/semicolon_delimiters.sql000066400000000000000000000002051451700765000256510ustar00rootroot00000000000000-- It's possible to have multiple semicolons between statements. SELECT foo FROM bar;; SELECT foo FROM bar; ; ; SELECT foo FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/ansi/semicolon_delimiters.yml000066400000000000000000000032051451700765000256560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0726a750be417f4364e1de4f4378a1d0c23c0ab9b1c6c524bc6d83f5dafd80b8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/set_order_by.sql000066400000000000000000000001441451700765000241220ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/852 SELECT 1 AS a UNION ALL SELECT 1 AS a ORDER BY a sqlfluff-2.3.5/test/fixtures/dialects/ansi/set_order_by.yml000066400000000000000000000020561451700765000241300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 12d9bcc62e4629e9d202ce68a3298de14bbc67dc4c61e75e5a03a8c07d23b13b file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/set_order_by_complex.sql000066400000000000000000000003331451700765000256510ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/852 -- ORDER BY and LIMIT are allowed when bracketed. Otherwise not. (SELECT * FROM a ORDER BY 1 LIMIT 1) UNION ALL (SELECT * FROM b ORDER BY 1 LIMIT 1) ORDER BY 1 LIMIT 1 sqlfluff-2.3.5/test/fixtures/dialects/ansi/set_order_by_complex.yml000066400000000000000000000040071451700765000256550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f7aadd671fed70160b3eda465490c09c32a77fe77145b3a8bd37bcfbf532cd0 file: statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - limit_clause: keyword: LIMIT numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/ansi/set_schema_a.sql000066400000000000000000000000241451700765000240520ustar00rootroot00000000000000set schema my_schemasqlfluff-2.3.5/test/fixtures/dialects/ansi/set_schema_a.yml000066400000000000000000000010261451700765000240570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7f948d86ae8ddb413b44c667a3dee6096cf7db4569f3ab72d981b230bf87d3a2 file: statement: set_schema_statement: - keyword: set - keyword: schema - schema_reference: naked_identifier: my_schema sqlfluff-2.3.5/test/fixtures/dialects/ansi/shorthand_cast.sql000066400000000000000000000001031451700765000244410ustar00rootroot00000000000000select '1' :: INT as id1, '2'::int as id2 from table_asqlfluff-2.3.5/test/fixtures/dialects/ansi/shorthand_cast.yml000066400000000000000000000024411451700765000244520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bc226d85f73ff6d64c06d81582ca627fcfd21b3810d6da4013d89d1a7b0676c9 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: quoted_literal: "'1'" casting_operator: '::' data_type: data_type_identifier: INT alias_expression: keyword: as naked_identifier: id1 - comma: ',' - select_clause_element: expression: cast_expression: quoted_literal: "'2'" casting_operator: '::' data_type: data_type_identifier: int alias_expression: keyword: as naked_identifier: id2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a sqlfluff-2.3.5/test/fixtures/dialects/ansi/table_expression.sql000066400000000000000000000003071451700765000250110ustar00rootroot00000000000000SELECT y AS woy FROM UNNEST(GENERATE_ARRAY(1, 53)) AS y; SELECT id, name FROM UNNEST([1, 2, 3]) id WITH OFFSET pos1, UNNEST(['a', 'b', 'c']) name WITH OFFSET pos2 WHERE pos1 = pos2; sqlfluff-2.3.5/test/fixtures/dialects/ansi/table_expression.yml000066400000000000000000000074431451700765000250230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cef3c0ce8f899b9aaf73447c99616ef7b20cee593d5599461084a48820e0b34a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: y alias_expression: keyword: AS naked_identifier: woy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GENERATE_ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '53' - end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: y - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: - keyword: FROM - from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - alias_expression: naked_identifier: id - keyword: WITH - keyword: OFFSET - alias_expression: naked_identifier: pos1 - comma: ',' - from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' end_bracket: ) - alias_expression: naked_identifier: name - keyword: WITH - keyword: OFFSET - alias_expression: naked_identifier: pos2 where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pos1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: pos2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/transactions.sql000066400000000000000000000001111451700765000241440ustar00rootroot00000000000000BEGIN TRANSACTION; UPDATE tbl SET foo = 1 WHERE bar = 2; END TRANSACTION;sqlfluff-2.3.5/test/fixtures/dialects/ansi/transactions.yml000066400000000000000000000022751451700765000241630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7ae293181f9a25549b65004c3536b6f0101395df493dfb722b17f9bccf0a3480 file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: tbl set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: - keyword: END - keyword: TRANSACTION - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/trim_functions.sql000066400000000000000000000005261451700765000245110ustar00rootroot00000000000000SELECT trim(' SparkSQL '); SELECT trim(BOTH FROM ' SparkSQL '); SELECT trim(LEADING FROM ' SparkSQL '); SELECT trim(TRAILING FROM ' SparkSQL '); SELECT trim('SL' FROM 'SSparkSQLS'); SELECT trim(BOTH 'SL' FROM 'SSparkSQLS'); SELECT trim(LEADING 'SL' FROM 'SSparkSQLS'); SELECT trim(TRAILING 'SL' FROM 'SSparkSQLS'); sqlfluff-2.3.5/test/fixtures/dialects/ansi/trim_functions.yml000066400000000000000000000100441451700765000245070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 07214dfc654a54fa01f59d32c129ad3cf5a614556fd09deff2f76324c00a34ca file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: start_bracket: ( expression: quoted_literal: "' SparkSQL '" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: BOTH - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: LEADING - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: TRAILING - keyword: FROM - expression: quoted_literal: "' SparkSQL '" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: BOTH - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: 
function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: LEADING - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: trim bracketed: - start_bracket: ( - keyword: TRAILING - expression: quoted_literal: "'SL'" - keyword: FROM - expression: quoted_literal: "'SSparkSQLS'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/truncate_a.sql000066400000000000000000000000131451700765000235620ustar00rootroot00000000000000truncate a sqlfluff-2.3.5/test/fixtures/dialects/ansi/truncate_a.yml000066400000000000000000000007661451700765000236030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d4f2f7b0b94694925f4ed333c4b00f0cb6d50b58ccbd037a9bbfb8657ba833e file: statement: truncate_table: keyword: truncate table_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/truncate_table_a.sql000066400000000000000000000000211451700765000247300ustar00rootroot00000000000000truncate table a sqlfluff-2.3.5/test/fixtures/dialects/ansi/truncate_table_a.yml000066400000000000000000000010131451700765000247340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1b1d7fc327f208104f290374b0813024d71f9d97f340558a45007ed81a0c6eef file: statement: truncate_table: - keyword: truncate - keyword: table - table_reference: naked_identifier: a sqlfluff-2.3.5/test/fixtures/dialects/ansi/unaliased_using_subquery.sql000066400000000000000000000001341451700765000265520ustar00rootroot00000000000000SELECT * FROM A_TABLE INNER JOIN ( SELECT margin FROM B_TABLE ) USING (SOME_COLUMN) sqlfluff-2.3.5/test/fixtures/dialects/ansi/unaliased_using_subquery.yml000066400000000000000000000033141451700765000265570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2475b577af1c428903288f5ab85677960322310c8fd1b7a4e48734431c703e68 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A_TABLE join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: margin from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: B_TABLE end_bracket: ) - keyword: USING - bracketed: start_bracket: ( naked_identifier: SOME_COLUMN end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/ansi/update.sql000066400000000000000000000001041451700765000227200ustar00rootroot00000000000000UPDATE table_name SET column1 = value1, column2 = value2 WHERE a=1; sqlfluff-2.3.5/test/fixtures/dialects/ansi/update.yml000066400000000000000000000023701451700765000227310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 97f0f9ca17728f0b84dace4787d5c607fdc080b9e8f0bb2214229bbf290140d3 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value1 - comma: ',' - set_clause: - column_reference: naked_identifier: column2 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_set_case.sql000066400000000000000000000002321451700765000245700ustar00rootroot00000000000000UPDATE table1 SET a = CASE WHEN t2.column = 'T' THEN TRUE WHEN t2.column = 'F' THEN FALSE ELSE NULL END FROM table2 t2; sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_set_case.yml000066400000000000000000000041501451700765000245750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8bda546d9e67ac39e4b62470c06e8bd4b711510e0a1aa122c158b2eb78c3b911 file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: column comparison_operator: raw_comparison_operator: '=' quoted_literal: "'T'" - keyword: THEN - expression: boolean_literal: 'TRUE' - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: column comparison_operator: raw_comparison_operator: '=' quoted_literal: "'F'" - keyword: THEN - expression: boolean_literal: 'FALSE' - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table2 alias_expression: naked_identifier: t2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_with_from_clause.sql000066400000000000000000000001731451700765000263400ustar00rootroot00000000000000UPDATE my_table SET my_table.days=other_table.days FROM other_table WHERE my_table.po_number=other_table.po_number sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_with_from_clause.yml000066400000000000000000000027201451700765000263420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c5d4563686f1cda32dd215be7a532e24aa7f86300c59bf96319b1f4247a4419d file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: my_table set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: my_table - dot: . - naked_identifier: days - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: other_table - dot: . - naked_identifier: days from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: my_table - dot: . - naked_identifier: po_number - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: other_table - dot: . - naked_identifier: po_number sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_with_table_alias.sql000066400000000000000000000000631451700765000262770ustar00rootroot00000000000000UPDATE my_table AS tttd SET tttd.days=ttu.days sqlfluff-2.3.5/test/fixtures/dialects/ansi/update_with_table_alias.yml000066400000000000000000000016711451700765000263070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 650dd765ac5565a68d8f0cc226b38a8cb87cb1a6f31ed0a546da4a6eb61b9a7d file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: my_table alias_expression: keyword: AS naked_identifier: tttd set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: tttd - dot: . - naked_identifier: days - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ttu - dot: . 
- naked_identifier: days sqlfluff-2.3.5/test/fixtures/dialects/ansi/where_fetch.sql000066400000000000000000000001061451700765000237230ustar00rootroot00000000000000SELECT * FROM Persons WHERE Country='France' FETCH FIRST 5 ROWS ONLY; sqlfluff-2.3.5/test/fixtures/dialects/ansi/where_fetch.yml000066400000000000000000000022601451700765000237300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 551b47f53b17ce9ec37f64ff155f33d78305241d2b2bca2b76718016bbaededd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Persons where_clause: keyword: WHERE expression: column_reference: naked_identifier: Country comparison_operator: raw_comparison_operator: '=' quoted_literal: "'France'" fetch_clause: - keyword: FETCH - keyword: FIRST - numeric_literal: '5' - keyword: ROWS - keyword: ONLY statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_bracketed_with_statement.sql000066400000000000000000000004731451700765000311310ustar00rootroot00000000000000WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) INSERT INTO table2 (column1, column2, column3) ( WITH mycte2 AS ( SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2 ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_bracketed_with_statement.yml000066400000000000000000000071751451700765000311410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 21d731739663b535b09c9ea09b64a89ee8c60cb29a01e8d7ac6221153d2a32ca file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_statement_a.sql000066400000000000000000000006501451700765000263670ustar00rootroot00000000000000-- with insert statement with `AS` keyword WITH mycte AS ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) VALUES ('value1', 'value2', 'value3'); -- with statement without `AS` keyword WITH mycte ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) VALUES ('value1', 'value2', 'value3'); sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_statement_a.yml000066400000000000000000000072441451700765000263770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 591ca08f5767a3cd9ddc497f6242f0e51267cc8863bd688fd4b85e073f0dc35d file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'value2'" - comma: ',' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'value1'" - comma: ',' - quoted_literal: "'value2'" - comma: ',' - quoted_literal: "'value3'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_statement_b.sql000066400000000000000000000002621451700765000263670ustar00rootroot00000000000000WITH mycte AS ( SELECT foo, bar, baz FROM mytable1 ) INSERT INTO table2 (column1, column2, column3) SELECT foo, bar, baz FROM mycte; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_statement_b.yml000066400000000000000000000046361451700765000264020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 33da5bd44e3638d1708630829459d10c3c04684fb9e15d802c75f8d186da1de6 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_with_statement.sql000066400000000000000000000004101451700765000271140ustar00rootroot00000000000000WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) INSERT INTO table2 (column1, column2, column3) WITH mycte2 AS ( SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_insert_with_statement.yml000066400000000000000000000067221451700765000271320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5c5586f9471e934aaae4230cd18a11da243eeb04fd581eeced85fe9e7061841e file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_nested_in_with_statement.sql000066400000000000000000000005031451700765000275630ustar00rootroot00000000000000( WITH mycte2 AS ( WITH mycte1 AS ( SELECT foo, bar, baz FROM mytable ) SELECT foo, bar, baz FROM mycte1 ) SELECT foo, bar, baz FROM mycte2 ); sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_nested_in_with_statement.yml000066400000000000000000000064021451700765000275710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 949eef4c8f91534769e7a2f739a5e760e42f6fb6e22d66f63cc2b57cd75b3d29 file: statement: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte2 keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_no_schema_binding.sql000066400000000000000000000001231451700765000261200ustar00rootroot00000000000000create view my_schema.my_view as select * from schema.table with no schema binding;sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_no_schema_binding.yml000066400000000000000000000023321451700765000261260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7ea812fc14414f5738321bfb716f97ea4e58cdf87c32d5726d981a0bf1ebcc38 file: statement: create_view_statement: - keyword: create - keyword: view - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: table - with_no_schema_binding_clause: - keyword: with - keyword: 'no' - keyword: schema - keyword: binding statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_update_statement.sql000066400000000000000000000002141451700765000260410ustar00rootroot00000000000000WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; sqlfluff-2.3.5/test/fixtures/dialects/ansi/with_update_statement.yml000066400000000000000000000036261451700765000260550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e7bacd843605728db15379d32320886ef0a608c3fbfcae64d08a14fd592df72e file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mycte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 end_bracket: ) update_statement: keyword: UPDATE table_reference: naked_identifier: sometable set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: sometable - dot: . - naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: mycte - dot: . - naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mycte statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/000077500000000000000000000000001451700765000212305ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/athena/.sqlfluff000066400000000000000000000000341451700765000230500ustar00rootroot00000000000000[sqlfluff] dialect = athena sqlfluff-2.3.5/test/fixtures/dialects/athena/create_array_table.sql000066400000000000000000000001531451700765000255600ustar00rootroot00000000000000CREATE TABLE array_table (c1 array<integer>) LOCATION '...'; INSERT INTO array_table values(ARRAY[1,2,3]); sqlfluff-2.3.5/test/fixtures/dialects/athena/create_array_table.yml000066400000000000000000000031611451700765000255640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: dcff7cb3422d87dfe7c9fc2259035093dcfb97b78d628f9531e8da8f939377dc file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: array_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: array_table - values_clause: keyword: values bracketed: start_bracket: ( typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_database.sql000066400000000000000000000000351451700765000250360ustar00rootroot00000000000000create database my_database; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_database.yml000066400000000000000000000010761451700765000250460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 92367f186eab8a420d5599e66571f75d5d5b92592afc9599a887b2be6bd5769a file: statement: create_database_statement: - keyword: create - keyword: database - database_reference: naked_identifier: my_database statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_database_if_not_exists.sql000066400000000000000000000000531451700765000277730ustar00rootroot00000000000000create database if not exists my_database; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_database_if_not_exists.yml000066400000000000000000000011711451700765000277770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 48281e65f2141e81f9e67a84ce552e21c5c731039e29cdd35a38d8cf7e17de87 file: statement: create_database_statement: - keyword: create - keyword: database - keyword: if - keyword: not - keyword: exists - database_reference: naked_identifier: my_database statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table.sql000066400000000000000000000023321451700765000262650ustar00rootroot00000000000000create external table my_database.my_table( field_1 string, field_2 int, field_3 float ) PARTITIONED BY (field_partition string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' LINES TERMINATED BY '\n' LOCATION 's3://athena-examples-myregion/flight/csv/'; CREATE TABLE bucketed_table WITH ( bucketed_by = ARRAY[column_name], bucket_count = 30, format = 'PARQUET', external_location ='s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/' ) AS SELECT * FROM table_name; CREATE EXTERNAL TABLE `tpch100.lineitem_parq_partitioned`( `l_orderkey` int, `l_partkey` int, `l_suppkey` int, `l_linenumber` int, `l_quantity` double, `l_extendedprice` double, `l_discount` double, `l_tax` double, `l_returnflag` string, `l_linestatus` string, `l_commitdate` string, `l_receiptdate` string, `l_shipinstruct` string, `l_comment` string) PARTITIONED BY ( `l_shipdate` string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' LOCATION 's3:///lineitem/' sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table.yml000066400000000000000000000152631451700765000262760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 68b5d13306d23d5e9b8c3df0bd4c40672178cb10f21bdb196c90cd0549a27744 file: - statement: create_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: my_database - dot: . 
- naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: field_2 data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: field_3 data_type: primitive_type: keyword: float - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: bucketed_table - keyword: WITH - bracketed: - start_bracket: ( - keyword: bucketed_by - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: naked_identifier: column_name end_square_bracket: ']' - comma: ',' - keyword: bucket_count - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - comma: ',' - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - keyword: external_location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://DOC-EXAMPLE-BUCKET/tables/parquet_table/'" - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: quoted_identifier: '`tpch100.lineitem_parq_partitioned`' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`l_orderkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_partkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_suppkey`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_linenumber`' data_type: primitive_type: keyword: int - comma: ',' - column_definition: quoted_identifier: '`l_quantity`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_extendedprice`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_discount`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_tax`' data_type: primitive_type: keyword: double - comma: ',' - column_definition: quoted_identifier: '`l_returnflag`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_linestatus`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_commitdate`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_receiptdate`' data_type: 
primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_shipinstruct`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: quoted_identifier: '`l_comment`' data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: quoted_identifier: '`l_shipdate`' data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'" - keyword: LOCATION - quoted_literal: "'s3:///lineitem/'" sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table_input_format.sql000066400000000000000000000005661451700765000310630ustar00rootroot00000000000000CREATE EXTERNAL TABLE my_table( col_1 string, col_2 boolean, col_3 bigint, col_4 string, col_5 string ) PARTITIONED BY (field_partition string) ROW FORMAT SERDE 'some row format' STORED AS INPUTFORMAT 'some input format' OUTPUTFORMAT 'some output format' LOCATION 's3://athena-examples-myregion/some_data/' TBLPROPERTIES ('has_encrypted_data' = 'true'); sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table_input_format.yml000066400000000000000000000044461451700765000310660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d5589c53a4c69fbe84bd27c9e8d251f43a311f7a1a826e9a105ec1e715c27cf5 file: statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col_1 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col_2 data_type: primitive_type: keyword: boolean - comma: ',' - column_definition: naked_identifier: col_3 data_type: primitive_type: keyword: bigint - comma: ',' - column_definition: naked_identifier: col_4 data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: col_5 data_type: primitive_type: keyword: string - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'some row format'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'some input format'" - keyword: OUTPUTFORMAT - quoted_literal: "'some output format'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/some_data/'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'has_encrypted_data'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'true'" - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table_struct.sql000066400000000000000000000021701451700765000276710ustar00rootroot00000000000000create external table my_database.my_table( `date` string, campaignId string, campaignName string, deleted string, campaignStatus string, app struct<appName:string,adamId:string>, servingStatus string, servingStateReasons string, countriesOrRegions array<string>, modificationTime string, totalBudget struct<amount:int,currency:string>, dailyBudget struct<amount:int,currency:string>, displayStatus string, supplySources array<string>, adChannelType string, orgId string, billingEvent string, countryOrRegionServingStateReasons string, other boolean, impressions int, taps int, installs int, newDownloads int, redownloads int, latOnInstalls int, latOffInstalls int, ttr int, avgCPA struct<amount:int,currency:string>, avgCPT struct<amount:int,currency:string>, avgCPM struct<amount:int,currency:string>, localSpend struct<amount:int,currency:string>, conversionRate int ) PARTITIONED BY (field_partition string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' LINES TERMINATED BY '\n' LOCATION 's3://athena-examples-myregion/flight/csv/'; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_external_table_struct.yml000066400000000000000000000241421451700765000276760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 02a71f810091ee83c7d409ce6521263ea4063d017bee6cccd54bc366e1c6192e file: statement: create_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: my_database - dot: .
- naked_identifier: my_table - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`date`' data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignId data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignName data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: deleted data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: campaignStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: app data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: appName - colon: ':' - data_type: primitive_type: keyword: string - comma: ',' - naked_identifier: adamId - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: servingStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: servingStateReasons data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: countriesOrRegions data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: string end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: modificationTime data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: totalBudget data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: dailyBudget data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: displayStatus data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: supplySources data_type: array_type: keyword: array array_type_schema: start_angle_bracket: < data_type: primitive_type: keyword: string end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: adChannelType data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: orgId data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: billingEvent data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: countryOrRegionServingStateReasons data_type: primitive_type: keyword: string - comma: ',' - column_definition: naked_identifier: other data_type: primitive_type: keyword: boolean - comma: ',' - column_definition: naked_identifier: impressions data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: taps data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: installs data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: newDownloads data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: redownloads data_type: 
primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: latOnInstalls data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: latOffInstalls data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: ttr data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: avgCPA data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: avgCPT data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: avgCPM data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: localSpend data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: amount - colon: ':' - data_type: primitive_type: keyword: int - comma: ',' - naked_identifier: currency - colon: ':' - data_type: primitive_type: keyword: string - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: conversionRate data_type: primitive_type: keyword: int - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: field_partition data_type: primitive_type: keyword: string end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: LOCATION - quoted_literal: "'s3://athena-examples-myregion/flight/csv/'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_map_table.sql000066400000000000000000000002051451700765000252170ustar00rootroot00000000000000CREATE TABLE map_table(c1 map<string, integer>) LOCATION '...'; INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2])); sqlfluff-2.3.5/test/fixtures/dialects/athena/create_map_table.yml000066400000000000000000000045351451700765000252300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: f9ce164e521bde2f59e168d96fa1333eab314cb0e034162c363d9e79413c946e file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: map_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: map_type: keyword: map map_type_schema: start_angle_bracket: < primitive_type: keyword: string comma: ',' data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: map_table - values_clause: keyword: values bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: MAP bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_partitioned_table.sql000066400000000000000000000015171451700765000267710ustar00rootroot00000000000000CREATE table my_lineitem_parq_partitioned WITH (partitioned_by = ARRAY['l_shipdate']) AS SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_commitdate, l_receiptdate, l_shipinstruct, l_comment, l_shipdate FROM tpch100.lineitem_parq_partitioned WHERE cast(l_shipdate as timestamp) < DATE('1992-02-01'); CREATE TABLE ctas_iceberg WITH ( table_type = 'ICEBERG', format = 'PARQUET', location = 's3://my_athena_results/ctas_iceberg_parquet/', is_external = false, partitioning = ARRAY['month(dt)'], vacuum_min_snapshots_to_keep = 10, vacuum_max_snapshot_age_ms = 259200 ) AS SELECT key1, name1, 'date' FROM table1; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_partitioned_table.yml000066400000000000000000000141761451700765000270000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c5ce1d7c663f64372f59dfc0f2028db823240dbb4eeb85312cb99f45af4edca5 file: - statement: create_table_statement: - keyword: CREATE - keyword: table - table_reference: naked_identifier: my_lineitem_parq_partitioned - keyword: WITH - bracketed: start_bracket: ( keyword: partitioned_by comparison_operator: raw_comparison_operator: '=' typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' quoted_literal: "'l_shipdate'" end_square_bracket: ']' end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: l_orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_partkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_suppkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_linenumber - comma: ',' - select_clause_element: column_reference: naked_identifier: l_quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: l_extendedprice - comma: ',' - select_clause_element: column_reference: naked_identifier: l_discount - comma: ',' - select_clause_element: column_reference: naked_identifier: l_tax - comma: ',' - select_clause_element: column_reference: naked_identifier: l_returnflag - comma: ',' - select_clause_element: column_reference: naked_identifier: l_linestatus - comma: ',' - select_clause_element: column_reference: naked_identifier: l_commitdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_receiptdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipinstruct - comma: ',' - select_clause_element: column_reference: naked_identifier: l_comment - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tpch100 - dot: . 
- naked_identifier: lineitem_parq_partitioned where_clause: keyword: WHERE expression: - function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: l_shipdate keyword: as data_type: primitive_type: keyword: timestamp end_bracket: ) - comparison_operator: raw_comparison_operator: < - function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: quoted_literal: "'1992-02-01'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: ctas_iceberg - keyword: WITH - bracketed: - start_bracket: ( - keyword: table_type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'ICEBERG'" - comma: ',' - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - keyword: location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://my_athena_results/ctas_iceberg_parquet/'" - comma: ',' - keyword: is_external - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - comma: ',' - keyword: partitioning - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' quoted_literal: "'month(dt)'" end_square_bracket: ']' - comma: ',' - keyword: vacuum_min_snapshots_to_keep - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: vacuum_max_snapshot_age_ms - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '259200' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: key1 - comma: ',' - select_clause_element: column_reference: naked_identifier: name1 - comma: ',' - select_clause_element: quoted_literal: "'date'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_struct_table.sql000066400000000000000000000002611451700765000257660ustar00rootroot00000000000000CREATE TABLE struct_table(c1 struct<name:varchar(10), age:integer>) LOCATION '...'; INSERT INTO struct_table SELECT CAST(ROW('Bob', 38) AS ROW(name VARCHAR(10), age INTEGER)); sqlfluff-2.3.5/test/fixtures/dialects/athena/create_struct_table.yml000066400000000000000000000060731451700765000257730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 38d98df551428927f3dfbb68de7c816f03dfbd3cc494a3b07241c92db86247fe file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: struct_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: name - colon: ':' - data_type: primitive_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: age - colon: ':' - data_type: primitive_type: keyword: integer - end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: struct_table - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: quoted_literal: "'Bob'" - comma: ',' - expression: numeric_literal: '38' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: name - data_type: primitive_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - naked_identifier: age - data_type: primitive_type: keyword: INTEGER - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_table_as_select.sql000066400000000000000000000003341451700765000264050ustar00rootroot00000000000000CREATE TABLE my_ctas WITH ( format='Parquet', external_location='s3://my-bucket/my-path-level-1/my-path-level-2', partitioned_by=array['load_date'] ) AS SELECT field_1, field_2, field_3 from my_table; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_table_as_select.yml000066400000000000000000000036621451700765000264160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 910d1634c67b90facb16c20670e54e39bdfd55df92bf42b4c4fcb5940377dd27 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_ctas - keyword: WITH - bracketed: - start_bracket: ( - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Parquet'" - comma: ',' - keyword: external_location - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3://my-bucket/my-path-level-1/my-path-level-2'" - comma: ',' - keyword: partitioned_by - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: array array_literal: start_square_bracket: '[' quoted_literal: "'load_date'" end_square_bracket: ']' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_view.sql000066400000000000000000000002731451700765000242500ustar00rootroot00000000000000CREATE VIEW test AS SELECT orderkey, orderstatus, totalprice / 2 AS half FROM orders; CREATE OR REPLACE VIEW test AS SELECT orderkey, orderstatus, totalprice / 4 AS quarter FROM orders; sqlfluff-2.3.5/test/fixtures/dialects/athena/create_view.yml000066400000000000000000000046521451700765000242570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 145fb6ff6109069e686a48e68e4fc89cca03c4b317daf3fd87cf60e1cb440fd3 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: test - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: orderstatus - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: totalprice binary_operator: / numeric_literal: '2' alias_expression: keyword: AS naked_identifier: half from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: test - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: orderstatus - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: totalprice binary_operator: / numeric_literal: '4' alias_expression: keyword: AS naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/element_at.sql000066400000000000000000000003671451700765000240740ustar00rootroot00000000000000SELECT COALESCE( element_at(rq.hiring_managers, 1), element_at(rq.hiring_managers, 2), rq.creator_id ) AS part1, element_at(pl.hiring_managers, 1).id AS part2, element_at(pl.hiring_managers, 2).id AS part3; sqlfluff-2.3.5/test/fixtures/dialects/athena/element_at.yml000066400000000000000000000067531451700765000241030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5127c65677a4d987ab01ccfd4bfc6a86ce2dd0f7317002ae12e04869c5812f1c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: element_at bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: rq - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: element_at bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: rq - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - expression: column_reference: - naked_identifier: rq - dot: . - naked_identifier: creator_id - end_bracket: ) alias_expression: keyword: AS naked_identifier: part1 - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: element_at bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: pl - dot: . 
- naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) dot: . object_reference: naked_identifier: id alias_expression: keyword: AS naked_identifier: part2 - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: element_at bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: pl - dot: . - naked_identifier: hiring_managers - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) dot: . object_reference: naked_identifier: id alias_expression: keyword: AS naked_identifier: part3 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/insert_into.sql000066400000000000000000000002201451700765000243000ustar00rootroot00000000000000INSERT INTO canada_pageviews SELECT * FROM vancouver_pageviews WHERE pageview_date BETWEEN date '2019-07-01' AND date '2019-07-31'; sqlfluff-2.3.5/test/fixtures/dialects/athena/insert_into.yml000066400000000000000000000025061451700765000243130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 438a76e77b3742e90d4210c0c4a3d87fb85e62dbdc161c859542d5e31b93341c file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: canada_pageviews - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: vancouver_pageviews where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pageview_date - keyword: BETWEEN - keyword: date - date_constructor_literal: "'2019-07-01'" - keyword: AND - keyword: date - date_constructor_literal: "'2019-07-31'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/msck_repair_table.sql000066400000000000000000000000341451700765000254140ustar00rootroot00000000000000MSCK REPAIR TABLE my_table; sqlfluff-2.3.5/test/fixtures/dialects/athena/msck_repair_table.yml000066400000000000000000000011131451700765000254150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aadaff42833cff5f19411fa8875d9468514306cdb9448f96c00d03fe7815e21a file: statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/prepared_statements.sql000066400000000000000000000010651451700765000260240ustar00rootroot00000000000000PREPARE my_select1 FROM SELECT * FROM nation; PREPARE my_select2 FROM SELECT * FROM "my_database"."my_table" WHERE year = ?; PREPARE my_select3 FROM SELECT 'order' FROM orders WHERE productid = ? and quantity < ?; PREPARE my_insert FROM INSERT INTO cities_usa (city, state) SELECT city, state FROM cities_world WHERE country = ?; PREPARE my_unload FROM UNLOAD (SELECT * FROM table1 WHERE productid < ?) 
TO 's3://my_output_bucket/' WITH (format='PARQUET'); EXECUTE statement_name; EXECUTE statement_name USING 'value'; EXECUTE statement_name USING 'value', 10; sqlfluff-2.3.5/test/fixtures/dialects/athena/prepared_statements.yml000066400000000000000000000137401451700765000260310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f85e8d54a28c0e52d2d49d051ebd3f71ef569db9a0d70e041845406903b4235 file: - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select1 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nation - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select2 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - quoted_identifier: '"my_database"' - dot: . - quoted_identifier: '"my_table"' where_clause: keyword: WHERE expression: column_reference: naked_identifier: year comparison_operator: raw_comparison_operator: '=' parameter: '?' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_select3 - keyword: FROM - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'order'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: naked_identifier: productid - comparison_operator: raw_comparison_operator: '=' - parameter: '?' - binary_operator: and - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - parameter: '?' - statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_insert - keyword: FROM - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: cities_usa - bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: state - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cities_world where_clause: keyword: WHERE expression: column_reference: naked_identifier: country comparison_operator: raw_comparison_operator: '=' parameter: '?' 
- statement_terminator: ; - statement: prepare_statement: - keyword: PREPARE - table_reference: naked_identifier: my_unload - keyword: FROM - unload_statement: - keyword: UNLOAD - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: productid comparison_operator: raw_comparison_operator: < parameter: '?' end_bracket: ) - keyword: TO - quoted_literal: "'s3://my_output_bucket/'" - keyword: WITH - bracketed: start_bracket: ( keyword: format comparison_operator: raw_comparison_operator: '=' quoted_literal: "'PARQUET'" end_bracket: ) - statement_terminator: ; - statement: execute_statement: keyword: EXECUTE table_reference: naked_identifier: statement_name - statement_terminator: ; - statement: execute_statement: - keyword: EXECUTE - table_reference: naked_identifier: statement_name - keyword: USING - quoted_literal: "'value'" - statement_terminator: ; - statement: execute_statement: - keyword: EXECUTE - table_reference: naked_identifier: statement_name - keyword: USING - quoted_literal: "'value'" - comma: ',' - numeric_literal: '10' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_a.sql000066400000000000000000000001301451700765000235220ustar00rootroot00000000000000SELECT field_1 , field_2 , field_3 , time , date , timestamp FROM my_table; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_a.yml000066400000000000000000000025421451700765000235350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 287e4f998b4695a1d733c87a6a1ecad0735c474c0d5ed78422173e0d823736c5 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_3 - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: date - comma: ',' - select_clause_element: column_reference: naked_identifier: timestamp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_array_of_rows.sql000066400000000000000000000000611451700765000261610ustar00rootroot00000000000000SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x.y; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_array_of_rows.yml000066400000000000000000000036761451700765000262020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 246b131824ddd434f87567ae1f0cf10a63055d0c33282c264e12c5f22081cd9c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: x data_type: primitive_type: keyword: INT end_bracket: ) end_bracket: ) end_square_bracket: ']' array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' dot: . object_reference: - naked_identifier: x - dot: . - naked_identifier: y statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_b.sql000066400000000000000000000003231451700765000235270ustar00rootroot00000000000000WITH with_query as ( select field_1, field_2 from table_1) select field_1, field_2, count(1) from with_query where field_1 = 'value' group by 1, 2 having count(1) > 10 order by 1 DESC limit 10; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_b.yml000066400000000000000000000060411451700765000235340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b43b9ac93be471486d66e0621012bb8ac4304318743e2ab360bb4f1d7b868878 file: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: with_query keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: with_query where_clause: keyword: where expression: column_reference: naked_identifier: field_1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' having_clause: keyword: having expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: DESC limit_clause: keyword: limit numeric_literal: '10' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_base_operators.sql000066400000000000000000000003211451700765000263140ustar00rootroot00000000000000SELECT 
CAST(null AS boolean) OR CAST(null AS boolean); -- null SELECT CAST(null AS boolean) OR false; -- null SELECT CAST(null AS boolean) OR true; -- true SELECT least(1, 2, 3); SELECT greatest(1, 2, 3); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_base_operators.yml000066400000000000000000000071231451700765000263250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2fd22465fd9f2536786679db1dd1683e0ed247a44f20c469d12c8975d08fd304 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) - binary_operator: OR - function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) binary_operator: OR boolean_literal: 'false' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: null_literal: 'null' keyword: AS data_type: primitive_type: keyword: boolean end_bracket: ) binary_operator: OR boolean_literal: 'true' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: least bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: greatest bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_cast_withtimezone.sql000066400000000000000000000001501451700765000270440ustar00rootroot00000000000000SELECT cast(field_1 as time with time zone), cast(field_2 as timestamp with time zone) FROM my_table; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_cast_withtimezone.yml000066400000000000000000000032331451700765000270530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
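# A short, hedged illustration of the relationship these fixtures encode:
# each .sql file is parsed by SQLFluff and the resulting tree is serialized
# into the matching .yml file, such as the one beginning below. One way to
# reproduce a tree interactively is SQLFluff's simple Python API; the exact
# shape of the returned structure is an implementation detail, so treat this
# as a sketch rather than a contract. Written as comments so it stays inert
# inside this YAML fixture:
#
#   import sqlfluff
#
#   # Parse a shortened form of the statement from the .sql fixture above
#   # under the Athena dialect; a parsing failure raises an error instead
#   # of returning a tree.
#   tree = sqlfluff.parse(
#       "SELECT cast(field_1 as time with time zone) FROM my_table;",
#       dialect="athena",
#   )
#   print(tree)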
_hash: 335468b5c467677aabed89e846c2fc395ae5ecce235e2c57ea8630f3817ff611 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: field_1 keyword: as data_type: - keyword: time - keyword: with - keyword: time - keyword: zone end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: field_2 keyword: as data_type: - keyword: timestamp - keyword: with - keyword: time - keyword: zone end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_datetime_functions.sql000066400000000000000000000021071451700765000271740ustar00rootroot00000000000000-- https://prestodb.io/docs/0.217/functions/datetime.html select date '2012-08-08' + interval '2' day; select time '01:00' + interval '3' hour; select timestamp '2012-08-08 01:00' + interval '29' hour; select timestamp '2012-10-31 01:00' + interval '1' month; select interval '2' day + interval '3' hour; select interval '3' year + interval '5' month; select date '2012-08-08' - interval '2' day; select time '01:00' - interval '3' hour; select timestamp '2012-08-08 01:00' - interval '29' hour; select timestamp '2012-10-31 01:00' - interval '1' month; select interval '2' day - interval '3' hour; select interval '3' year - interval '5' month; select current_time; select current_date; select current_timestamp; select current_timezone(); select date('1970-01-01'); select cast('1970-01-01' as date); select from_iso8601_timestamp('2019-09-07T-15:50+00'); select from_iso8601_date('2019-09-07T-15:50+00'); select from_unixtime(1556285138); select localtime; select localtimestamp; select now(); select to_iso8601('1970-01-01'); select to_unixtime(current_timestamp); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_datetime_functions.yml000066400000000000000000000232721451700765000272040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 00bf746e10cd81ce9aef26bca92f1cc25f8ad66393903ca01b08e0b8076b72e0 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: date date_constructor_literal: "'2012-08-08'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: time date_constructor_literal: "'01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-08-08 01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'29'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-10-31 01:00'" binary_operator: + interval_expression: keyword: interval quoted_literal: "'1'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - binary_operator: + - interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'3'" date_part: year - binary_operator: + - interval_expression: keyword: interval quoted_literal: "'5'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: date date_constructor_literal: "'2012-08-08'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: time date_constructor_literal: "'01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-08-08 01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'29'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: keyword: timestamp date_constructor_literal: "'2012-10-31 01:00'" binary_operator: '-' interval_expression: keyword: interval quoted_literal: "'1'" date_part: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'2'" date_part: day - binary_operator: '-' - interval_expression: keyword: interval quoted_literal: "'3'" date_part: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - interval_expression: keyword: interval quoted_literal: "'3'" date_part: year - binary_operator: '-' - interval_expression: keyword: interval quoted_literal: "'5'" date_part: month - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: select select_clause_element: bare_function: current_time - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: bare_function: current_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: bare_function: current_timestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: current_timezone bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" keyword: as data_type: primitive_type: keyword: date end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_iso8601_timestamp bracketed: start_bracket: ( expression: quoted_literal: "'2019-09-07T-15:50+00'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_iso8601_date bracketed: start_bracket: ( expression: quoted_literal: "'2019-09-07T-15:50+00'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: from_unixtime bracketed: start_bracket: ( expression: numeric_literal: '1556285138' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: localtime - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: localtimestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: now bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: to_iso8601 bracketed: start_bracket: ( expression: quoted_literal: "'1970-01-01'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: to_unixtime bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_filter.sql000066400000000000000000000003201451700765000245700ustar00rootroot00000000000000SELECT ARRAY [5, NULL, 7, NULL]; SELECT filter(ARRAY [], x -> true); -- [] SELECT filter(ARRAY [5, -6, NULL, 7], x -> x > 0); -- [5, 7] SELECT filter(ARRAY [5, NULL, 7, NULL], x -> x IS NOT NULL); -- [5, 7] sqlfluff-2.3.5/test/fixtures/dialects/athena/select_filter.yml000066400000000000000000000100051451700765000245730ustar00rootroot00000000000000# YML 
test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b5c07fcfa9add39719d29df5f870aa2e6441447fd3ea11fa25d5dd77eac0ca9e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - comma: ',' - null_literal: 'NULL' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - expression: column_reference: naked_identifier: x binary_operator: -> boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '6' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - end_square_bracket: ']' - comma: ',' - expression: - column_reference: naked_identifier: x - binary_operator: -> - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: filter bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '7' - comma: ',' - null_literal: 'NULL' - end_square_bracket: ']' - comma: ',' - expression: - column_reference: naked_identifier: x - binary_operator: -> - column_reference: naked_identifier: x - keyword: IS - keyword: NOT - null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_group_by.sql000066400000000000000000000013631451700765000251410ustar00rootroot00000000000000select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by cube (as_of_date, channel); select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by rollup (as_of_date, channel); select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by grouping sets (as_of_date, channel); -- complex sets select as_of_date, channel, sum(total_count) as cnt from agg.aggregate_total group by grouping sets ((as_of_date, channel), (as_of_date), ()); -- "weird" cases select as_of_date, channel, platform, sum(total_count) as cnt from agg.aggregate_total group by as_of_date, grouping sets ((platform, channel), channel, ()); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_group_by.yml000066400000000000000000000211601451700765000251400ustar00rootroot00000000000000# 
YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f9c009740fd13350526c115479fe223a5375d63df546038453248221abcf1ba9 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . 
- naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . - naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: as_of_date - comma: ',' - column_reference: naked_identifier: channel - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: as_of_date end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: as_of_date - comma: ',' - select_clause_element: column_reference: naked_identifier: channel - comma: ',' - select_clause_element: column_reference: naked_identifier: platform - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_count end_bracket: ) alias_expression: keyword: as naked_identifier: cnt from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: agg - dot: . 
- naked_identifier: aggregate_total groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: as_of_date - comma: ',' - grouping_sets_clause: - keyword: grouping - keyword: sets - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: platform - comma: ',' - column_reference: naked_identifier: channel - end_bracket: ) - comma: ',' - column_reference: naked_identifier: channel - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_map_function.sql000066400000000000000000000007701451700765000257760ustar00rootroot00000000000000SELECT map(); WITH dataset AS ( SELECT map( ARRAY['first', 'last', 'age'], ARRAY['Bob', 'Smith', '35'] ) AS a_map ) SELECT a_map FROM dataset; SELECT map_filter(map(ARRAY[], ARRAY[]), (k, v) -> true); -- -- {} SELECT map_filter( map( ARRAY[10, 20, 30], ARRAY['a', null, 'c'] ), (k, v) -> v IS NOT NULL ); -- -- {10 -> a, 30 -> c} SELECT map_filter( map( ARRAY['k1', 'k2', 'k3'], ARRAY[20, 3, 15] ), (k, v) -> v > 10 ); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_map_function.yml000066400000000000000000000210231451700765000257720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 99a237d11640a33fbd36edc57ed22013a69bc9c5c6a030f1b033c6a739c55df0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: dataset keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'first'" - comma: ',' - quoted_literal: "'last'" - comma: ',' - quoted_literal: "'age'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'Bob'" - comma: ',' - quoted_literal: "'Smith'" - comma: ',' - quoted_literal: "'35'" - end_square_bracket: ']' - end_bracket: ) alias_expression: keyword: AS naked_identifier: a_map end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_map from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dataset - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: 
start_square_bracket: '[' end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) binary_operator: -> boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10' - comma: ',' - numeric_literal: '20' - comma: ',' - numeric_literal: '30' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'a'" - comma: ',' - null_literal: 'null' - comma: ',' - quoted_literal: "'c'" - end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: v - keyword: IS - keyword: NOT - null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: map_filter bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: map bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'k1'" - comma: ',' - quoted_literal: "'k2'" - comma: ',' - quoted_literal: "'k3'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '20' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '15' - end_square_bracket: ']' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) binary_operator: -> column_reference: naked_identifier: v comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_map_type.sql000066400000000000000000000003761451700765000251340ustar00rootroot00000000000000SELECT CAST( JSON_PARSE(table_name.column_name) AS MAP ) AS json_map FROM table_name; CREATE TABLE map_table(c1 map) LOCATION '...'; INSERT INTO map_table values(MAP(ARRAY['foo', 'bar'], ARRAY[1, 2])); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_map_type.yml000066400000000000000000000075011451700765000251330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ccb2a26dc4a874a4a86947e81c2e5548f2ec40c503dce5a045498898229c542a file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_PARSE bracketed: start_bracket: ( expression: column_reference: - naked_identifier: table_name - dot: . - naked_identifier: column_name end_bracket: ) keyword: AS data_type: map_type: keyword: MAP map_type_schema: start_angle_bracket: < primitive_type: keyword: VARCHAR comma: ',' data_type: primitive_type: keyword: VARCHAR end_angle_bracket: '>' end_bracket: ) alias_expression: keyword: AS naked_identifier: json_map from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: map_table - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: map_type: keyword: map map_type_schema: start_angle_bracket: < primitive_type: keyword: string comma: ',' data_type: primitive_type: keyword: integer end_angle_bracket: '>' end_bracket: ) - keyword: LOCATION - quoted_literal: "'...'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: map_table - values_clause: keyword: values bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: MAP bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_reduce.sql000066400000000000000000000014061451700765000245600ustar00rootroot00000000000000SELECT reduce(ARRAY [], 0, (s, x) -> s + x, s -> s); -- 0 SELECT reduce(ARRAY [5, 20, 50], 0, (s, x) -> s + x, s -> s); -- 75 SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> s + x, s -> s); -- NULL SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> s + COALESCE(x, 0), s -> s); -- 75 SELECT reduce(ARRAY [5, 20, NULL, 50], 0, (s, x) -> IF(x IS NULL, s, s + x), s -> s); -- 75 SELECT reduce(ARRAY [2147483647, 1], CAST (0 AS BIGINT), (s, x) -> s + x, s -> s); -- 2147483648 SELECT reduce(ARRAY [5, 6, 10, 20], -- calculates arithmetic average: 10.25 CAST(ROW(0.0, 0) AS ROW(sum DOUBLE, count INTEGER)), (s, x) -> CAST(ROW(x + s.sum, s.count + 1) AS ROW(sum DOUBLE, count INTEGER)), s -> IF(s.count = 0, NULL, s.sum / s.count)); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_reduce.yml000066400000000000000000000411341451700765000245640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1df9cb1b41eb49646c10bbaa02cc3ead9ec6607066d1234d89d89f0eea4774f5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - function: function_name: function_name_identifier: COALESCE 
bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '20' - comma: ',' - null_literal: 'NULL' - comma: ',' - numeric_literal: '50' - end_square_bracket: ']' - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: IF bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x keyword: IS null_literal: 'NULL' - comma: ',' - expression: column_reference: naked_identifier: s - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '2147483647' - comma: ',' - numeric_literal: '1' - end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: numeric_literal: '0' keyword: AS data_type: primitive_type: keyword: BIGINT end_bracket: ) - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: s - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: s - binary_operator: -> - column_reference: naked_identifier: s - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '20' - end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: numeric_literal: '0.0' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: sum - data_type: primitive_type: keyword: DOUBLE - comma: ',' - naked_identifier: count - data_type: primitive_type: keyword: 
INTEGER - end_bracket: ) end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: s - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: sum - comma: ',' - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: count binary_operator: + numeric_literal: '1' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: sum - data_type: primitive_type: keyword: DOUBLE - comma: ',' - naked_identifier: count - data_type: primitive_type: keyword: INTEGER - end_bracket: ) end_bracket: ) - comma: ',' - expression: column_reference: naked_identifier: s binary_operator: -> function: function_name: function_name_identifier: IF bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: count comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - comma: ',' - expression: null_literal: 'NULL' - comma: ',' - expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: sum - binary_operator: / - column_reference: - naked_identifier: s - dot: . - naked_identifier: count - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_row.sql000066400000000000000000000007251451700765000241230ustar00rootroot00000000000000SELECT ROW(1, 2.0); SELECT CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE)); SELECT ARRAY[CAST(ROW(1) AS ROW(x INT))][1].x; SELECT CAST( ROW( ARRAY[ CAST(ROW('') AS ROW(id varchar)) ], CAST(ROW('') AS ROW(id varchar)), 'Approved' ) AS ROW( approvers ARRAY, performer ROW(id varchar), approvalStatus varchar ) ) as test; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_row.yml000066400000000000000000000203741451700765000241270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f7fa9aefe356b727b9a732c5acbae4cb77decee6ec6a4925dfc537c64194b225 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2.0' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2.0' - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: x - data_type: primitive_type: keyword: BIGINT - comma: ',' - naked_identifier: y - data_type: primitive_type: keyword: DOUBLE - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: x data_type: primitive_type: keyword: INT end_bracket: ) end_bracket: ) end_square_bracket: ']' array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' dot: . object_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: start_bracket: ( expression: quoted_literal: "''" end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_bracket: ) end_square_bracket: ']' - comma: ',' - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROW bracketed: start_bracket: ( expression: quoted_literal: "''" end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'Approved'" - end_bracket: ) keyword: AS data_type: keyword: ROW bracketed: - start_bracket: ( - naked_identifier: approvers - data_type: array_type: keyword: ARRAY array_type_schema: start_angle_bracket: < data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) end_angle_bracket: '>' - comma: ',' - naked_identifier: performer - data_type: keyword: ROW bracketed: start_bracket: ( naked_identifier: id data_type: primitive_type: keyword: varchar end_bracket: ) - comma: ',' - 
naked_identifier: approvalStatus - data_type: primitive_type: keyword: varchar - end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: test - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_underscore.sql000066400000000000000000000002261451700765000254610ustar00rootroot00000000000000SELECT 1 AS _; SELECT 1 AS __; SELECT 1 AS __TEST; SELECT a FROM ( VALUES ('a'), ('b') ) AS _(a); SELECT a FROM ( VALUES ('a'), ('b') ) AS __(a); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_underscore.yml000066400000000000000000000062531451700765000254710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 333615bfdb031f9d78f403d6a2e6afa402ea41e9e1fa18ce2384dc1b013b7ee9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: _ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: __ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: __TEST - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( quoted_literal: "'a'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'b'" end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: _ bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( quoted_literal: "'a'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( quoted_literal: "'b'" end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: __ bracketed: start_bracket: ( identifier_list: naked_identifier: a end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_unnest.sql000066400000000000000000000004211451700765000246210ustar00rootroot00000000000000SELECT field_1, field_2, column_value FROM my_table CROSS JOIN UNNEST(array_field) AS my_unnested_table(column_value); SELECT numbers, n, a FROM ( VALUES (ARRAY[2, 5]), (ARRAY[7, 8, 9]) ) AS x (numbers) CROSS JOIN UNNEST(numbers) WITH ORDINALITY AS t (n, a); sqlfluff-2.3.5/test/fixtures/dialects/athena/select_unnest.yml000066400000000000000000000111621451700765000246270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 270021a08e9c72974e0a292bb178e35b9f32af7a316cb4dcc11c4600a499710f file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_field end_bracket: ) alias_expression: keyword: AS naked_identifier: my_unnested_table bracketed: start_bracket: ( identifier_list: naked_identifier: column_value end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: numbers - comma: ',' - select_clause_element: column_reference: naked_identifier: n - comma: ',' - select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '2' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '7' - comma: ',' - numeric_literal: '8' - comma: ',' - numeric_literal: '9' - end_square_bracket: ']' end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: naked_identifier: numbers end_bracket: ) join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: function: - function_name: function_name_identifier: UNNEST - bracketed: start_bracket: ( expression: column_reference: naked_identifier: numbers end_bracket: ) - keyword: WITH - keyword: ORDINALITY alias_expression: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: n - comma: ',' - naked_identifier: a end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_widow_functions.sql000066400000000000000000000005631451700765000265350ustar00rootroot00000000000000SELECT orderkey, clerk, totalprice, rank() OVER (PARTITION BY clerk ORDER BY totalprice DESC) AS rnk FROM orders ORDER BY clerk, rnk; SELECT clerk, orderdate, orderkey, totalprice, sum(totalprice) OVER (PARTITION BY clerk ORDER BY orderdate) AS rolling_sum FROM orders ORDER BY clerk, orderdate, orderkey; sqlfluff-2.3.5/test/fixtures/dialects/athena/select_widow_functions.yml000066400000000000000000000102331451700765000265320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3232d0d90b81fd0dbb44d151d98315d247e8e671cd6d3f281dce17753c9cb9b8 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: clerk - comma: ',' - select_clause_element: column_reference: naked_identifier: totalprice - comma: ',' - select_clause_element: function: function_name: function_name_identifier: rank bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: clerk orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: totalprice - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rnk from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: clerk - comma: ',' - column_reference: naked_identifier: rnk - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: clerk - comma: ',' - select_clause_element: column_reference: naked_identifier: orderdate - comma: ',' - select_clause_element: column_reference: naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: totalprice - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: clerk orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate end_bracket: ) alias_expression: keyword: AS naked_identifier: rolling_sum from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: clerk - comma: ',' - column_reference: naked_identifier: orderdate - comma: ',' - column_reference: naked_identifier: orderkey - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_columns.sql000066400000000000000000000002271451700765000244720ustar00rootroot00000000000000SHOW COLUMNS FROM dbname.tablename; SHOW COLUMNS IN dbname.tablename; SHOW COLUMNS FROM tablename FROM dbname; SHOW COLUMNS IN tablename IN dbname; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_columns.yml000066400000000000000000000025261451700765000245000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4cf7849864cf63b656aaf25f20f39df68107810001b8716f702864a4d105f9b6 file: - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: FROM - table_reference: - naked_identifier: dbname - dot: . 
- naked_identifier: tablename - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: IN - table_reference: - naked_identifier: dbname - dot: . - naked_identifier: tablename - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: FROM - table_reference: naked_identifier: tablename - keyword: FROM - database_reference: naked_identifier: dbname - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: IN - table_reference: naked_identifier: tablename - keyword: IN - database_reference: naked_identifier: dbname - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_create_table.sql000066400000000000000000000001021451700765000254140ustar00rootroot00000000000000SHOW CREATE TABLE tablename; SHOW CREATE TABLE dbname.tablename; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_create_table.yml000066400000000000000000000014441451700765000254300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 93acdb802aed4a1977116a63a9695421637970b639b23d173e06f365d2419409 file: - statement: show_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tablename - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbname - dot: . - naked_identifier: tablename - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_create_view.sql000066400000000000000000000000331451700765000253020ustar00rootroot00000000000000SHOW CREATE VIEW viewname; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_create_view.yml000066400000000000000000000010751451700765000253130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ca2d569ef7d4217449e981c819684f9eb31297df4430c689d6bc85b66276247b file: statement: show_statement: - keyword: SHOW - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: viewname statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_databases.sql000066400000000000000000000000541451700765000247370ustar00rootroot00000000000000SHOW DATABASES; SHOW SCHEMAS LIKE 'regex'; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_databases.yml000066400000000000000000000012141451700765000247400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6f5094dcacff4eeabd192d17a6219b268ef5814bd4df9d97cab0e0122310abae file: - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - keyword: LIKE - quoted_literal: "'regex'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_partitions.sql000066400000000000000000000000331451700765000252010ustar00rootroot00000000000000SHOW PARTITIONS tablename; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_partitions.yml000066400000000000000000000010561451700765000252110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 724b6a64dbad1cd0cdb0dbdfcf2668db8b880d6da57a2e8723b9d4f8372d2ed6 file: statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: tablename statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_tables.sql000066400000000000000000000001471451700765000242650ustar00rootroot00000000000000SHOW TABLES; SHOW TABLES IN sampledb; SHOW TABLES '*myregex*'; SHOW TABLES IN sampledb '*myregex*'; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_tables.yml000066400000000000000000000020041451700765000242610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b65a73fc309b26adc5da6ca959eba1f706afcd4230bfca35c5979cb21e34d40b file: - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: IN - database_reference: naked_identifier: sampledb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - quoted_literal: "'*myregex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: IN - database_reference: naked_identifier: sampledb - quoted_literal: "'*myregex*'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_tblproperties.sql000066400000000000000000000001101451700765000256770ustar00rootroot00000000000000SHOW TBLPROPERTIES tablename; SHOW TBLPROPERTIES tablename('tblname'); sqlfluff-2.3.5/test/fixtures/dialects/athena/show_tblproperties.yml000066400000000000000000000014751451700765000257200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: da66430c35ef016cf8c09d5e7f936557f01055fc5c5f0c00b05a85af7be61feb file: - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: tablename - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: tablename - bracketed: start_bracket: ( quoted_literal: "'tblname'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_views.sql000066400000000000000000000001471451700765000241500ustar00rootroot00000000000000SHOW VIEWS; SHOW VIEWS IN sampledb; SHOW VIEWS LIKE 'regex*'; SHOW VIEWS IN sampledb LIKE 'regex*'; sqlfluff-2.3.5/test/fixtures/dialects/athena/show_views.yml000066400000000000000000000020421451700765000241460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4713819d65a5ffe038d5eb39a83d2efd8419ccb3651897fb07d3278595833f26 file: - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: IN - database_reference: naked_identifier: sampledb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: LIKE - quoted_literal: "'regex*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: IN - database_reference: naked_identifier: sampledb - keyword: LIKE - quoted_literal: "'regex*'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/athena/unload_select.sql000066400000000000000000000002711451700765000245720ustar00rootroot00000000000000UNLOAD (SELECT field_1, field_2 FROM my_table) TO 's3://my_athena_data_location/my_folder/' WITH (format='CSV', compression='gzip', field_delimiter=',', partitioned_by=ARRAY[field_2]); sqlfluff-2.3.5/test/fixtures/dialects/athena/unload_select.yml000066400000000000000000000040051451700765000245730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 97416e6461f0ca98486c4cddc02056d41faa0e64cfda38300f45495e37e58571 file: statement: unload_statement: - keyword: UNLOAD - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'s3://my_athena_data_location/my_folder/'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - keyword: compression - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gzip'" - comma: ',' - keyword: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "','" - comma: ',' - keyword: partitioned_by - comparison_operator: raw_comparison_operator: '=' - typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: naked_identifier: field_2 end_square_bracket: ']' - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/000077500000000000000000000000001451700765000216175ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/bigquery/.sqlfluff000066400000000000000000000000361451700765000234410ustar00rootroot00000000000000[sqlfluff] dialect = bigquery sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_materialized_view_set_options.sql000066400000000000000000000003201451700765000316540ustar00rootroot00000000000000ALTER MATERIALIZED VIEW mydataset.my_mv SET OPTIONS ( enable_refresh=false ); ALTER MATERIALIZED VIEW mydataset.my_mv SET OPTIONS ( friendly_name="my_mv", labels=[("org_unit", "development")] ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_materialized_view_set_options.yml000066400000000000000000000036231451700765000316670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f920358a40ae793dac230cb79235d4b77daac481e600270d82c487e513493ed0 file: - statement: alter_materialized_view_set_options_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: enable_refresh comparison_operator: raw_comparison_operator: '=' boolean_literal: 'false' end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_set_options_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: my_mv - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: friendly_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"my_mv"' - comma: ',' - parameter: labels - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' expression: bracketed: - start_bracket: ( - quoted_literal: '"org_unit"' - comma: ',' - quoted_literal: '"development"' - end_bracket: ) end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_add_column.sql000066400000000000000000000005641451700765000270100ustar00rootroot00000000000000ALTER TABLE mydataset.mytable ADD COLUMN A STRING, ADD COLUMN IF NOT EXISTS B GEOGRAPHY, ADD COLUMN C ARRAY<NUMERIC>, ADD COLUMN D DATE OPTIONS(description="my description"); ALTER TABLE mydataset.mytable ADD COLUMN A STRUCT< B GEOGRAPHY, C ARRAY<INT64>, D INT64 NOT NULL, E TIMESTAMP OPTIONS(description="creation time") >; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_add_column.yml000066400000000000000000000064061451700765000270130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11ca5f74cdd2c59f465e6e8a9e7a57d7717e10129695dcec229045391f266f4a file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: A data_type: data_type_identifier: STRING - comma: ',' - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: naked_identifier: B data_type: data_type_identifier: GEOGRAPHY - comma: ',' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: C data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: NUMERIC end_angle_bracket: '>' - comma: ',' - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: D data_type: data_type_identifier: DATE options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"my description"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: .
- naked_identifier: mytable - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: A data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: B - data_type: data_type_identifier: GEOGRAPHY - comma: ',' - parameter: C - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' - comma: ',' - parameter: D - data_type: data_type_identifier: INT64 - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - parameter: E - data_type: data_type_identifier: TIMESTAMP - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"creation time"' end_bracket: ) - end_angle_bracket: '>' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_alter_column.sql000066400000000000000000000005261451700765000273650ustar00rootroot00000000000000ALTER TABLE mydataset.mytable ALTER COLUMN IF EXISTS A SET OPTIONS ( description='some description here' ), ALTER COLUMN IF EXISTS B DROP NOT NULL, ALTER COLUMN IF EXISTS C DROP DEFAULT, ALTER COLUMN IF EXISTS D SET DATA TYPE FLOAT64, ALTER COLUMN IF EXISTS E SET DEFAULT 0, ALTER COLUMN IF EXISTS F SET DEFAULT CURRENT_TIMESTAMP() ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_alter_column.yml000066400000000000000000000041311451700765000273630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db72467b0a47c3b7dc7b7528b5298b87bf78740e19446fed70a2fec6f3862b63 file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: A - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some description here'" end_bracket: ) - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B - keyword: DROP - keyword: NOT - keyword: 'NULL' - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: C - keyword: DROP - keyword: DEFAULT - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: D - keyword: SET - keyword: DATA - keyword: TYPE - data_type: data_type_identifier: FLOAT64 - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: E - keyword: SET - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - keyword: ALTER - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: F - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_drop_column.sql000066400000000000000000000001121451700765000272110ustar00rootroot00000000000000ALTER TABLE mydataset.mytable DROP COLUMN A, DROP COLUMN IF EXISTS B; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_drop_column.yml000066400000000000000000000014431451700765000272230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aeacd5c9b123877e49a7d44a88ed9f7b000f317f00b87f018609c220839906ab file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: DROP - keyword: COLUMN - naked_identifier: A - comma: ',' - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_rename_column.sql000066400000000000000000000003511451700765000275210ustar00rootroot00000000000000ALTER TABLE mydataset.mytable RENAME COLUMN A TO columnA, RENAME COLUMN IF EXISTS B TO columnB; ALTER TABLE mydataset.mytable RENAME COLUMN columnA TO temp, RENAME COLUMN columnB TO columnA, RENAME COLUMN temp TO columnB; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_rename_column.yml000066400000000000000000000027711451700765000275330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4aa26e86554f6b3078f8555a0121be2fd24f333e52d54d9f7ef94346ca7d0e1e file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: RENAME - keyword: COLUMN - naked_identifier: A - keyword: TO - naked_identifier: columnA - comma: ',' - keyword: RENAME - keyword: COLUMN - keyword: IF - keyword: EXISTS - naked_identifier: B - keyword: TO - naked_identifier: columnB - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: RENAME - keyword: COLUMN - naked_identifier: columnA - keyword: TO - naked_identifier: temp - comma: ',' - keyword: RENAME - keyword: COLUMN - naked_identifier: columnB - keyword: TO - naked_identifier: columnA - comma: ',' - keyword: RENAME - keyword: COLUMN - naked_identifier: temp - keyword: TO - naked_identifier: columnB - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_rename_to.sql000066400000000000000000000000641451700765000266470ustar00rootroot00000000000000ALTER TABLE mydataset.mytable RENAME TO mynewtable; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_rename_to.yml000066400000000000000000000013061451700765000266510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ead70684bf792fdc289833285129002f41a92bb5f4c930fb4279bb04aed655c file: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: mytable - keyword: RENAME - keyword: TO - table_reference: naked_identifier: mynewtable statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_set_options.sql000066400000000000000000000003601451700765000272430ustar00rootroot00000000000000ALTER TABLE mydataset.mytable SET OPTIONS ( expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY), description="Table that expires seven days from now" ); ALTER TABLE table SET OPTIONS (expiration_timestamp = NULL) ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_table_set_options.yml000066400000000000000000000041651451700765000272540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 337e774718873b99a59ccc416f8dfd887b30f51835c7bc1af3b10649192a557d file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: mytable - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '7' date_part: DAY - end_bracket: ) - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"Table that expires seven days from now"' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table - keyword: SET - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' null_literal: 'NULL' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_view_set_options.sql000066400000000000000000000002561451700765000271320ustar00rootroot00000000000000ALTER VIEW mydataset.myview SET OPTIONS ( expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 7 DAY), description="View that expires seven days from now" ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/alter_view_set_options.yml000066400000000000000000000032631451700765000271350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f71083b1799f2398c52344de1bf9e0d4fe5bd38e8de20622f9d591948264bde file: statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: myview - keyword: SET - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '7' date_part: DAY - end_bracket: ) - comma: ',' - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"View that expires seven days from now"' - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/any_value_having.sql000066400000000000000000000002041451700765000256530ustar00rootroot00000000000000SELECT ANY_VALUE(foo HAVING MIN bar) AS any_value_having_min, ANY_VALUE(foo HAVING MAX bar) AS any_value_having_max, FROM t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/any_value_having.yml000066400000000000000000000034741451700765000256710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fa831896a2007e5458f7164a539356855167ec1b4947f32c8a281bb9290c3dc7 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ANY_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: foo - keyword: HAVING - keyword: MIN - expression: column_reference: naked_identifier: bar - end_bracket: ) alias_expression: keyword: AS naked_identifier: any_value_having_min - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ANY_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: foo - keyword: HAVING - keyword: MAX - expression: column_reference: naked_identifier: bar - end_bracket: ) alias_expression: keyword: AS naked_identifier: any_value_having_max - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_distinct_ignore_nulls.sql000066400000000000000000000000761451700765000307600ustar00rootroot00000000000000SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS) AS array_agg FROM t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_distinct_ignore_nulls.yml000066400000000000000000000022141451700765000307560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: de3bd26409d96fecf29597498e175cacc89eb58ca55cfc9b74c611542323e64f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: - start_bracket: ( - keyword: DISTINCT - expression: column_reference: naked_identifier: x - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_ignore_nulls.sql000066400000000000000000000000651451700765000270550ustar00rootroot00000000000000SELECT ARRAY_AGG(x IGNORE NULLS) AS array_agg FROM t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_ignore_nulls.yml000066400000000000000000000021541451700765000270600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: afe206e5856c0a20b918fd55b32ea77d5e3d0c2887a8e53e3faa161c377437a6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_respect_nulls.sql000066400000000000000000000000661451700765000272400ustar00rootroot00000000000000SELECT ARRAY_AGG(x RESPECT NULLS) AS array_agg FROM t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/array_agg_respect_nulls.yml000066400000000000000000000021551451700765000272430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ff858cb48e736948350641eeb8ef1669a4aa0503624c2f55ab2a64632bc4941c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: x - keyword: RESPECT - keyword: NULLS - end_bracket: ) alias_expression: keyword: AS naked_identifier: array_agg from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/bigquery/assert.sql000066400000000000000000000004041451700765000236370ustar00rootroot00000000000000ASSERT ( (SELECT COUNT(*) FROM UNNEST([1, 2, 3, 4, 5, 6])) > 5 ) AS 'Table must contain more than 5 rows.'; ASSERT EXISTS( SELECT X FROM UNNEST([7877, 7879, 7883, 7901, 7907]) AS X WHERE X = 7919 ) AS 'Column X must contain the value 7919'; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/assert.yml000066400000000000000000000104541451700765000236470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 22c4c59aa20ff6975daccb6e8dd8da9b0c33c1bb97c7426f9ac80f4e94f33fbc file: - statement: assert_statement: - keyword: ASSERT - expression: bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_square_bracket: ']' end_bracket: ) end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' end_bracket: ) - keyword: AS - quoted_literal: "'Table must contain more than 5 rows.'" - statement_terminator: ; - statement: assert_statement: - keyword: ASSERT - expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: X from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '7877' - comma: ',' - numeric_literal: '7879' - comma: ',' - numeric_literal: '7883' - comma: ',' - numeric_literal: '7901' - comma: ',' - numeric_literal: '7907' - end_square_bracket: ']' end_bracket: ) alias_expression: keyword: AS naked_identifier: X where_clause: keyword: WHERE expression: column_reference: naked_identifier: X comparison_operator: raw_comparison_operator: '=' numeric_literal: '7919' end_bracket: ) - keyword: AS - quoted_literal: "'Column X must contain the value 7919'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/call_procedure.sql000066400000000000000000000003061451700765000253220ustar00rootroot00000000000000CALL mydataset.create_customer(); DECLARE retCode INT64; -- Procedure signature: (IN account_id STRING, OUT retCode INT64) CALL mySchema.UpdateSomeTables('someAccountId', retCode); SELECT retCode; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/call_procedure.yml000066400000000000000000000026761451700765000253400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 20d0866fe65ff41e8d638794bcff9561bab6a995fe35b41353111e4b0361c1d4 file: - statement: call_statement: keyword: CALL procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: DECLARE naked_identifier: retCode data_type: data_type_identifier: INT64 - statement_terminator: ; - statement: call_statement: keyword: CALL procedure_name: naked_identifier: mySchema dot: . 
procedure_name_identifier: UpdateSomeTables bracketed: - start_bracket: ( - expression: quoted_literal: "'someAccountId'" - comma: ',' - expression: column_reference: naked_identifier: retCode - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: retCode - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_external_table.sql000066400000000000000000000021041451700765000266510ustar00rootroot00000000000000CREATE EXTERNAL TABLE dataset.CsvTable OPTIONS ( format = 'CSV', uris = ['gs://bucket/path1.csv', 'gs://bucket/path2.csv'] ); CREATE OR REPLACE EXTERNAL TABLE dataset.CsvTable ( x INT64, y STRING ) OPTIONS ( format = 'CSV', uris = ['gs://bucket/path1.csv'], field_delimiter = '|', max_bad_records = 5 ); CREATE EXTERNAL TABLE dataset.AutoHivePartitionedTable WITH PARTITION COLUMNS OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ); CREATE EXTERNAL TABLE dataset.CustomHivePartitionedTable WITH PARTITION COLUMNS ( field_1 STRING, -- column order must match the external path field_2 INT64 ) OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ); -- Test arbitrary ordering of optional arguments CREATE EXTERNAL TABLE dataset.CustomHivePartitionedTable OPTIONS ( uris=['gs://bucket/path/*'], format=csv, hive_partition_uri_prefix='gs://bucket/path' ) WITH PARTITION COLUMNS ( field_1 STRING, -- column order must match the external path field_2 INT64 ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_external_table.yml000066400000000000000000000146751451700765000266610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c1a2b25cdbdf47210e66ae07cab232d7a6820dfc411e0505064e56c472390cfd file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: CsvTable - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: - start_square_bracket: '[' - quoted_literal: "'gs://bucket/path1.csv'" - comma: ',' - quoted_literal: "'gs://bucket/path2.csv'" - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: .
- naked_identifier: CsvTable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: STRING - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path1.csv'" end_square_bracket: ']' - comma: ',' - parameter: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'|'" - comma: ',' - parameter: max_bad_records - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: AutoHivePartitionedTable - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: CustomHivePartitionedTable - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: field_2 data_type: data_type_identifier: INT64 - end_bracket: ) - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: CustomHivePartitionedTable - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: uris - comparison_operator: raw_comparison_operator: '=' - array_literal: start_square_bracket: '[' quoted_literal: "'gs://bucket/path/*'" end_square_bracket: ']' - comma: ',' - parameter: format - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: csv - comma: ',' - parameter: hive_partition_uri_prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/path'" - end_bracket: ) - keyword: WITH - keyword: PARTITION - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: field_1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: field_2 data_type: data_type_identifier: INT64 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_function_no_args.sql000066400000000000000000000001751451700765000272230ustar00rootroot00000000000000CREATE FUNCTION add() RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL; DROP FUNCTION myproject.mydataset.addfunc; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_function_no_args.yml000066400000000000000000000022171451700765000272240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4931a20648137c67b8a0523b177168eeb14ba8cfa8c6ca932cf62df543df9002 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - function_definition: - keyword: AS - udf_body: "'select $1 + $2;'" - keyword: LANGUAGE - naked_identifier: SQL - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: - naked_identifier: myproject - dot: . - naked_identifier: mydataset - dot: . - function_name_identifier: addfunc - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_complex_types.sql000066400000000000000000000004161451700765000311600ustar00rootroot00000000000000CREATE TEMP FUNCTION qs( foo1 INT64, foo2 ARRAY<STRING>, foo3 STRUCT<x INT64>, foo4 STRUCT<x INT64, y INT64>, foo5 STRUCT<a ARRAY<FLOAT>, b STRUCT<x INT64, y INT64>> ) RETURNS STRUCT<product_id ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_complex_types.yml000066400000000000000000000066601451700765000311670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 8f953523c623c2f0f692913757e4e5b6b3166da87fe5b8f571796a8e326977cb file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: - start_bracket: ( - parameter: foo1 - data_type: data_type_identifier: INT64 - comma: ',' - parameter: foo2 - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: STRING end_angle_bracket: '>' - comma: ',' - parameter: foo3 - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: x data_type: data_type_identifier: INT64 end_angle_bracket: '>' - comma: ',' - parameter: foo4 - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: y - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' - comma: ',' - parameter: foo5 - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: a - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: FLOAT end_angle_bracket: '>' - comma: ',' - parameter: b - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: y - data_type: data_type_identifier: INT64 - end_angle_bracket: '>' - end_angle_bracket: '>' - end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: product_id data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_deterministic.sql000066400000000000000000000001161451700765000311250ustar00rootroot00000000000000CREATE FUNCTION qs( y STRING ) DETERMINISTIC LANGUAGE js AS " return y; " sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_deterministic.yml000066400000000000000000000016001451700765000311260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5215816c5689dccbcbf896f437850b2dc91b0efde7362006d1db916fd58fab59 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - function_definition: - keyword: DETERMINISTIC - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: '" return y; "' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_options_library_array.sql000066400000000000000000000003641451700765000327040ustar00rootroot00000000000000CREATE TEMP FUNCTION parseTopSellers(arr_str STRING) RETURNS ARRAY<STRUCT<product_id INT64, rating FLOAT64>> LANGUAGE js OPTIONS ( library=["gs://my-bucket/path/to/lib1.js", "gs://my-bucket/path/to/lib2.js"] ) AS """ CODE GOES HERE """ sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_options_library_array.yml000066400000000000000000000035751451700765000327130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61c3e66ff3de5079c9134b67ac6d4eb08d90ccbeeb79ef35a01d5588eee8a2a5 file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: parseTopSellers - function_parameter_list: bracketed: start_bracket: ( parameter: arr_str data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: product_id - data_type: data_type_identifier: INT64 - comma: ',' - parameter: rating - data_type: data_type_identifier: FLOAT64 - end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: OPTIONS - bracketed: - start_bracket: ( - parameter: library - comparison_operator: raw_comparison_operator: '=' - start_square_bracket: '[' - double_quote: '"gs://my-bucket/path/to/lib1.js"' - comma: ',' - double_quote: '"gs://my-bucket/path/to/lib2.js"' - end_square_bracket: ']' - end_bracket: ) - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_quoted_name.sql000066400000000000000000000001641451700765000305660ustar00rootroot00000000000000CREATE TEMP FUNCTION qs( y STRING ) RETURNS STRUCT<`$=` ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_quoted_name.yml000066400000000000000000000025121451700765000305670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 4a4cb38f3e6f423ed83783a235c1c7ac5c0a268cfd2a4903b20ccba8352d5faa file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: '`$=`' data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_simple.sql000066400000000000000000000001001451700765000295600ustar00rootroot00000000000000CREATE FUNCTION qs( y STRING ) LANGUAGE js AS " return y; " sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_simple.yml000066400000000000000000000015411451700765000295660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cd6b2acb838c5b14165ae5ff3d733beed753ba1cf49ed4f8c057ff0c60c1e41d file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: '" return y; "' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_underscore_name.sql000066400000000000000000000001741451700765000314370ustar00rootroot00000000000000CREATE TEMP FUNCTION _qs( y STRING ) RETURNS STRUCT<_product_id ARRAY<INT64>> LANGUAGE js AS """ CODE GOES HERE """ sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_js_function_underscore_name.yml000066400000000000000000000025201451700765000314360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: dd3ec6684af84a81c35279fd471dd61e0f8e6b4947388c123b76bd6d21cb80ea file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: _qs - function_parameter_list: bracketed: start_bracket: ( parameter: y data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: _product_id data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: INT64 end_angle_bracket: '>' end_angle_bracket: '>' - function_definition: - keyword: LANGUAGE - naked_identifier: js - keyword: AS - udf_body: "\"\"\"\n CODE GOES HERE\n\"\"\"" sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_materialized_view.sql000066400000000000000000000006731451700765000273750ustar00rootroot00000000000000CREATE MATERIALIZED VIEW mydataset.my_mv AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW IF NOT EXISTS mydataset.my_mv AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW mydataset.my_mv OPTIONS( friendly_name="my_mv" ) AS SELECT * FROM anotherdataset.mv_base_table; CREATE MATERIALIZED VIEW mydataset.my_mv PARTITION BY DATE(x) CLUSTER BY y AS SELECT x, y FROM anotherdataset.mv_base_table; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_materialized_view.yml000066400000000000000000000104761451700765000274010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fa0c77fc692915b62e59448c86e6a13dd1a72bec82c6e436dff5b7130f2a0076 file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: my_mv - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: friendly_name comparison_operator: raw_comparison_operator: '=' quoted_literal: '"my_mv"' end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherdataset - dot: . - naked_identifier: mv_base_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_or_replace_sql_function_any_type.sql000066400000000000000000000003751451700765000324770ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION qs( y ANY TYPE ) AS ( CASE WHEN y = 1 THEN 'low' WHEN y = 2 THEN 'midlow' WHEN y = 3 THEN 'mid' WHEN y = 4 THEN 'midhigh' WHEN y = 5 THEN 'high' ELSE "unknown" END ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_or_replace_sql_function_any_type.yml000066400000000000000000000057621451700765000325060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 07939b1502c3d5a56b29565a8927c4229cf388ead897001f227365c5e5a1143f file: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: qs - function_parameter_list: bracketed: - start_bracket: ( - parameter: y - keyword: ANY - keyword: TYPE - end_bracket: ) - function_definition: keyword: AS bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'low'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - keyword: THEN - expression: quoted_literal: "'midlow'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - keyword: THEN - expression: quoted_literal: "'mid'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '4' - keyword: THEN - expression: quoted_literal: "'midhigh'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - keyword: THEN - expression: quoted_literal: "'high'" - else_clause: keyword: ELSE expression: quoted_literal: '"unknown"' - keyword: END end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_or_replace_table_options_as.sql000066400000000000000000000001451451700765000314030ustar00rootroot00000000000000CREATE OR REPLACE TABLE foo OPTIONS (description = 'copy of bar') AS ( SELECT * from bar ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_or_replace_table_options_as.yml000066400000000000000000000025231451700765000314070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a2d3d2282b592cfaec503c5d89dbe0a259650df10bf946215d2f24e7cb1ae0e0 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_procedure.sql000066400000000000000000000033171451700765000256570ustar00rootroot00000000000000CREATE OR REPLACE PROCEDURE `mfrm_working_temp_dataset.temp` (incremental INT64) BEGIN SELECT CURRENT_DATETIME(); END; CREATE PROCEDURE myProject.myDataset.QueryTable() BEGIN SELECT * FROM anotherDataset.myTable; END; CREATE PROCEDURE mydataset.AddDelta(INOUT x INT64, delta INT64) BEGIN SET x = x + delta; END; CREATE PROCEDURE mydataset.SelectFromTablesAndAppend( target_date DATE, OUT rows_added INT64) BEGIN CREATE TEMP TABLE DataForTargetDate AS SELECT t1.id, t1.x, t2.y FROM dataset.partitioned_table1 AS t1 JOIN dataset.partitioned_table2 AS t2 ON t1.id = t2.id WHERE t1.date = target_date AND t2.date = target_date; SET rows_added = (SELECT COUNT(*) FROM DataForTargetDate); SELECT id, x, y, target_date -- note that target_date is a parameter FROM DataForTargetDate; DROP TABLE DataForTargetDate; END; CREATE OR REPLACE PROCEDURE mydataset.create_customer() BEGIN DECLARE id STRING; SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id) VALUES(id); SELECT FORMAT("Created customer %s", id); END; CREATE OR REPLACE PROCEDURE mydataset.create_customer(name STRING) BEGIN DECLARE id STRING; SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id, name) VALUES(id, name); SELECT FORMAT("Created customer %s (%s)", id, name); END; CREATE OR REPLACE PROCEDURE mydataset.create_customer(name STRING, OUT id STRING) BEGIN SET id = GENERATE_UUID(); INSERT INTO mydataset.customers (customer_id, name) VALUES(id, name); SELECT FORMAT("Created customer %s (%s)", id, name); END; CREATE OR REPLACE PROCEDURE mydataset.test_raise_return(error_message STRING) BEGIN RETURN; RAISE; RAISE USING MESSAGE = "Test"; RAISE USING MESSAGE = error_message; END; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_procedure.yml000066400000000000000000000447021451700765000256640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b6e3d437bfcd1728d0466ceeaf60034b799468b6393673dabe02d5d6fccbc004 file: - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: procedure_name_identifier: '`mfrm_working_temp_dataset.temp`' - procedure_parameter_list: bracketed: start_bracket: ( parameter: incremental data_type: data_type_identifier: INT64 end_bracket: ) - keyword: BEGIN - procedure_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CURRENT_DATETIME bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: - naked_identifier: myProject - dot: . - naked_identifier: myDataset - dot: . - procedure_name_identifier: QueryTable - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: BEGIN - procedure_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: anotherDataset - dot: . - naked_identifier: myTable statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: AddDelta - procedure_parameter_list: bracketed: - start_bracket: ( - keyword: INOUT - parameter: x - data_type: data_type_identifier: INT64 - comma: ',' - parameter: delta - data_type: data_type_identifier: INT64 - end_bracket: ) - keyword: BEGIN - procedure_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: naked_identifier: delta statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: SelectFromTablesAndAppend - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: target_date - data_type: data_type_identifier: DATE - comma: ',' - keyword: OUT - parameter: rows_added - data_type: data_type_identifier: INT64 - end_bracket: ) - keyword: BEGIN - procedure_statements: - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: DataForTargetDate - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: x - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: partitioned_table1 alias_expression: keyword: AS naked_identifier: t1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: partitioned_table2 alias_expression: keyword: AS naked_identifier: t2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: target_date - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: target_date - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: rows_added comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DataForTargetDate end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y - comma: ',' - select_clause_element: column_reference: naked_identifier: target_date from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DataForTargetDate - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: DataForTargetDate - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: BEGIN - procedure_statements: - statement: declare_segment: keyword: DECLARE naked_identifier: id data_type: data_type_identifier: STRING - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: customers - bracketed: start_bracket: ( column_reference: naked_identifier: customer_id end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s"' - comma: ',' - expression: column_reference: naked_identifier: id - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: start_bracket: ( parameter: name data_type: data_type_identifier: STRING end_bracket: ) - keyword: BEGIN - procedure_statements: - statement: declare_segment: keyword: DECLARE naked_identifier: id data_type: data_type_identifier: STRING - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: customers - bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s (%s)"' - comma: ',' - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: create_customer - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: name - data_type: data_type_identifier: STRING - comma: ',' - keyword: OUT - parameter: id - data_type: data_type_identifier: STRING - end_bracket: ) - keyword: BEGIN - procedure_statements: - statement: set_segment: keyword: SET naked_identifier: id comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: GENERATE_UUID bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: customers - bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: FORMAT bracketed: - start_bracket: ( - expression: quoted_literal: '"Created customer %s (%s)"' - comma: ',' - expression: column_reference: naked_identifier: id - comma: ',' - expression: column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - procedure_name: naked_identifier: mydataset dot: . procedure_name_identifier: test_raise_return - procedure_parameter_list: bracketed: start_bracket: ( parameter: error_message data_type: data_type_identifier: STRING end_bracket: ) - keyword: BEGIN - procedure_statements: - statement: return_statement: keyword: RETURN - statement_terminator: ; - statement: raise_statement: keyword: RAISE - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: '"Test"' - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: naked_identifier: error_message - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_column_options.sql000066400000000000000000000006341451700765000301050ustar00rootroot00000000000000CREATE TABLE t_table1 ( x INT64 OPTIONS(description="An INTEGER field") ); CREATE TABLE t_table1 ( x INT64 NOT NULL OPTIONS(description="An INTEGER field that is NOT NULL") ); CREATE TABLE t_table1 ( x STRUCT< col1 INT64 OPTIONS(description="An INTEGER field in a STRUCT") >, y ARRAY<STRUCT< col1 INT64 OPTIONS(description="An INTEGER field in a REPEATED STRUCT") >> ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_column_options.yml000066400000000000000000000072401451700765000301070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 7c6425d4b2c260947fda76839d8c852def220cf4d503f57679b1e8a198e0dd73 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 column_constraint_segment: - keyword: NOT - keyword: 'NULL' options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field that is NOT NULL"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: col1 data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field in a STRUCT"' end_bracket: ) end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: y data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < parameter: col1 data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field in a REPEATED STRUCT"' end_bracket: ) end_angle_bracket: '>' end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_columns_partition_options.sql000066400000000000000000000001671451700765000323620ustar00rootroot00000000000000CREATE TABLE newtable ( x TIMESTAMP, y INT64 ) PARTITION BY DATE(x) CLUSTER BY x, y OPTIONS(description="foo") sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_columns_partition_options.yml000066400000000000000000000033101451700765000323550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ae438e26e9f52a679b12f43f407b03b739ffb201a67f744616effdbe07ccd83c file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: TIMESTAMP - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"foo"' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_hyphen_project.sql000066400000000000000000000002231451700765000300500ustar00rootroot00000000000000CREATE OR REPLACE TABLE project-name.dataset_name.table_name ( x INT64 OPTIONS(description="An INTEGER field") ) PARTITION BY DATE(import_ts); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_hyphen_project.yml000066400000000000000000000031271451700765000300600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4ed2102062a53d6ae9194517b919893ad55c8b6fa4bb6eced98d27a83f98668f file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: project - dash: '-' - naked_identifier: name - dot: . - naked_identifier: dataset_name - dot: . - naked_identifier: table_name - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: INT64 options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field"' end_bracket: ) end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: import_ts end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_like_copy_clone.sql000066400000000000000000000004461451700765000301740ustar00rootroot00000000000000CREATE TABLE mydataset.newtable LIKE mydataset.sourcetable ; CREATE TABLE mydataset.newtable LIKE mydataset.sourcetable AS SELECT * FROM mydataset.myothertable ; CREATE TABLE mydataset.newtable COPY mydataset.sourcetable ; CREATE TABLE mydataset.newtable_clone CLONE mydataset.sourcetable ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_like_copy_clone.yml000066400000000000000000000044151451700765000301760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1d796951320912d1de1fb4f375e9c181b868fd162508bc7e60e82f88f0097b04 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: LIKE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: LIKE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: myothertable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable - keyword: COPY - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: newtable_clone - keyword: CLONE - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: sourcetable - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_options.sql000066400000000000000000000005351451700765000265300ustar00rootroot00000000000000CREATE TABLE table_1 OPTIONS( expiration_timestamp = TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 24 HOUR) ); CREATE TABLE table_1 OPTIONS( expiration_timestamp = TIMESTAMP("2023-01-01 00:00:00 UTC") ); CREATE TABLE table_1 OPTIONS( description = "Test mixed options", expiration_timestamp = TIMESTAMP("2023-01-01 00:00:00 UTC") ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_options.yml000066400000000000000000000056251451700765000265370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9626d2bc3f5efcefdc2860f3cc69f9760838065666af759ea6458370cc1e6b6f file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: TIMESTAMP_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '24' date_part: HOUR - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: expiration_timestamp comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: TIMESTAMP bracketed: start_bracket: ( expression: quoted_literal: '"2023-01-01 00:00:00 UTC"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_1 - options_segment: keyword: OPTIONS bracketed: - start_bracket: ( - parameter: description - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"Test mixed options"' - comma: ',' - parameter: expiration_timestamp - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: TIMESTAMP bracketed: start_bracket: ( expression: quoted_literal: '"2023-01-01 00:00:00 UTC"' end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_options_as.sql000066400000000000000000000001311451700765000272030ustar00rootroot00000000000000CREATE OR REPLACE TABLE foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_options_as.yml000066400000000000000000000025231451700765000272140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a2d3d2282b592cfaec503c5d89dbe0a259650df10bf946215d2f24e7cb1ae0e0 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_partition_by_as.sql000066400000000000000000000001511451700765000302150ustar00rootroot00000000000000CREATE TABLE newtable ( x INT64, y INT64 ) PARTITION BY y AS SELECT x, y FROM table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_partition_by_as.yml000066400000000000000000000027761451700765000302360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9efd71e7018e932691fde021d67c9ad35319421490736d07e46c78483b4c28b8 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_partition_by_cluster_by_as.sql000066400000000000000000000001711451700765000324520ustar00rootroot00000000000000CREATE TABLE newtable ( x INT64, y INT64 ) PARTITION BY y CLUSTER BY x, y AS SELECT x, y FROM table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_partition_by_cluster_by_as.yml000066400000000000000000000033701451700765000324600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c4cacf9cdeb2b07a2f67ebf103013783bbffae4977f85e1043aaa02042153e6e file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: newtable - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INT64 - end_bracket: ) - partition_by_segment: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: y - cluster_by_segment: - keyword: CLUSTER - keyword: BY - expression: column_reference: naked_identifier: x - comma: ',' - expression: column_reference: naked_identifier: y - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x - comma: ',' - select_clause_element: column_reference: naked_identifier: y from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_trailing_comma.sql000066400000000000000000000003601451700765000300160ustar00rootroot00000000000000-- Basic example of trailing comma CREATE TABLE t_table ( col1 STRING, ); -- Complex example with other variants CREATE TABLE t_table ( col1 STRING, x INT64 NOT NULL OPTIONS(description="An INTEGER field that is NOT NULL"), ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_table_trailing_comma.yml000066400000000000000000000032331451700765000300220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 018364a69b948f44088a1b9f4dd3793605a728e0f7404b540764dd923a4f9917 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table - bracketed: start_bracket: ( column_definition: naked_identifier: col1 data_type: data_type_identifier: STRING comma: ',' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: col1 data_type: data_type_identifier: STRING - comma: ',' - column_definition: naked_identifier: x data_type: data_type_identifier: INT64 column_constraint_segment: - keyword: NOT - keyword: 'NULL' options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: '"An INTEGER field that is NOT NULL"' end_bracket: ) - comma: ',' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_temp_function_with_select.sql000066400000000000000000000002551451700765000311310ustar00rootroot00000000000000CREATE TEMP FUNCTION URLDECODE ( url STRING ) RETURNS STRING AS (( SELECT 1 FROM UNNEST(REGEXP_EXTRACT_ALL(url, r"%[0-9a-fA-F]{2}|[^%]+")) AS y WITH OFFSET AS i )) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_temp_function_with_select.yml000066400000000000000000000053221451700765000311330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6c13780bf59f303ac44b547172661f69d0d29b0e55ce5676b9f6a53d76968672 file: statement: create_function_statement: - keyword: CREATE - keyword: TEMP - keyword: FUNCTION - function_name: function_name_identifier: URLDECODE - function_parameter_list: bracketed: start_bracket: ( parameter: url data_type: data_type_identifier: STRING end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: STRING - function_definition: keyword: AS bracketed: start_bracket: ( expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: REGEXP_EXTRACT_ALL bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: url - comma: ',' - expression: quoted_literal: r"%[0-9a-fA-F]{2}|[^%]+" - end_bracket: ) end_bracket: ) - alias_expression: keyword: AS naked_identifier: y - keyword: WITH - keyword: OFFSET - alias_expression: keyword: AS naked_identifier: i end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_view_options_as.sql000066400000000000000000000003651451700765000270770ustar00rootroot00000000000000CREATE OR REPLACE VIEW foo AS (SELECT * from bar); CREATE OR REPLACE VIEW foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar); CREATE OR REPLACE VIEW IF NOT EXISTS foo OPTIONS (description = 'copy of bar') AS (SELECT * from bar); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/create_view_options_as.yml000066400000000000000000000061361451700765000271030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5866457289782a3325bf4d8bcd5ca7d6159222350cb8339923133c5ca619730c file: - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: foo - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: foo - options_segment: keyword: OPTIONS bracketed: start_bracket: ( parameter: description comparison_operator: raw_comparison_operator: '=' quoted_literal: "'copy of bar'" end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/dateparts.sql000066400000000000000000000013231451700765000243260ustar00rootroot00000000000000SELECT col1, EXTRACT(MICROSECOND FROM col1), EXTRACT(MILLISECOND FROM col1), EXTRACT(SECOND FROM col1), EXTRACT(MINUTE FROM col1), EXTRACT(HOUR FROM col1), EXTRACT(DAY FROM col1), EXTRACT(DAYOFWEEK FROM col1), EXTRACT(DAYOFYEAR FROM col1), EXTRACT(WEEK FROM col1), EXTRACT(ISOWEEK FROM col1), EXTRACT(MONTH FROM col1), EXTRACT(QUARTER FROM col1), EXTRACT(YEAR FROM col1), EXTRACT(ISOYEAR FROM col1), EXTRACT(WEEK(SUNDAY) FROM col1), EXTRACT(WEEK FROM col1) AS week, EXTRACT(DATE FROM col1) AS week, EXTRACT(DATE FROM TIMESTAMP_SECONDS(1651135778)), LAST_DAY(col1, MONTH), LAST_DAY(col1, WEEK), LAST_DAY(col1, WEEK(SUNDAY)), FROM tbl1; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/dateparts.yml000066400000000000000000000227421451700765000243400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: da0054010e44144fc453c616dacc58c6a075bb91364b9f45bb9fb901ed30bf84 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MICROSECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MILLISECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: SECOND keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MINUTE keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAYOFWEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAYOFYEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: WEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ISOWEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MONTH keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: QUARTER keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: YEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ISOYEAR keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part_week: keyword: WEEK bracketed: start_bracket: ( 
keyword: SUNDAY end_bracket: ) keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: WEEK keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) alias_expression: keyword: AS naked_identifier: week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DATE keyword: FROM expression: column_reference: naked_identifier: col1 end_bracket: ) alias_expression: keyword: AS naked_identifier: week - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DATE keyword: FROM expression: function: function_name: function_name_identifier: TIMESTAMP_SECONDS bracketed: start_bracket: ( expression: numeric_literal: '1651135778' end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part: MONTH end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part: WEEK end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_DAY bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' date_part_week: keyword: WEEK bracketed: start_bracket: ( keyword: SUNDAY end_bracket: ) end_bracket: ) - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/declare_variable.sql000066400000000000000000000015501451700765000256050ustar00rootroot00000000000000declare var1 int64; declare var2, var3 string; declare var4 default 'value'; declare var5 int64 default 1 + 2; declare var6 string(10); declare var7 numeric(5, 2); declare arr1 array<string>; declare arr2 default ['one', 'two']; declare arr3 default []; declare arr4 array<string> default ['one', 'two']; declare arr5 array<string(10)>; declare str1 struct<f1 string, f2 string>; declare str2 struct<f1 string, f2 string> default struct('one', 'two'); declare str3 default struct('one', 'two'); declare str4 struct<f1 string, f2 string> default ('one', 'two'); declare str5 struct<f1 string(10), f2 string(10)>; -- Defining variables in quoted names declare `var1` string; declare `var1` string default 'value'; declare `var1`, `var1` string; -- Defining variables mixing quoted and unquoted names declare var1, `var2` string; declare var1, `var2` string default 'value'; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/declare_variable.yml000066400000000000000000000175431451700765000256140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 5b2050e76f6e2498f1fbdbe605abe116b14b2f43edfb1b5108fe2258010e715b file: - statement: declare_segment: keyword: declare naked_identifier: var1 data_type: data_type_identifier: int64 - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var2 - comma: ',' - naked_identifier: var3 - data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var4 - keyword: default - quoted_literal: "'value'" - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var5 - data_type: data_type_identifier: int64 - keyword: default - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var6 data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var7 data_type: data_type_identifier: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: arr1 data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr2 - keyword: default - array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr3 - keyword: default - array_literal: start_square_bracket: '[' end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: arr4 - data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' - keyword: default - array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: arr5 data_type: array_type: keyword: array start_angle_bracket: < data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: str1 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: str2 - data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - keyword: default - expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - 
naked_identifier: str3 - keyword: default - expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: str4 - data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string - comma: ',' - parameter: f2 - data_type: data_type_identifier: string - end_angle_bracket: '>' - keyword: default - tuple: bracketed: - start_bracket: ( - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_bracket: ) - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: str5 data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - parameter: f1 - data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - parameter: f2 - data_type: data_type_identifier: string bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_angle_bracket: '>' - statement_terminator: ; - statement: declare_segment: keyword: declare quoted_identifier: '`var1`' data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - quoted_identifier: '`var1`' - data_type: data_type_identifier: string - keyword: default - quoted_literal: "'value'" - statement_terminator: ; - statement: declare_segment: - keyword: declare - quoted_identifier: '`var1`' - comma: ',' - quoted_identifier: '`var1`' - data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: keyword: declare naked_identifier: var1 comma: ',' quoted_identifier: '`var2`' data_type: data_type_identifier: string - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var1 - comma: ',' - quoted_identifier: '`var2`' - data_type: data_type_identifier: string - keyword: default - quoted_literal: "'value'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/declare_variable_with_default.sql000066400000000000000000000001621451700765000303420ustar00rootroot00000000000000declare var5 date default CURRENT_DATE(); declare var4 int64 default 1; declare var3 string default (SELECT "x"); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/declare_variable_with_default.yml000066400000000000000000000025171451700765000303520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e0275969793cbdfd1540f5baec30ac3f9a0e7db84056b4d58905b363b6d40e6e file: - statement: declare_segment: - keyword: declare - naked_identifier: var5 - data_type: data_type_identifier: date - keyword: default - function: function_name: function_name_identifier: CURRENT_DATE bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var4 - data_type: data_type_identifier: int64 - keyword: default - numeric_literal: '1' - statement_terminator: ; - statement: declare_segment: - keyword: declare - naked_identifier: var3 - data_type: data_type_identifier: string - keyword: default - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"x"' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/delete.sql000066400000000000000000000005141451700765000236020ustar00rootroot00000000000000DELETE dataset.Inventory WHERE quantity = 0; DELETE dataset.Inventory i WHERE i.product NOT IN (SELECT product from dataset.NewArrivals); DELETE dataset.Inventory WHERE NOT EXISTS (SELECT * from dataset.NewArrivals WHERE Inventory.product = NewArrivals.product); DELETE FROM `project_id.dataset_id.target_name` WHERE TRUE ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/delete.yml000066400000000000000000000071221451700765000236060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d0d792ccee74c7d010d7bc94eb4b3b7ac9f5f7c052ce3eb4de5401307396aa62 file: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . - naked_identifier: Inventory where_clause: keyword: WHERE expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . - naked_identifier: Inventory alias_expression: naked_identifier: i where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: i - dot: . - naked_identifier: product - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: product from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dataset - dot: . - naked_identifier: Inventory where_clause: keyword: WHERE expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: Inventory - dot: . 
- naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: NewArrivals - dot: . - naked_identifier: product end_bracket: ) - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: quoted_identifier: '`project_id.dataset_id.target_name`' - where_clause: keyword: WHERE expression: boolean_literal: 'TRUE' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/drop_materialized_view.sql000066400000000000000000000001331451700765000270650ustar00rootroot00000000000000DROP MATERIALIZED VIEW mydataset.my_mv; DROP MATERIALIZED VIEW IF EXISTS mydataset.my_mv; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/drop_materialized_view.yml000066400000000000000000000016501451700765000270740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 023f2a8fc6cf7288d13cc3d91ec757da95570d47cfc90e3dca3f5abf0d2fff63 file: - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: my_mv - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/export_statement.sql000066400000000000000000000027261451700765000257540ustar00rootroot00000000000000EXPORT DATA WITH CONNECTION PROJECT_ID.LOCATION.CONNECTION_ID OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1; EXPORT DATA WITH CONNECTION `PROJECT_ID.LOCATION.CONNECTION_ID` OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri="gs://bucket/folder/*.csv", format="CSV", overwrite=true, header=true, field_delimiter=';') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*', format='AVRO', compression='SNAPPY') AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*', format='PARQUET', overwrite=true) AS SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10; EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';') AS WITH cte AS ( SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10 ) SELECT * FROM cte; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/export_statement.yml000066400000000000000000000314001451700765000257450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ff4d2fa69759372f4d3bc11a761c5120bd908dffde43fcec59efbf422f874dcc file: - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: WITH - keyword: CONNECTION - object_reference: - naked_identifier: PROJECT_ID - dot: . - naked_identifier: LOCATION - dot: . - naked_identifier: CONNECTION_ID - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: WITH - keyword: CONNECTION - object_reference: quoted_identifier: '`PROJECT_ID.LOCATION.CONNECTION_ID`' - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"gs://bucket/folder/*.csv"' - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"CSV"' - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AVRO'" - comma: ',' - export_option: compression - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SNAPPY'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PARQUET'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . 
- naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/for_in.sql000066400000000000000000000010751451700765000236170ustar00rootroot00000000000000 -- For statement FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count; END FOR; -- Multiple statements FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count; SELECT record.word, record.word_count; SELECT record.word, record.word_count; END FOR; -- With Assert FOR user IN ( SELECT group1, count(*) as count FROM `database.user` ) DO ASSERT (COUNT > 0); END FOR; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/for_in.yml000066400000000000000000000145541451700765000236270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7775b88f55c77a35515b37384786c1bd94cc2be7968b7f712a5957a2ec5b3345 file: - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: record - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: word - comma: ',' - select_clause_element: column_reference: naked_identifier: word_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bigquery - dash: '-' - naked_identifier: public - dash: '-' - naked_identifier: data - dot: . - naked_identifier: samples - dot: . - naked_identifier: shakespeare limit_clause: keyword: LIMIT numeric_literal: '5' end_bracket: ) - keyword: DO - for_in_statements: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: record - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: word - comma: ',' - select_clause_element: column_reference: naked_identifier: word_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bigquery - dash: '-' - naked_identifier: public - dash: '-' - naked_identifier: data - dot: . - naked_identifier: samples - dot: .
- naked_identifier: shakespeare limit_clause: keyword: LIMIT numeric_literal: '5' end_bracket: ) - keyword: DO - for_in_statements: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word - comma: ',' - select_clause_element: column_reference: - naked_identifier: record - dot: . - naked_identifier: word_count - statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - multi_statement_segment: for_in_statement: - keyword: FOR - naked_identifier: user - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: group1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: as naked_identifier: count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`database.user`' end_bracket: ) - keyword: DO - for_in_statements: statement: assert_statement: keyword: ASSERT expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: COUNT comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/handle_exception.sql000066400000000000000000000016671451700765000256630ustar00rootroot00000000000000-- Basic block BEGIN SELECT * FROM one_table; END; -- Block showcasing use of in-scope variables DECLARE x INT64 DEFAULT 10; BEGIN DECLARE y INT64; SET y = x; SELECT y; END; SELECT x; -- Basic exception block BEGIN SELECT 1/0; EXCEPTION WHEN ERROR THEN RAISE USING MESSAGE = "An error happened"; END; -- Exception block utilising @error BEGIN SELECT 100/0; EXCEPTION WHEN ERROR THEN RAISE USING MESSAGE = FORMAT("Something went wrong: %s", @@error.message); END; -- More complicated block with multiple statements BEGIN EXPORT DATA OPTIONS( uri='gs://bucket/folder/*.csv', format='CSV', overwrite=true, header=true, field_delimiter=';' ) AS ( SELECT field1, field2 FROM mydataset.table1 ORDER BY field1 LIMIT 10 ); SELECT 1/0; EXCEPTION WHEN ERROR THEN DELETE FROM mydataset.table1 WHERE field1 = '1'; RAISE USING MESSAGE = FORMAT("Something went wrong: %s", @@error.message); RETURN; END; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/handle_exception.yml000066400000000000000000000201701451700765000256530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1b4e7316d947b3082776f2b5d29aa85448671a8fc632f1dda2e4c83325bea6d2 file: - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: one_table - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - naked_identifier: x - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '10' - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE naked_identifier: y data_type: data_type_identifier: INT64 - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: y comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: y - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: '"An error happened"' - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '100' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: FORMAT bracketed: - start_bracket: ( - expression: quoted_literal: '"Something went wrong: %s"' - comma: ',' - expression: system_variable: double_at_sign_literal: '@@error' semi_structured_expression: dot: . 
naked_identifier: message - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - multi_statement_segment: begin_statement: - keyword: BEGIN - statement: export_statement: - keyword: EXPORT - keyword: DATA - keyword: OPTIONS - bracketed: - start_bracket: ( - export_option: uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gs://bucket/folder/*.csv'" - comma: ',' - export_option: format - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - comma: ',' - export_option: overwrite - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: header - comparison_operator: raw_comparison_operator: '=' - keyword: 'true' - comma: ',' - export_option: field_delimiter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: field1 - comma: ',' - select_clause_element: column_reference: naked_identifier: field2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field1 limit_clause: keyword: LIMIT numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: / - numeric_literal: '0' - statement_terminator: ; - keyword: EXCEPTION - keyword: WHEN - keyword: ERROR - keyword: THEN - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: mydataset - dot: . - naked_identifier: table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: field1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1'" - statement_terminator: ; - statement: raise_statement: - keyword: RAISE - keyword: USING - keyword: MESSAGE - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: FORMAT bracketed: - start_bracket: ( - expression: quoted_literal: '"Something went wrong: %s"' - comma: ',' - expression: system_variable: double_at_sign_literal: '@@error' semi_structured_expression: dot: . naked_identifier: message - end_bracket: ) - statement_terminator: ; - statement: return_statement: keyword: RETURN - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/insert.sql000066400000000000000000000003571451700765000236510ustar00rootroot00000000000000INSERT INTO books (title, author) VALUES ('The Great Gatsby', 'F. Scott Fitzgerald'); INSERT books (title, author) VALUES ('The Great Gatsby', 'F. Scott Fitzgerald'); INSERT INTO `project.dataset.table` ( SELECT * FROM table1 ); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/insert.yml000066400000000000000000000043211451700765000236460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3924160b0b1aa58fa83cfb474ecceedd60a3fc3ba313ca6b2d3f4c3eb1a78b94 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: books - bracketed: - start_bracket: ( - column_reference: naked_identifier: title - comma: ',' - column_reference: naked_identifier: author - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'The Great Gatsby'" - comma: ',' - quoted_literal: "'F. Scott Fitzgerald'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: keyword: INSERT table_reference: naked_identifier: books bracketed: - start_bracket: ( - column_reference: naked_identifier: title - comma: ',' - column_reference: naked_identifier: author - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - quoted_literal: "'The Great Gatsby'" - comma: ',' - quoted_literal: "'F. Scott Fitzgerald'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: quoted_identifier: '`project.dataset.table`' - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/interval_function.sql000066400000000000000000000004741451700765000260760ustar00rootroot00000000000000SELECT TIMESTAMP_TRUNC(TIMESTAMP_ADD(session_start.eventTimestamp, INTERVAL cast(TIMESTAMP_DIFF(session_end.eventTimestamp, session_start.eventTimestamp, SECOND)/2 AS int64) second), HOUR) AS avgAtHour, TIME_ADD(time1, INTERVAL 10 MINUTE) AS after, DATE_SUB(time2, INTERVAL 5 YEAR) AS before FROM dummy; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/interval_function.yml000066400000000000000000000110751451700765000260770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d676d56a69954ae04fd98affa2f043e777209f36bcc8e3a1d896d48ff8a3c139 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: TIMESTAMP_TRUNC bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TIMESTAMP_ADD bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: session_start - dot: . - naked_identifier: eventTimestamp - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: TIMESTAMP_DIFF bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: session_end - dot: . - naked_identifier: eventTimestamp - comma: ',' - expression: column_reference: - naked_identifier: session_start - dot: . 
- naked_identifier: eventTimestamp - comma: ',' - date_part: SECOND - end_bracket: ) binary_operator: / numeric_literal: '2' keyword: AS data_type: data_type_identifier: int64 end_bracket: ) date_part: second - end_bracket: ) comma: ',' date_part: HOUR end_bracket: ) alias_expression: keyword: AS naked_identifier: avgAtHour - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TIME_ADD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time1 - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '10' date_part: MINUTE - end_bracket: ) alias_expression: keyword: AS naked_identifier: after - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATE_SUB bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time2 - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '5' date_part: YEAR - end_bracket: ) alias_expression: keyword: AS naked_identifier: before from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dummy statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/is_not_distinct.sql000066400000000000000000000020131451700765000255300ustar00rootroot00000000000000-- 1. Column distinctness in SELECT expression -- TODO allow this to work without brackets SELECT (a_column IS DISTINCT FROM b_column) FROM t_table; SELECT (b_column IS NOT DISTINCT FROM c_column) FROM t_table; -- 2. Column distinctness in WHERE expression SELECT a_column FROM t_table WHERE a_column IS DISTINCT FROM b_column; SELECT a_column FROM t_table WHERE a_column IS NOT DISTINCT FROM b_column; -- 3. Column distinctness in JOIN expression SELECT t_table_1.a_column FROM t_table_1 INNER JOIN t_table_2 ON t_table_1.a_column IS DISTINCT FROM t_table_2.a_column; SELECT t_table_1.a_column FROM t_table_1 INNER JOIN t_table_2 ON t_table_1.a_column IS NOT DISTINCT FROM t_table_2.a_column; -- 4. Column distinctness in MERGE expression MERGE INTO t_table_1 USING t_table_2 ON t_table_1.a_column IS DISTINCT FROM t_table_2.a_column WHEN NOT MATCHED THEN INSERT (a) VALUES (b); MERGE INTO t_table_1 USING t_table_2 ON t_table_1.a_column IS NOT DISTINCT FROM t_table_2.a_column WHEN NOT MATCHED THEN INSERT (a) VALUES (b); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/is_not_distinct.yml000066400000000000000000000177101451700765000255440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0d9ed9cabb7b1dedcb113c5b3e1e927f2224ba7002a18e9108d5c67388db5f05 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: b_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: c_column end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . 
- naked_identifier: a_column - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t_table_1 - keyword: USING - table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t_table_1 - keyword: USING - table_reference: naked_identifier: t_table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_table_1 - dot: . - naked_identifier: a_column - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: t_table_2 - dot: . - naked_identifier: a_column - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/literals_with_data_type_and_quoted.sql000066400000000000000000000027371451700765000314600ustar00rootroot00000000000000-- https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical?hl=en SELECT NUMERIC '0'; SELECT NUMERIC '0' = NUMERIC '0'; SELECT NUMERIC "0"; SELECT NUMERIC "0" = NUMERIC "0"; SELECT BIGNUMERIC '0'; SELECT BIGNUMERIC '0' = BIGNUMERIC '0'; SELECT BIGNUMERIC "0"; SELECT BIGNUMERIC "0" = BIGNUMERIC "0"; SELECT DATE '2014-09-27'; SELECT DATE '2014-09-27' = DATE '2014-09-27'; SELECT DATE "2014-09-27"; SELECT DATE "2014-09-27" = DATE "2014-09-27"; SELECT TIME '12:30:00.45'; SELECT TIME '12:30:00.45' = TIME '12:30:00.45'; SELECT TIME "12:30:00.45"; SELECT TIME "12:30:00.45" = TIME "12:30:00.45"; SELECT DATETIME '2014-09-27 12:30:00.45'; SELECT DATETIME '2014-09-27 12:30:00.45' = DATETIME '2014-09-27 12:30:00.45'; SELECT DATETIME "2014-09-27 12:30:00.45"; SELECT DATETIME "2014-09-27 12:30:00.45" = DATETIME "2014-09-27 12:30:00.45"; SELECT TIMESTAMP '2014-09-27 12:30:00.45-08'; SELECT TIMESTAMP '2014-09-27 12:30:00.45-08' = TIMESTAMP '2014-09-27 12:30:00.45-08'; SELECT TIMESTAMP "2014-09-27 12:30:00.45-08"; SELECT TIMESTAMP "2014-09-27 12:30:00.45-08" = TIMESTAMP "2014-09-27 12:30:00.45-08"; SELECT INTERVAL '10:20:30.52' HOUR TO SECOND; SELECT INTERVAL '10:20:30.52' HOUR TO SECOND = INTERVAL '10:20:30.52' HOUR TO SECOND; SELECT INTERVAL "10:20:30.52" HOUR TO SECOND; SELECT INTERVAL "10:20:30.52" HOUR TO SECOND = INTERVAL "10:20:30.52" HOUR TO SECOND; SELECT JSON '{}'; SELECT JSON '{}' IS NOT NULL; SELECT JSON "{}"; SELECT JSON "{}" IS NOT NULL; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/literals_with_data_type_and_quoted.yml000066400000000000000000000273101451700765000314540ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 086599cb9d8417775d7067ee02c60261b7c4311d5d6defd43db83ebff6bfd85d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: NUMERIC quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: NUMERIC - quoted_literal: "'0'" - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: NUMERIC - quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: NUMERIC quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: NUMERIC - quoted_literal: '"0"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: NUMERIC - quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: BIGNUMERIC quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: "'0'" - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: "'0'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: BIGNUMERIC quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: '"0"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: BIGNUMERIC - quoted_literal: '"0"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATE date_constructor_literal: "'2014-09-27'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: DATE - date_constructor_literal: "'2014-09-27'" - comparison_operator: raw_comparison_operator: '=' - keyword: DATE - date_constructor_literal: "'2014-09-27'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: DATE quoted_literal: '"2014-09-27"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: DATE - quoted_literal: '"2014-09-27"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: DATE - quoted_literal: '"2014-09-27"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIME date_constructor_literal: "'12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: 
keyword: SELECT select_clause_element: expression: - keyword: TIME - date_constructor_literal: "'12:30:00.45'" - comparison_operator: raw_comparison_operator: '=' - keyword: TIME - date_constructor_literal: "'12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: TIME quoted_literal: '"12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: TIME - quoted_literal: '"12:30:00.45"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: TIME - quoted_literal: '"12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: DATETIME date_constructor_literal: "'2014-09-27 12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: DATETIME - date_constructor_literal: "'2014-09-27 12:30:00.45'" - comparison_operator: raw_comparison_operator: '=' - keyword: DATETIME - date_constructor_literal: "'2014-09-27 12:30:00.45'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: DATETIME quoted_literal: '"2014-09-27 12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: DATETIME - quoted_literal: '"2014-09-27 12:30:00.45"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: DATETIME - quoted_literal: '"2014-09-27 12:30:00.45"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - keyword: TIMESTAMP - date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - comparison_operator: raw_comparison_operator: '=' - keyword: TIMESTAMP - date_constructor_literal: "'2014-09-27 12:30:00.45-08'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: TIMESTAMP quoted_literal: '"2014-09-27 12:30:00.45-08"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: TIMESTAMP - quoted_literal: '"2014-09-27 12:30:00.45-08"' - comparison_operator: raw_comparison_operator: '=' - data_type: data_type_identifier: TIMESTAMP - quoted_literal: '"2014-09-27 12:30:00.45-08"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" - date_part: HOUR - keyword: TO - date_part: SECOND - comparison_operator: raw_comparison_operator: '=' - interval_expression: - keyword: INTERVAL - expression: quoted_literal: "'10:20:30.52'" 
- date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - comparison_operator: raw_comparison_operator: '=' - interval_expression: - keyword: INTERVAL - expression: quoted_literal: '"10:20:30.52"' - date_part: HOUR - keyword: TO - date_part: SECOND - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: JSON quoted_literal: "'{}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: JSON - quoted_literal: "'{}'" - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: data_type_identifier: JSON quoted_literal: '"{}"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - data_type: data_type_identifier: JSON - quoted_literal: '"{}"' - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/merge_into.sql000066400000000000000000000050061451700765000244710ustar00rootroot00000000000000MERGE dataset.detailedinventory t USING dataset.inventory s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); -- optional INTO MERGE INTO dataset.detailedinventory t USING dataset.inventory s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity WHEN NOT MATCHED THEN INSERT (product, quantity) VALUES(product, quantity); MERGE dataset.newarrivals t USING (SELECT * FROM dataset.newarrivals WHERE warehouse != 'warehouse #2') s ON t.product = s.product WHEN MATCHED AND t.warehouse = 'warehouse #1' THEN UPDATE SET quantity = t.quantity + 20 WHEN MATCHED THEN DELETE; MERGE dataset.inventory t USING (SELECT product, quantity, state FROM dataset.newarrivals INNER JOIN dataset.warehouse ON dataset.newarrivals.warehouse = dataset.warehouse.warehouse) s ON t.product = s.product WHEN MATCHED AND state = 'CA' THEN UPDATE SET quantity = t.quantity + s.quantity WHEN MATCHED THEN DELETE; MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; -- INSERT ROW MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN NOT MATCHED THEN INSERT ROW; -- Optional BY TARGET MERGE dataset.inventory t USING dataset.newarrivals s 
ON t.product = s.product WHEN NOT MATCHED BY TARGET AND quantity < 20 THEN INSERT(product, quantity, supply_constrained, comments) VALUES(product, quantity, TRUE) WHEN NOT MATCHED BY TARGET THEN INSERT(product, quantity, supply_constrained) VALUES(product, quantity, FALSE); -- NOT MATCHED BY SOURCE MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN NOT MATCHED BY SOURCE THEN UPDATE SET quantity = t.quantity + s.quantity; -- Merge using Select without alias MERGE dataset.NewArrivals USING (SELECT * FROM dataset.NewArrivals WHERE warehouse <> 'warehouse #2') ON FALSE WHEN MATCHED THEN DELETE sqlfluff-2.3.5/test/fixtures/dialects/bigquery/merge_into.yml000066400000000000000000000601331451700765000244750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9e6988348c99ebb219984d286dc8aa9beaaab8121d85729dcf544e6479c2c596 file: - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: detailedinventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: detailedinventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: inventory - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: newarrivals - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals where_clause: keyword: WHERE expression: column_reference: naked_identifier: warehouse comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' quoted_literal: "'warehouse #2'" end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: warehouse comparison_operator: raw_comparison_operator: '=' quoted_literal: "'warehouse #1'" - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity binary_operator: + numeric_literal: '20' - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: product - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: warehouse - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - dot: . - naked_identifier: warehouse - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dataset - dot: . - naked_identifier: warehouse - dot: . - naked_identifier: warehouse end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: product - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'CA'" - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - keyword: ROW - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: product - merge_match: - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: TARGET - keyword: AND - expression: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: < numeric_literal: '20' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - comma: ',' - column_reference: naked_identifier: comments - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'TRUE' - end_bracket: ) - not_matched_by_target_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: TARGET - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: product - comma: ',' - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: supply_constrained - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: product - comma: ',' - expression: column_reference: naked_identifier: quantity - comma: ',' - boolean_literal: 'FALSE' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: inventory - alias_expression: naked_identifier: t - keyword: USING - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: newarrivals - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: product - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: product - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: BY - keyword: SOURCE - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: quantity comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: quantity - binary_operator: + - column_reference: - naked_identifier: s - dot: . - naked_identifier: quantity - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: dataset - dot: . - naked_identifier: NewArrivals - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . 
- naked_identifier: NewArrivals where_clause: keyword: WHERE expression: column_reference: naked_identifier: warehouse comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "'warehouse #2'" end_bracket: ) - join_on_condition: keyword: 'ON' expression: boolean_literal: 'FALSE' - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE sqlfluff-2.3.5/test/fixtures/dialects/bigquery/normalize_function.sql000066400000000000000000000021201451700765000262400ustar00rootroot00000000000000SELECT col1, NORMALIZE('\u00ea', NFD) AS a, NORMALIZE('\u0065\u0302', NFD) AS b, NORMALIZE_AND_CASEFOLD('\u00ea', NFD) AS c, NORMALIZE_AND_CASEFOLD('\u0065\u0302', NFD) AS d; SELECT a, b, a = b as normalized FROM (SELECT NORMALIZE('\u00ea') as a, NORMALIZE('\u0065\u0302') as b); WITH EquivalentNames AS ( SELECT name FROM UNNEST([ 'Jane\u2004Doe', 'John\u2004Smith', 'Jane\u2005Doe', 'Jane\u2006Doe', 'John Smith']) AS name ) SELECT NORMALIZE(name, NFKC) AS normalized_name, COUNT(*) AS name_count FROM EquivalentNames GROUP BY 1; SELECT a, b, NORMALIZE(a) = NORMALIZE(b) as normalized, NORMALIZE_AND_CASEFOLD(a) = NORMALIZE_AND_CASEFOLD(b) as normalized_with_case_folding FROM (SELECT 'The red barn' AS a, 'The Red Barn' AS b); SELECT a, b, NORMALIZE_AND_CASEFOLD(a, NFD)=NORMALIZE_AND_CASEFOLD(b, NFD) AS nfd, NORMALIZE_AND_CASEFOLD(a, NFC)=NORMALIZE_AND_CASEFOLD(b, NFC) AS nfc, NORMALIZE_AND_CASEFOLD(a, NFKD)=NORMALIZE_AND_CASEFOLD(b, NFKD) AS nkfd, NORMALIZE_AND_CASEFOLD(a, NFKC)=NORMALIZE_AND_CASEFOLD(b, NFKC) AS nkfc sqlfluff-2.3.5/test/fixtures/dialects/bigquery/normalize_function.yml000066400000000000000000000336561451700765000262640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cd07c5fe6f5104e9a01bfaa3e1436dd646b13baedf7513db4d1a52ae601358c0 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" comma: ',' keyword: NFD end_bracket: ) alias_expression: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" comma: ',' keyword: NFD end_bracket: ) alias_expression: keyword: AS naked_identifier: b - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" comma: ',' keyword: NFD end_bracket: ) alias_expression: keyword: AS naked_identifier: c - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" comma: ',' keyword: NFD end_bracket: ) alias_expression: keyword: AS naked_identifier: d - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b alias_expression: keyword: as naked_identifier: normalized from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: quoted_literal: "'\\u00ea'" end_bracket: ) alias_expression: keyword: as naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: quoted_literal: "'\\u0065\\u0302'" end_bracket: ) alias_expression: keyword: as naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: EquivalentNames keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - quoted_literal: "'Jane\\u2004Doe'" - comma: ',' - quoted_literal: "'John\\u2004Smith'" - comma: ',' - quoted_literal: "'Jane\\u2005Doe'" - comma: ',' - quoted_literal: "'Jane\\u2006Doe'" - comma: ',' - quoted_literal: "'John Smith'" - end_square_bracket: ']' end_bracket: ) alias_expression: keyword: AS naked_identifier: name end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: column_reference: naked_identifier: name comma: ',' keyword: NFKC end_bracket: ) alias_expression: keyword: AS naked_identifier: 
normalized_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: name_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EquivalentNames groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: keyword: as naked_identifier: normalized - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: keyword: as naked_identifier: normalized_with_case_folding from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'The red barn'" alias_expression: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'The Red Barn'" alias_expression: keyword: AS naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFD end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFD end_bracket: ) alias_expression: keyword: AS naked_identifier: nfd - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFC end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFC end_bracket: ) alias_expression: keyword: AS naked_identifier: nfc - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: 
naked_identifier: a comma: ',' keyword: NFKD end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFKD end_bracket: ) alias_expression: keyword: AS naked_identifier: nkfd - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: a comma: ',' keyword: NFKC end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: NORMALIZE_AND_CASEFOLD bracketed: start_bracket: ( expression: column_reference: naked_identifier: b comma: ',' keyword: NFKC end_bracket: ) alias_expression: keyword: AS naked_identifier: nkfc sqlfluff-2.3.5/test/fixtures/dialects/bigquery/parameters.sql000066400000000000000000000011001451700765000244730ustar00rootroot00000000000000--bigquery allows for named params like @param or ordered params in ? select "1" from x where y = @z_test1; select datetime_trunc(@z2, week); select datetime_trunc(@_ab, week); select datetime_trunc(@a, week); select parse_date("%Y%m", year); -- this should parse year as an identifier select "1" from x where y = ?; select concat("1", ?); select id, datetime_trunc(@z2, week), sum(something) over( partition by some_id order by some_date rows BETWEEN @query_parameter PRECEDING AND CURRENT ROW) as some_sum from some_table where some_column = @query_parameter2; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/parameters.yml000066400000000000000000000146031451700765000245110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6aa7cecf1e4e9c9564e77ea114924339272ff388cb0dcfe19c4a71bed8c87193 file: - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"1"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: where expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' parameterized_expression: at_sign_literal: '@z_test1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@z2' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@_ab' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datetime_trunc bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@a' comma: ',' date_part: week end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: parse_date bracketed: start_bracket: ( expression: quoted_literal: '"%Y%m"' comma: ',' date_part: year end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"1"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x where_clause: keyword: where expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' parameterized_expression: question_mark: '?' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: concat bracketed: - start_bracket: ( - expression: quoted_literal: '"1"' - comma: ',' - expression: parameterized_expression: question_mark: '?' 
- end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: datetime_trunc bracketed: start_bracket: ( expression: parameterized_expression: at_sign_literal: '@z2' comma: ',' date_part: week end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: something end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: some_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: some_date frame_clause: - keyword: rows - keyword: BETWEEN - parameterized_expression: at_sign_literal: '@query_parameter' - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: as naked_identifier: some_sum from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: where expression: column_reference: naked_identifier: some_column comparison_operator: raw_comparison_operator: '=' parameterized_expression: at_sign_literal: '@query_parameter2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/procedural_statements.sql000066400000000000000000000012661451700765000267540ustar00rootroot00000000000000DECLARE x INT64 DEFAULT 0; REPEAT SET x = x + 1; SELECT x; UNTIL x >= 3 END REPEAT; WHILE true DO SELECT 1; CONTINUE; END WHILE; IF x >= 10 THEN SELECT x; END IF; IF x >= 10 THEN SET x = x - 1; ELSEIF x < 0 THEN SET x = x + 1; ELSEIF x = 0 THEN SET x = x + 1; ELSE SELECT x; END IF; LOOP SET x = x + 1; IF x >= 10 THEN LEAVE; ELSE CONTINUE; END IF; END LOOP; SELECT x; DECLARE heads BOOL; DECLARE heads_count INT64 DEFAULT 0; LOOP SET heads = RAND() < 0.5; IF heads THEN SELECT 'Heads!'; SET heads_count = heads_count + 1; CONTINUE; END IF; SELECT 'Tails!'; BREAK; END LOOP; SELECT CONCAT(CAST(heads_count AS STRING), ' heads in a row'); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/procedural_statements.yml000066400000000000000000000234051451700765000267550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c0f080db908906d750c6204125990b52d6d37b21bd173359fcf77ee795356633 file: - statement: declare_segment: - keyword: DECLARE - naked_identifier: x - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '0' - statement_terminator: ; - multi_statement_segment: repeat_statement: - keyword: REPEAT - repeat_statements: - statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - keyword: UNTIL - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '3' - keyword: END - keyword: REPEAT - statement_terminator: ; - multi_statement_segment: while_statement: - keyword: WHILE - expression: boolean_literal: 'true' - keyword: DO - while_statements: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: continue_statement: keyword: CONTINUE - statement_terminator: ; - keyword: END - keyword: WHILE - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: '-' numeric_literal: '1' statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: < numeric_literal: '0' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - if_statements: statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: ELSE - if_statements: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - multi_statement_segment: loop_statement: - keyword: LOOP - loop_statements: - statement: set_segment: keyword: SET naked_identifier: x comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: x binary_operator: + numeric_literal: 
'1' - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: x comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' numeric_literal: '10' - keyword: THEN - if_statements: statement: leave_statement: keyword: LEAVE statement_terminator: ; - keyword: ELSE - if_statements: statement: continue_statement: keyword: CONTINUE statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x - statement_terminator: ; - statement: declare_segment: keyword: DECLARE naked_identifier: heads data_type: data_type_identifier: BOOL - statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - naked_identifier: heads_count - data_type: data_type_identifier: INT64 - keyword: DEFAULT - numeric_literal: '0' - statement_terminator: ; - multi_statement_segment: loop_statement: - keyword: LOOP - loop_statements: - statement: set_segment: keyword: SET naked_identifier: heads comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: RAND bracketed: start_bracket: ( end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '0.5' - statement_terminator: ; - multi_statement_segment: if_statement: - keyword: IF - expression: column_reference: naked_identifier: heads - keyword: THEN - if_statements: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Heads!'" - statement_terminator: ; - statement: set_segment: keyword: SET naked_identifier: heads_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: heads_count binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: continue_statement: keyword: CONTINUE - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Tails!'" - statement_terminator: ; - statement: break_statement: keyword: BREAK - statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONCAT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: column_reference: naked_identifier: heads_count keyword: AS data_type: data_type_identifier: STRING end_bracket: ) - comma: ',' - expression: quoted_literal: "' heads in a row'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_1_gt_0.sql000066400000000000000000000000171451700765000247460ustar00rootroot00000000000000SELECT (1 > 0) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_1_gt_0.yml000066400000000000000000000014321451700765000247520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 51a44bc3e8acaeffbccf25c241dff6ac626037ba22b0d7d1bb369a967ee210bd file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_1_lt_0.sql000066400000000000000000000000171451700765000247530ustar00rootroot00000000000000SELECT (1 < 0) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_1_lt_0.yml000066400000000000000000000014301451700765000247550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5734ec8350f121daf28d0b5b6fd95c469e24f54648fff9c274fd016aae92976f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '0' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_case.sql000066400000000000000000000002021451700765000246040ustar00rootroot00000000000000select case fruit_code when 0 then 'apple' when 1 then 'banana' when 2 then 'cashew' end as fruit from some_table sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_case.yml000066400000000000000000000032351451700765000246170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8ab76c0358b7b17419c23a31a4dc142f91cef693d91f9693909b77d005229120 file: statement: select_statement: select_clause: keyword: select select_clause_element: expression: case_expression: - keyword: case - expression: column_reference: naked_identifier: fruit_code - when_clause: - keyword: when - expression: numeric_literal: '0' - keyword: then - expression: quoted_literal: "'apple'" - when_clause: - keyword: when - expression: numeric_literal: '1' - keyword: then - expression: quoted_literal: "'banana'" - when_clause: - keyword: when - expression: numeric_literal: '2' - keyword: then - expression: quoted_literal: "'cashew'" - keyword: end alias_expression: keyword: as naked_identifier: fruit from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_column_object_with_keyword.sql000066400000000000000000000003141451700765000313170ustar00rootroot00000000000000-- current is a reserved word but keywords are allowed as part of a nested object name SELECT table1.current.column, table1.object.current.column, table1.object.nested.current.column, FROM table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_column_object_with_keyword.yml000066400000000000000000000030051451700765000313210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8320edbbddda2850279525aa5140e648fbcd0e3d164bcadb11a5194ebfee66f8 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: object - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' - select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: object - dot: . - naked_identifier: nested - dot: . - naked_identifier_all: current - dot: . - naked_identifier: column - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_datetime.sql000066400000000000000000000004061451700765000254730ustar00rootroot00000000000000-- Test BigQuery specific date identifiers. SELECT gmv._merchant_key, gmv.order_created_at, EXTRACT(DAY FROM gmv.order_created_at) AS order_day FROM my_table as gmv WHERE gmv.datetime >= DATE_TRUNC(DATE_SUB(CURRENT_DATE(), INTERVAL 2 YEAR), year) LIMIT 1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_datetime.yml000066400000000000000000000057671451700765000255140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f6ebeb0f2e31eb62b7cbec5f29559737c406d958eabb7334ce5f9c6d66b20700 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: _merchant_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: order_created_at - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: - naked_identifier: gmv - dot: . - naked_identifier: order_created_at end_bracket: ) alias_expression: keyword: AS naked_identifier: order_day from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: keyword: as naked_identifier: gmv where_clause: keyword: WHERE expression: column_reference: - naked_identifier: gmv - dot: . 
- naked_identifier: datetime comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' function: function_name: function_name_identifier: DATE_TRUNC bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: DATE_SUB bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_DATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '2' date_part: YEAR - end_bracket: ) comma: ',' date_part: year end_bracket: ) limit_clause: keyword: LIMIT numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_empty_array_literal.sql000066400000000000000000000000121451700765000277400ustar00rootroot00000000000000SELECT [] sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_empty_array_literal.yml000066400000000000000000000011341451700765000277500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f45e520f24c3363bbc9a99cb0361bdbd9ef1959f5a177a1152b88479a635d45 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: start_square_bracket: '[' end_square_bracket: ']' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_example.sql000066400000000000000000000016361451700765000253400ustar00rootroot00000000000000-- This query should also parse in ANSI, but as a bigquery example -- it probably lives here. In particular it has an un-bracketed -- select clause within a function, and array notation which -- makes it a useful test case. WITH age_buckets_bit_array AS ( SELECT bucket_id, num_ranges, min_age, ARRAY(SELECT CAST(num AS INT64) FROM UNNEST(SPLIT(binary, '')) AS num) AS bits, age_label FROM age_buckets ), bucket_abundance AS ( SELECT bucket_id (count_18_24 * bits[OFFSET(0)] + count_25_34 * bits[OFFSET(1)] + count_35_44 * bits[OFFSET(2)] + count_45_54 * bits[OFFSET(3)] + count_55_64 * bits[OFFSET(4)] + count_65_plus * bits[OFFSET(5)]) / audience_size AS relative_abundance FROM audience_counts_gender_age CROSS JOIN age_buckets_bit_array ) SELECT * FROM age_buckets_bit_array JOIN bucket_abundance USING (bucket_id) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_example.yml000066400000000000000000000260551451700765000253440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 092ca35ac4c316050095ee2d2afb538da887757caae2d561b386f76d9ede47bd file: statement: with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: age_buckets_bit_array keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: bucket_id - comma: ',' - select_clause_element: column_reference: naked_identifier: num_ranges - comma: ',' - select_clause_element: column_reference: naked_identifier: min_age - comma: ',' - select_clause_element: expression: array_expression: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: column_reference: naked_identifier: num keyword: AS data_type: data_type_identifier: INT64 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SPLIT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: binary - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: num end_bracket: ) alias_expression: keyword: AS naked_identifier: bits - comma: ',' - select_clause_element: column_reference: naked_identifier: age_label from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: age_buckets end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: bucket_abundance keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: bucket_id bracketed: start_bracket: ( expression: - column_reference: naked_identifier: count_18_24 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_25_34 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_35_44 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_45_54 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_55_64 - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: 
start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '4' end_bracket: ) end_square_bracket: ']' - binary_operator: + - column_reference: naked_identifier: count_65_plus - binary_operator: '*' - column_reference: naked_identifier: bits - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) end_square_bracket: ']' end_bracket: ) binary_operator: / column_reference: naked_identifier: audience_size alias_expression: keyword: AS naked_identifier: relative_abundance from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: audience_counts_gender_age join_clause: - keyword: CROSS - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: age_buckets_bit_array end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: age_buckets_bit_array join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: bucket_abundance - keyword: USING - bracketed: start_bracket: ( naked_identifier: bucket_id end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_except.sql000066400000000000000000000007321451700765000251710ustar00rootroot00000000000000SELECT * EXCEPT (seqnum) REPLACE (foo as bar, baz foobar) FROM my_tbl; -- Catch potential bugs in unions select * except (foo) from some_table union all select * from another_table; -- Except is allowed after other fields select 1 + 2 as calculated, * except (irrelevant) from my_tbl; -- This might be redundant with the example above. -- Demonstrates using multiple except clauses. select foo.* except (some_column), bar.* except (other_column) from my_tbl; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_except.yml000066400000000000000000000112311451700765000251670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 20669da1ec7b2ff521f943c541e2c9f914b4af17af1bc77738eef8c26fb40131 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: EXCEPT bracketed: start_bracket: ( naked_identifier: seqnum end_bracket: ) select_replace_clause: keyword: REPLACE bracketed: - start_bracket: ( - select_clause_element: column_reference: naked_identifier: foo alias_expression: keyword: as naked_identifier: bar - comma: ',' - select_clause_element: column_reference: naked_identifier: baz alias_expression: naked_identifier: foobar - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' alias_expression: keyword: as naked_identifier: calculated - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: irrelevant end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: foo dot: . star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: some_column end_bracket: ) - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: bar dot: . star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: other_column end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_except_replace.sql000066400000000000000000000002051451700765000266570ustar00rootroot00000000000000-- We can call functions when replacing a field select * except(foo) replace (concat(fruit, 'berry') as fruit) from some_table sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_except_replace.yml000066400000000000000000000033451451700765000266710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b87b7c4bc323ea48b9c76005402f9fad26742d896f7770880092822cf100ef8e file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: foo end_bracket: ) select_replace_clause: keyword: replace bracketed: start_bracket: ( select_clause_element: function: function_name: function_name_identifier: concat bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: fruit - comma: ',' - expression: quoted_literal: "'berry'" - end_bracket: ) alias_expression: keyword: as naked_identifier: fruit end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_extract.sql000066400000000000000000000006131451700765000253510ustar00rootroot00000000000000SELECT EXTRACT(HOUR FROM some_timestamp AT TIME ZONE "UTC"); SELECT EXTRACT(HOUR FROM some_timestamp AT TIME ZONE timezone_column); WITH Input AS (SELECT TIMESTAMP("2008-12-25 05:30:00+00") AS timestamp_value) SELECT EXTRACT(DAY FROM timestamp_value AT TIME ZONE "UTC") AS the_day_utc, EXTRACT(DAY FROM timestamp_value AT TIME ZONE "America/Los_Angeles") AS the_day_california FROM Input; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_extract.yml000066400000000000000000000104321451700765000253530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f69e82d537f53274885f22d0d50064f386a37892894385e9d24aeb7ecffe0777 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: some_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"UTC"' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: HOUR keyword: FROM expression: column_reference: naked_identifier: some_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: column_reference: naked_identifier: timezone_column end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Input keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TIMESTAMP bracketed: start_bracket: ( expression: quoted_literal: '"2008-12-25 05:30:00+00"' end_bracket: ) alias_expression: keyword: AS naked_identifier: timestamp_value end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: timestamp_value time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"UTC"' end_bracket: ) alias_expression: keyword: AS naked_identifier: the_day_utc - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DAY keyword: FROM expression: column_reference: naked_identifier: timestamp_value time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: '"America/Los_Angeles"' end_bracket: ) alias_expression: keyword: AS naked_identifier: the_day_california from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Input - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_for_system_time.sql000066400000000000000000000006011451700765000271040ustar00rootroot00000000000000SELECT user_id FROM lists_emails AS list_emails FOR SYSTEM_TIME AS OF CAST('2019-12-02T20:52:34+00:00' AS TIMESTAMP); SELECT user_id FROM `project.dataset.table1` FOR SYSTEM_TIME AS OF CAST('2020-05-11T14:02:52+00:00' AS TIMESTAMP); SELECT user_id FROM `project.dataset.table1` FOR SYSTEM TIME AS OF CAST('2020-05-11T14:02:52+00:00' AS TIMESTAMP) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_for_system_time.yml000066400000000000000000000064431451700765000271200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a0bc988c91bbf2eec01f4c1ca18bfae91db7fdadf49b9613c98c8eb2ac7177f8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: lists_emails - alias_expression: keyword: AS naked_identifier: list_emails - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: quoted_literal: "'2019-12-02T20:52:34+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: quoted_identifier: '`project.dataset.table1`' - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: quoted_literal: "'2020-05-11T14:02:52+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: quoted_identifier: '`project.dataset.table1`' - keyword: FOR - keyword: SYSTEM - keyword: TIME - keyword: AS - keyword: OF - expression: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: quoted_literal: "'2020-05-11T14:02:52+00:00'" keyword: AS data_type: data_type_identifier: TIMESTAMP end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_function_object_fields.sql000066400000000000000000000006551451700765000304060ustar00rootroot00000000000000SELECT testFunction(a).b AS field, testFunction(a).* AS wildcard, testFunction(a).b.c AS field_with_field, testFunction(a).b.* AS field_with_wildcard, testFunction(a)[OFFSET(0)].* AS field_with_offset_wildcard, testFunction(a)[SAFE_OFFSET(0)].* AS field_with_safe_offset_wildcard, testFunction(a)[ORDINAL(1)].* AS field_with_ordinal_wildcard, testFunction(a)[ORDINAL(1)].a AS field_with_ordinal_field FROM table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_function_object_fields.yml000066400000000000000000000147511451700765000304120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ea825b3ba14b42f2b2e3bc7c24d40c6086e32fb5c5359e9ce157af8c0bccbe80 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: dot: . 
naked_identifier: b alias_expression: keyword: AS naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: dot: . star: '*' alias_expression: keyword: AS naked_identifier: wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: - dot: . - naked_identifier: b - dot: . - naked_identifier: c alias_expression: keyword: AS naked_identifier: field_with_field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) semi_structured_expression: - dot: . - naked_identifier: b - dot: . - star: '*' alias_expression: keyword: AS naked_identifier: field_with_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: OFFSET bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: keyword: AS naked_identifier: field_with_offset_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: SAFE_OFFSET bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: keyword: AS naked_identifier: field_with_safe_offset_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . star: '*' alias_expression: keyword: AS naked_identifier: field_with_ordinal_wildcard - comma: ',' - select_clause_element: function: function_name: function_name_identifier: testFunction bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' semi_structured_expression: dot: . 
naked_identifier: a alias_expression: keyword: AS naked_identifier: field_with_ordinal_field from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 select_function_parameter_order_by_multiple_columns.sql000066400000000000000000000000541451700765000350440ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/bigquerySELECT STRING_AGG(a ORDER BY b, c) FROM foo select_function_parameter_order_by_multiple_columns.yml000066400000000000000000000024161451700765000350520ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/bigquery# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 369719879934cc50e9157180a6c18f02451bcd9c966bf8632f2e2621e25290af file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: STRING_AGG bracketed: start_bracket: ( expression: column_reference: naked_identifier: a aggregate_order_by: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_function_with_named_arguments.sql000066400000000000000000000001431451700765000320060ustar00rootroot00000000000000SELECT ST_GEOGFROMGEOJSON('{"type":"LineString","coordinates":[[1,2],[4,5]]}', make_valid => true) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_function_with_named_arguments.yml000066400000000000000000000017661451700765000320240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3daf256a2d007e8cb5434c78961775b013be5645247426c12fcb758c76a8198a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ST_GEOGFROMGEOJSON bracketed: start_bracket: ( expression: quoted_literal: "'{\"type\":\"LineString\",\"coordinates\":[[1,2],[4,5]]}'" comma: ',' named_argument: naked_identifier: make_valid right_arrow: => expression: boolean_literal: 'true' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_gt_lt.sql000066400000000000000000000000311451700765000250020ustar00rootroot00000000000000SELECT (1 > 0 AND 0 < 1) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_gt_lt.yml000066400000000000000000000017331451700765000250160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
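#
# Note: "<" and ">" are context-sensitive in BigQuery SQL. In an ordinary
# expression, as in the fixture above, they parse as comparison operators
# (the `raw_comparison_operator` nodes below), whereas in a type context such
# as ARRAY<INT64> the same characters act as angle brackets delimiting the
# element type.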
_hash: 980ee3955a2b1cc7004ad794d17d244255158323e7b2ba6a89eb09394f5ae31a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - binary_operator: AND - numeric_literal: '0' - comparison_operator: raw_comparison_operator: < - numeric_literal: '1' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.sql000066400000000000000000000004411451700765000317070ustar00rootroot00000000000000SELECT * FROM project-a.dataset-b.table-c JOIN dataset-c.table-d USING (a); SELECT * FROM a-1a.b.c; SELECT * FROM a-1.b.c; SELECT * FROM project23-123.dataset7-b1.table-2c JOIN dataset12-c1.table-1d USING (a); SELECT col1-col2 AS newcol1, col1 - col2 AS newcol2 FROM table-a123; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_hyphenated_table_name_in_from.yml000066400000000000000000000121061451700765000317120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f00f24816a288acc08abb48e0eb2f4fb427b593666602762f52ae53d5e62f87 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: project - dash: '-' - naked_identifier: a - dot: . - naked_identifier: dataset - dash: '-' - naked_identifier: b - dot: . - naked_identifier: table - dash: '-' - naked_identifier: c join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dash: '-' - naked_identifier: c - dot: . - naked_identifier: table - dash: '-' - naked_identifier: d - keyword: USING - bracketed: start_bracket: ( naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dash: '-' - naked_identifier: 1a - dot: . - naked_identifier: b - dot: . - naked_identifier: c - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dash: '-' - naked_identifier: '1' - dot: . - naked_identifier: b - dot: . - naked_identifier: c - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: project23 - dash: '-' - naked_identifier: '123' - dot: . - naked_identifier: dataset7 - dash: '-' - naked_identifier: b1 - dot: . 
              - naked_identifier: table
              - dash: '-'
              - naked_identifier: 2c
      join_clause:
      - keyword: JOIN
      - from_expression_element:
          table_expression:
            table_reference:
            - naked_identifier: dataset12
            - dash: '-'
            - naked_identifier: c1
            - dot: .
            - naked_identifier: table
            - dash: '-'
            - naked_identifier: 1d
      - keyword: USING
      - bracketed:
          start_bracket: (
          naked_identifier: a
          end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          expression:
          - column_reference:
              naked_identifier: col1
          - binary_operator: '-'
          - column_reference:
              naked_identifier: col2
          alias_expression:
            keyword: AS
            naked_identifier: newcol1
      - comma: ','
      - select_clause_element:
          expression:
          - column_reference:
              naked_identifier: col1
          - binary_operator: '-'
          - column_reference:
              naked_identifier: col2
          alias_expression:
            keyword: AS
            naked_identifier: newcol2
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: table
              - dash: '-'
              - naked_identifier: a123
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_identifiers.sql000066400000000000000000000012771451700765000262110ustar00rootroot00000000000000--Identifiers can start with an underscore in BigQuery
-- and can contain just _ and 0-9
SELECT
    _01
FROM
    _2010_01;

--Identifiers can start with an underscore in BigQuery
-- and can contain just _ and 0-9
SELECT
    col_a AS _
FROM
    table1;

-- TODO: Currently we don't support this but should
-- Table names can contain dashes in FROM and TABLE clauses
-- But reluctant to add to general naked_identifier grammar and not
-- sure worth adding specific syntax for this unless someone requests it
-- https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical
-- SELECT * FROM data-customers-287.mydatabase.mytable;

-- Same as above but quoted
SELECT * FROM `data-customers-287`.mydatabase.mytable;
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_identifiers.yml000066400000000000000000000035101451700765000262050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e51b7806ec8a91df0eb4229e6e648fa4eef597288d3eed965fcea3c0e99ad62a
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: _01
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: _2010_01
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col_a
          alias_expression:
            keyword: AS
            naked_identifier: _
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - quoted_identifier: '`data-customers-287`'
              - dot: .
- naked_identifier: mytable - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_if.sql000066400000000000000000000006471451700765000243040ustar00rootroot00000000000000SELECT client, firstHtml, vary, IF(_cdn_provider != '', 'CDN', 'Origin') AS source, COUNT(0) AS total FROM `httparchive.almanac.requests`, UNNEST(split(REGEXP_REPLACE(REGEXP_REPLACE(LOWER(resp_vary), '\"', ''), '[, ]+|\\\\0', ','), ',')) AS vary WHERE date = '2019-07-01' GROUP BY client, firstHtml, vary, source HAVING vary != '' AND vary IS NOT NULL ORDER BY client DESC, firstHtml DESC, total DESC sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_if.yml000066400000000000000000000141271451700765000243040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f65beb4a183a9fc16750f7bfb5a7b7b9e358759377992c1dfd9cd4994fa30a1 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: client - comma: ',' - select_clause_element: column_reference: naked_identifier: firstHtml - comma: ',' - select_clause_element: column_reference: naked_identifier: vary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: IF bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: _cdn_provider comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' quoted_literal: "''" - comma: ',' - expression: quoted_literal: "'CDN'" - comma: ',' - expression: quoted_literal: "'Origin'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: source - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: numeric_literal: '0' end_bracket: ) alias_expression: keyword: AS naked_identifier: total from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`httparchive.almanac.requests`' - comma: ',' - from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: split bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: REGEXP_REPLACE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: REGEXP_REPLACE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER bracketed: start_bracket: ( expression: column_reference: naked_identifier: resp_vary end_bracket: ) - comma: ',' - expression: quoted_literal: "'\\\"'" - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) - comma: ',' - expression: quoted_literal: "'[, ]+|\\\\\\\\0'" - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: vary where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2019-07-01'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: 
client - comma: ',' - column_reference: naked_identifier: firstHtml - comma: ',' - column_reference: naked_identifier: vary - comma: ',' - column_reference: naked_identifier: source having_clause: keyword: HAVING expression: - column_reference: naked_identifier: vary - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - quoted_literal: "''" - binary_operator: AND - column_reference: naked_identifier: vary - keyword: IS - keyword: NOT - null_literal: 'NULL' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: client - keyword: DESC - comma: ',' - column_reference: naked_identifier: firstHtml - keyword: DESC - comma: ',' - column_reference: naked_identifier: total - keyword: DESC sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_interval_expression.sql000066400000000000000000000000631451700765000300010ustar00rootroot00000000000000SELECT DATE_ADD(CURRENT_DATE(), INTERVAL -1+2 DAY) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_interval_expression.yml000066400000000000000000000024461451700765000300120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d973b9a726c598788c021623fc9aebce1cc2e27e1b0313ed61083bee0407123b file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURRENT_DATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '2' date_part: DAY - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_less_than_greater_than.sql000066400000000000000000000000201451700765000303720ustar00rootroot00000000000000SELECT 1<2, 2>1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_less_than_greater_than.yml000066400000000000000000000015611451700765000304070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95b2a7833108ccca1a76481d7173df106d449b6fda839d0f4483d19bfb3efa23 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '2' - comma: ',' - select_clause_element: expression: - numeric_literal: '2' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_lt_gt.sql000066400000000000000000000000311451700765000250020ustar00rootroot00000000000000SELECT (0 < 1 AND 1 > 0) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_lt_gt.yml000066400000000000000000000017331451700765000250160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: bbf4a1322c6b9005ab54e9218f75baae50477bbe66cea4d4efff99e52523b15e
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          expression:
            bracketed:
              start_bracket: (
              expression:
              - numeric_literal: '0'
              - comparison_operator:
                  raw_comparison_operator: <
              - numeric_literal: '1'
              - binary_operator: AND
              - numeric_literal: '1'
              - comparison_operator:
                  raw_comparison_operator: '>'
              - numeric_literal: '0'
              end_bracket: )
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.sql000066400000000000000000000012571451700765000311600ustar00rootroot00000000000000-- Created this test case in response to issue #989. As of April 25, 2021 and PR #998,
-- this query PARSES without error, but the word ARRAY is parsing as a column name, and
-- the angle brackets < and > are being incorrectly parsed as comparison operators.
-- This is being tracked in a separate issue, 999, not #989, since it's less severe
-- (incorrect parse vs parse failure).
SELECT
    [],
    [false],
    ARRAY<BOOLEAN>[false],
    ['a'] AS strcol1,
    ARRAY<string>['b'] AS strcol2,
    [1.0] AS numcol1,
    ARRAY<NUMERIC>[1.4] AS numcol2,
    [STRUCT("Rudisha" AS name, [23.4, 26.3, 26.4, 26.1] AS splits)]
        AS struct1,
    col1.obj1[safe_offset(1)].a AS struct_safe_offset
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_mixture_of_array_literals.yml000066400000000000000000000113541451700765000311630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
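#
# The fixture above deliberately mixes the two array literal forms: a bare
# bracketed list such as [1.0] parses as a plain `array_literal`, while a
# type-prefixed form such as ARRAY<NUMERIC>[1.4] parses as a
# `typed_array_literal` whose `array_type` records the element `data_type`.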
_hash: cbf066dd0c8317bbf87b4729592f41e65bf7ac4e26cf7eb7d0a29504232c43a4 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' boolean_literal: 'false' end_square_bracket: ']' - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: BOOLEAN end_angle_bracket: '>' array_literal: start_square_bracket: '[' boolean_literal: 'false' end_square_bracket: ']' - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' quoted_literal: "'a'" end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: strcol1 - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: string end_angle_bracket: '>' array_literal: start_square_bracket: '[' quoted_literal: "'b'" end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: strcol2 - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' numeric_literal: '1.0' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: numcol1 - comma: ',' - select_clause_element: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: data_type_identifier: NUMERIC end_angle_bracket: '>' array_literal: start_square_bracket: '[' numeric_literal: '1.4' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: numcol2 - comma: ',' - select_clause_element: array_literal: start_square_bracket: '[' expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: '"Rudisha"' - alias_expression: keyword: AS naked_identifier: name - comma: ',' - array_literal: - start_square_bracket: '[' - numeric_literal: '23.4' - comma: ',' - numeric_literal: '26.3' - comma: ',' - numeric_literal: '26.4' - comma: ',' - numeric_literal: '26.1' - end_square_bracket: ']' - alias_expression: keyword: AS naked_identifier: splits - end_bracket: ) end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: struct1 - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: col1 - dot: . - naked_identifier: obj1 semi_structured_expression: array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: safe_offset bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' dot: . naked_identifier: a alias_expression: keyword: AS naked_identifier: struct_safe_offset sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_ml_predict_with_select.sql000066400000000000000000000003071451700765000304130ustar00rootroot00000000000000SELECT * FROM ML.PREDICT( MODEL `project.dataset.model`, ( SELECT user_id FROM `project.dataset.stats` ) ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_ml_predict_with_select.yml000066400000000000000000000031411451700765000304140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e13d492f83f781ed68921a0c372915028ba8a71cc32b5564714b9e1b9f03ad00 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: ml_table_expression: keyword: ML dot: . naked_identifier: PREDICT bracketed: start_bracket: ( keyword: MODEL object_reference: quoted_identifier: '`project.dataset.model`' comma: ',' bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.stats`' end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_ml_weights.sql000066400000000000000000000001161451700765000260370ustar00rootroot00000000000000SELECT * FROM ML.WEIGHTS(MODEL `project.dataset.model`) ORDER BY 2, 1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_ml_weights.yml000066400000000000000000000021361451700765000260450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 745e8980f1829e521701aed0b6d4ef96d4546f7d6a1d4d5b4b4b1dd2ccc54075 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: ml_table_expression: keyword: ML dot: . naked_identifier: WEIGHTS bracketed: start_bracket: ( keyword: MODEL object_reference: quoted_identifier: '`project.dataset.model`' end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '2' - comma: ',' - numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_multi_except.sql000066400000000000000000000001431451700765000263770ustar00rootroot00000000000000select d.*, r.* except(date_key) from my_table as d inner join my_other_table as r using(date_key) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_multi_except.yml000066400000000000000000000034201451700765000264020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2371f514d669a18fb8476ccff9b246fa0b1d1009086794ee00fc088e19894230 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: r dot: . 
star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: date_key end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table alias_expression: keyword: as naked_identifier: d join_clause: - keyword: inner - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: my_other_table alias_expression: keyword: as naked_identifier: r - keyword: using - bracketed: start_bracket: ( naked_identifier: date_key end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_natural_join.sql000066400000000000000000000003551451700765000263670ustar00rootroot00000000000000SELECT * FROM table1 natural -- this should parse as an alias as BigQuery does not have NATURAL joins JOIN table2; SELECT * FROM table1 natural -- this should parse as an alias as BigQuery does not have NATURAL joins INNER JOIN table2; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_natural_join.yml000066400000000000000000000034151451700765000263710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1258f6496f9e52162346f19a12037e8f8bb2365aaae290ed0359ae0b88e27904 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_pivot.sql000066400000000000000000000021241451700765000250370ustar00rootroot00000000000000SELECT * FROM (SELECT * FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3', 'Q4')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3', 'Q4')); SELECT * FROM (SELECT * FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT * FROM (SELECT sales, quarter FROM Produce) PIVOT(SUM(sales), COUNT(sales) FOR quarter IN ('Q1', 'Q2', 'Q3')); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR REPLACE(LOWER(`media_type`), " ", "_") IN ( "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv" )); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR `media_type` IN ( "cinema", "digital", "direct_mail", "door_drops", "outdoor", "press", "radio", "tv" )); SELECT col1, col2 FROM table1 PIVOT(SUM(`grand_total`) FOR '2' || '1' IN ( "cinema", "digital", "direct_mail", 
"door_drops", "outdoor", "press", "radio", "tv" )); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_pivot.yml000066400000000000000000000407611451700765000250520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e339af111db52b3b16ec80bdaa37d553acfb835383a16a3629faa621864630b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - comma: ',' - quoted_literal: "'Q4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - comma: ',' - quoted_literal: "'Q4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: 
FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: sales - comma: ',' - select_clause_element: column_reference: naked_identifier: quarter from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: sales end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'Q1'" - comma: ',' - quoted_literal: "'Q2'" - comma: ',' - quoted_literal: "'Q3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: function: function_name: function_name_identifier: REPLACE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: LOWER bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`media_type`' end_bracket: ) - comma: ',' - expression: 
quoted_literal: '" "' - comma: ',' - expression: quoted_literal: '"_"' - end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' - comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: column_reference: quoted_identifier: '`media_type`' - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' - comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '`grand_total`' end_bracket: ) - keyword: FOR - pivot_for_clause: expression: - quoted_literal: "'2'" - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'1'" - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: '"cinema"' - comma: ',' - quoted_literal: '"digital"' - comma: ',' - quoted_literal: '"direct_mail"' - comma: ',' - quoted_literal: '"door_drops"' - comma: ',' - quoted_literal: '"outdoor"' - comma: ',' - quoted_literal: '"press"' - comma: ',' - quoted_literal: '"radio"' - comma: ',' - quoted_literal: '"tv"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_quoting.sql000066400000000000000000000002171451700765000253650ustar00rootroot00000000000000SELECT user_id, "some string" as list_id FROM `database.schema.benchmark_user_map` WHERE list_id IS NULL OR user_id IS NULLsqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_quoting.yml000066400000000000000000000024501451700765000253700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
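#
# The quote character drives the node types in this fixture: double (or
# single) quotes delimit string literals (`quoted_literal`, e.g.
# "some string"), while backticks delimit identifiers (`quoted_identifier`,
# e.g. `database.schema.benchmark_user_map`).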
_hash: 4ba156700ef9c14dc259c25e20cbcbdcedfd8f81987dc381bfe990ea77be5651 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: user_id - comma: ',' - select_clause_element: quoted_literal: '"some string"' alias_expression: keyword: as naked_identifier: list_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`database.schema.benchmark_user_map`' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: list_id - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: user_id - keyword: IS - null_literal: 'NULL' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_replace.sql000066400000000000000000000003531451700765000253130ustar00rootroot00000000000000SELECT * REPLACE (CAST(1 AS BOOLEAN) AS foo) FROM (SELECT 1 AS foo); -- Single replace select * replace ('thing' as foo) from some_table; -- Multi replace select * REPLACE (quantity/2 AS quantity, 'thing' as foo) from some_table sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_replace.yml000066400000000000000000000073151451700765000253220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 578a679b675f9c46be4d68f0ebf95efbc9151169eae5eaa2ba32185a1dd45f77 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: REPLACE bracketed: start_bracket: ( select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: AS data_type: data_type_identifier: BOOLEAN end_bracket: ) alias_expression: keyword: AS naked_identifier: foo end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: foo end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: start_bracket: ( select_clause_element: quoted_literal: "'thing'" alias_expression: keyword: as naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: REPLACE bracketed: - start_bracket: ( - select_clause_element: expression: column_reference: naked_identifier: quantity binary_operator: / numeric_literal: '2' alias_expression: keyword: AS naked_identifier: quantity - comma: ',' - select_clause_element: quoted_literal: "'thing'" alias_expression: keyword: as naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table 
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_rows_between.sql000066400000000000000000000004611451700765000264030ustar00rootroot00000000000000SELECT is_sensitive, breach_date, total_number_of_affected_accounts, SUM(total_number_of_affected_accounts) OVER ( PARTITION BY is_sensitive ORDER BY is_sensitive, breach_date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS cumulative_number_of_affected_accounts FROM table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_rows_between.yml000066400000000000000000000047411451700765000264120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6a54c252893ec812849e0cb8100a61cb1d25384df7131303e76c4628a3547bcd file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: is_sensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: breach_date - comma: ',' - select_clause_element: column_reference: naked_identifier: total_number_of_affected_accounts - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_number_of_affected_accounts end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: is_sensitive orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: is_sensitive - comma: ',' - column_reference: naked_identifier: breach_date frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: AS naked_identifier: cumulative_number_of_affected_accounts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_safe_function.sql000066400000000000000000000002301451700765000265150ustar00rootroot00000000000000SELECT TRUE AS col1, SAFE.SUBSTR('foo', 0, -2) AS col2, SAFE.DATEADD(DAY, -2, CURRENT_DATE), SAFE.MY_FUNCTION(column1) FROM table1; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_safe_function.yml000066400000000000000000000046601451700765000265320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5fe16a4415d2a3b7ec104bed1c9356c9068b4b78320b56a4e4815db0251c448f file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'TRUE' alias_expression: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . 
function_name_identifier: SUBSTR bracketed: - start_bracket: ( - expression: quoted_literal: "'foo'" - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - end_bracket: ) alias_expression: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: DAY - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - comma: ',' - expression: bare_function: CURRENT_DATE - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: keyword: SAFE dot: . function_name_identifier: MY_FUNCTION bracketed: start_bracket: ( expression: column_reference: naked_identifier: column1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_set_operators.sql000066400000000000000000000007261451700765000265750ustar00rootroot00000000000000-- EXCEPT DISTINCT SELECT c FROM number1 EXCEPT DISTINCT SELECT c FROM number2; -- INTERSECT DISTINCT (SELECT c FROM number1) INTERSECT DISTINCT (SELECT c FROM number2); -- UNION DISTINCT (SELECT c FROM number1) UNION DISTINCT (SELECT c FROM number2); -- UNION ALL SELECT c FROM number1 UNION ALL (SELECT c FROM number2); -- nesting of UNIONs (SELECT c FROM number1 UNION ALL SELECT c FROM number2) UNION ALL (SELECT c FROM number3 UNION ALL SELECT c FROM number4); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_set_operators.yml000066400000000000000000000150261451700765000265760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1ef2babb8254dc59514cf05078a3d35bfde9a669ecd0070d8e37a361e4ee67ef file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: - keyword: EXCEPT - keyword: DISTINCT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: UNION - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - set_operator: - keyword: UNION - keyword: ALL - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: 
                  table_expression:
                    table_reference:
                      naked_identifier: number3
        - set_operator:
          - keyword: UNION
          - keyword: ALL
        - select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                column_reference:
                  naked_identifier: c
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: number4
        end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_struct.sql000066400000000000000000000021501451700765000252210ustar00rootroot00000000000000-- Example of "select as struct *" syntax.
select
    some_table.foo_id,
    array(
        select as struct
            *
        from another_table
        where another_table.foo_id = some_table.foo_id
    )
from another_table;

-- Example of "select as struct <>" syntax
select as struct
    '1' as bb,
    2 as aa;

select distinct as struct
    '1' as bb,
    2 as aa;

-- Example of explicitly building a struct in a select clause.
select
    struct(
        bar.bar_id as id,
        bar.bar_name as bar
    ) as bar
from foo
left join bar
    on bar.foo_id = foo.foo_id;

-- Array of structs
SELECT col_1, col_2
FROM UNNEST(ARRAY<STRUCT<col_1 STRING, col_2 STRING>>[
    ('hello','world'),
    ('hi', 'there')
]);

SELECT
  STRUCT<int64>(5),
  STRUCT<date>("2011-05-05"),
  STRUCT<x int64, y string>(1, t.str_col),
  STRUCT(int_col);

-- This is to test typeless struct fields are not mistakenly considered as
-- data types, see https://github.com/sqlfluff/sqlfluff/issues/3277
SELECT
    STRUCT(
        some_field,
        some_other_field
    ) AS col
FROM table;

-- Empty STRUCT within TO_JSON
SELECT TO_JSON(STRUCT()) AS col
FROM table;
sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_struct.yml000066400000000000000000000271431451700765000252320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5033aac4dba9632028d262eee7824a0f4b7c269ed7829c6aca2b74bf0a7c23ca
file:
- statement:
    select_statement:
      select_clause:
      - keyword: select
      - select_clause_element:
          column_reference:
          - naked_identifier: some_table
          - dot: .
          - naked_identifier: foo_id
      - comma: ','
      - select_clause_element:
          expression:
            array_expression:
              function_name:
                function_name_identifier: array
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: select
                    select_clause_modifier:
                    - keyword: as
                    - keyword: struct
                    select_clause_element:
                      wildcard_expression:
                        wildcard_identifier:
                          star: '*'
                  from_clause:
                    keyword: from
                    from_expression:
                      from_expression_element:
                        table_expression:
                          table_reference:
                            naked_identifier: another_table
                  where_clause:
                    keyword: where
                    expression:
                    - column_reference:
                      - naked_identifier: another_table
                      - dot: .
                      - naked_identifier: foo_id
                    - comparison_operator:
                        raw_comparison_operator: '='
                    - column_reference:
                      - naked_identifier: some_table
                      - dot: .
- naked_identifier: foo_id end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: another_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: as - keyword: struct - select_clause_element: quoted_literal: "'1'" alias_expression: keyword: as naked_identifier: bb - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: keyword: as naked_identifier: aa - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: distinct - keyword: as - keyword: struct - select_clause_element: quoted_literal: "'1'" alias_expression: keyword: as naked_identifier: bb - comma: ',' - select_clause_element: numeric_literal: '2' alias_expression: keyword: as naked_identifier: aa - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: typed_struct_literal: struct_type: keyword: struct struct_literal: bracketed: - start_bracket: ( - column_reference: - naked_identifier: bar - dot: . - naked_identifier: bar_id - alias_expression: keyword: as naked_identifier: id - comma: ',' - column_reference: - naked_identifier: bar - dot: . - naked_identifier: bar_name - alias_expression: keyword: as naked_identifier: bar - end_bracket: ) alias_expression: keyword: as naked_identifier: bar from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo join_clause: - keyword: left - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: bar - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: bar - dot: . - naked_identifier: foo_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: foo - dot: . 
- naked_identifier: foo_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: col_1 - data_type: data_type_identifier: STRING - comma: ',' - parameter: col_2 - data_type: data_type_identifier: STRING - end_angle_bracket: '>' end_angle_bracket: '>' array_literal: - start_square_bracket: '[' - expression: bracketed: - start_bracket: ( - quoted_literal: "'hello'" - comma: ',' - quoted_literal: "'world'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'hi'" - comma: ',' - quoted_literal: "'there'" - end_bracket: ) - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: int64 end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: date end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( quoted_literal: '"2011-05-05"' end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - parameter: x - data_type: data_type_identifier: int64 - comma: ',' - parameter: y - data_type: data_type_identifier: string - end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' column_reference: - naked_identifier: t - dot: . 
- naked_identifier: str_col end_bracket: ) - comma: ',' - select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_type_schema: start_angle_bracket: < data_type: data_type_identifier: int64 end_angle_bracket: '>' struct_literal: bracketed: start_bracket: ( column_reference: naked_identifier: int_col end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: naked_identifier: some_field - comma: ',' - column_reference: naked_identifier: some_other_field - end_bracket: ) alias_expression: keyword: AS naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TO_JSON bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.sql000066400000000000000000000001371451700765000324140ustar00rootroot00000000000000SELECT STRUCT(STRUCT(1 AS b) AS a) AS foo; SELECT ARRAY_AGG(STRUCT(a AS a, b AS b)) FROM foo; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.yml000066400000000000000000000051661451700765000324250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
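# As a concrete sketch of that workflow (only the generator script above is
# documented here; the pytest invocation is an assumption about where the
# dialect fixture tests live):
#
#     $EDITOR test/fixtures/dialects/bigquery/select_typeless_struct_inside_function.sql
#     python test/generate_parse_fixture_yml.py
#     pytest test/dialects/ -k bigquery
#
# The generator re-parses each SQL fixture and rewrites the YML next to it,
# including a fresh "_hash", so hand edits below this line would be lost on
# the next run.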
_hash: c95046c9221d495e525f8bdbc766233769c7389747bc4042ae8b8ddf6f8ebeca file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: start_bracket: ( numeric_literal: '1' alias_expression: keyword: AS naked_identifier: b end_bracket: ) alias_expression: keyword: AS naked_identifier: a end_bracket: ) alias_expression: keyword: AS naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: start_bracket: ( expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: naked_identifier: a - alias_expression: keyword: AS naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - alias_expression: keyword: AS naked_identifier: b - end_bracket: ) end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_everything.sql000066400000000000000000000002161451700765000301350ustar00rootroot00000000000000SELECT `another-gcp-project.functions.timestamp_parsing`(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_everything.yml000066400000000000000000000024461451700765000301460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 87e79f07adffb44eb77cd74fca7cfa0a249c682f4e9e108ed10d7db33c443f38 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: '`another-gcp-project.functions.timestamp_parsing`' bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: orderdate end_bracket: ) alias_expression: keyword: AS naked_identifier: orderdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: keyword: AS naked_identifier: log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_nothing.sql000066400000000000000000000002051451700765000274150ustar00rootroot00000000000000SELECT gcpproject.functions.timestamp_parsing(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_nothing.yml000066400000000000000000000026031451700765000274230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e75358245b34f941c12870c68538cae8f63a40cbacebecbc72f46f9f809d7063 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: - naked_identifier: gcpproject - dot: . - naked_identifier: functions - dot: . - function_name_identifier: timestamp_parsing bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: orderdate end_bracket: ) alias_expression: keyword: AS naked_identifier: orderdate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: keyword: AS naked_identifier: log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_project_and_datasetfunctionname.sql000066400000000000000000000002231451700765000343530ustar00rootroot00000000000000SELECT `another-gcp-project`.`functions.timestamp_parsing` (log_tbl.first_move) AS first_move FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_project_and_datasetfunctionname.yml000066400000000000000000000025421451700765000343630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7664a49e726983c38a47e44f259ec313b2ff9115c730c7533226f6af960f4fc4 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: quoted_identifier: '`another-gcp-project`' dot: . function_name_identifier: '`functions.timestamp_parsing`' bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . - naked_identifier: first_move end_bracket: ) alias_expression: keyword: AS naked_identifier: first_move from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: keyword: AS naked_identifier: log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_project_name.sql000066400000000000000000000002201451700765000304120ustar00rootroot00000000000000SELECT `another-gcp-project`.functions.timestamp_parsing(log_tbl.first_move) AS first_move FROM `gcp-project.data.year_2021` AS log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_udf_quote_project_name.yml000066400000000000000000000026231451700765000304250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4442cf49e1eea4d336ef43caa3ed491bd3b7e7659ff6677cf1939eff9e334ecf file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: - quoted_identifier: '`another-gcp-project`' - dot: . - naked_identifier: functions - dot: . - function_name_identifier: timestamp_parsing bracketed: start_bracket: ( expression: column_reference: - naked_identifier: log_tbl - dot: . 
- naked_identifier: first_move end_bracket: ) alias_expression: keyword: AS naked_identifier: first_move from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`gcp-project.data.year_2021`' alias_expression: keyword: AS naked_identifier: log_tbl sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_unpivot.sql000066400000000000000000000016371451700765000254120ustar00rootroot00000000000000WITH Produce AS ( SELECT 'Kale' as product, 51 as Q1, 23 as Q2, 45 as Q3, 3 as Q4 UNION ALL SELECT 'Apple', 77, 0, 25, 2) SELECT * FROM Produce; SELECT * FROM Produce UNPIVOT(sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT(sales FOR quarter IN (Q1 AS 1, Q2 AS 2, Q3 AS 3, Q4 AS 4)); SELECT * FROM Produce UNPIVOT INCLUDE NULLS (sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT EXCLUDE NULLS (sales FOR quarter IN (Q1, Q2, Q3, Q4)); SELECT * FROM Produce UNPIVOT( (first_half_sales, second_half_sales) FOR semesters IN ((Q1, Q2) AS 'semester_1', (Q3, Q4) AS 'semester_2')); SELECT a AS 'barry' FROM model UNPIVOT( (A, B) FOR year IN ((C, D) AS "year_2011", (E, F) AS "year_2012")); SELECT * FROM foo UNPIVOT( (bar2, bar3, bar4) FOR year IN ((foo1, foo2, foo3) AS 1, (foo4, foo5, foo6) AS 2, (foo7, foo8, foo9) AS 3)); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_unpivot.yml000066400000000000000000000313411451700765000254070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
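# To preview the tree that gets serialised into a file like this one, the
# `sqlfluff parse` CLI can be pointed at the SQL fixture directly (a usage
# sketch; the exact rendering of the output varies between releases):
#
#     sqlfluff parse --dialect bigquery \
#         test/fixtures/dialects/bigquery/select_unpivot.sql
#
# Each mapping below (select_statement, from_unpivot_expression, ...) mirrors
# a segment in that parse output.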
_hash: 7ae3af309e693b3d3a22d339e25a49727a20c9575cebaf26d47657c313dc9bdd file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Produce keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Kale'" alias_expression: keyword: as naked_identifier: product - comma: ',' - select_clause_element: numeric_literal: '51' alias_expression: keyword: as naked_identifier: Q1 - comma: ',' - select_clause_element: numeric_literal: '23' alias_expression: keyword: as naked_identifier: Q2 - comma: ',' - select_clause_element: numeric_literal: '45' alias_expression: keyword: as naked_identifier: Q3 - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: keyword: as naked_identifier: Q4 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'Apple'" - comma: ',' - select_clause_element: numeric_literal: '77' - comma: ',' - select_clause_element: numeric_literal: '0' - comma: ',' - select_clause_element: numeric_literal: '25' - comma: ',' - select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - alias_expression: keyword: AS numeric_literal: '1' - comma: ',' - naked_identifier: Q2 - alias_expression: keyword: AS numeric_literal: '2' - comma: ',' - naked_identifier: Q3 - alias_expression: keyword: AS numeric_literal: '3' - comma: ',' - naked_identifier: Q4 - alias_expression: keyword: AS numeric_literal: '4' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: 
',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - naked_identifier: sales - keyword: FOR - naked_identifier: quarter - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - comma: ',' - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: first_half_sales - comma: ',' - naked_identifier: second_half_sales - end_bracket: ) - keyword: FOR - naked_identifier: semesters - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: Q1 - comma: ',' - naked_identifier: Q2 - end_bracket: ) - alias_expression: keyword: AS quoted_literal: "'semester_1'" - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: Q3 - comma: ',' - naked_identifier: Q4 - end_bracket: ) - alias_expression: keyword: AS quoted_literal: "'semester_2'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a alias_expression: keyword: AS quoted_identifier: "'barry'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: model from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: A - comma: ',' - naked_identifier: B - end_bracket: ) - keyword: FOR - naked_identifier: year - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: C - comma: ',' - naked_identifier: D - end_bracket: ) - alias_expression: keyword: AS quoted_literal: '"year_2011"' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: E - comma: ',' - naked_identifier: F - end_bracket: ) - alias_expression: keyword: AS quoted_literal: '"year_2012"' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: bar2 - comma: ',' - naked_identifier: bar3 - comma: ',' - naked_identifier: bar4 - end_bracket: ) - keyword: FOR - naked_identifier: year - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - naked_identifier: foo1 - comma: ',' - naked_identifier: foo2 - comma: ',' - naked_identifier: foo3 - end_bracket: ) - alias_expression: 
keyword: AS numeric_literal: '1' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: foo4 - comma: ',' - naked_identifier: foo5 - comma: ',' - naked_identifier: foo6 - end_bracket: ) - alias_expression: keyword: AS numeric_literal: '2' - comma: ',' - bracketed: - start_bracket: ( - naked_identifier: foo7 - comma: ',' - naked_identifier: foo8 - comma: ',' - naked_identifier: foo9 - end_bracket: ) - alias_expression: keyword: AS numeric_literal: '3' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_array_element_less_than.sql000066400000000000000000000001621451700765000317570ustar00rootroot00000000000000SELECT * FROM `project.dataset.table_1` WHERE effect_size_list[ORDINAL(1)] < effect_size_list[ORDINAL(1+1)] sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_array_element_less_than.yml000066400000000000000000000036671451700765000317760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ae4d34ee738a9bd107170906922423de913ff7884e1a415e895e5baccdbb83f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.table_1`' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: effect_size_list - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_square_bracket: ']' - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: effect_size_list - array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' end_bracket: ) end_square_bracket: ']' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_greater_than.sql000066400000000000000000000000721451700765000275330ustar00rootroot00000000000000SELECT * FROM `project.dataset.actions` WHERE c > 1 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_greater_than.yml000066400000000000000000000020241451700765000275340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 94b86c29080873051a5a3cb46f90cd71d335ec8f90dbf54e16a91e25483ab46e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`project.dataset.actions`' where_clause: keyword: WHERE expression: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_less_than.sql000066400000000000000000000004041451700765000270470ustar00rootroot00000000000000SELECT * FROM table_a WHERE -- Tests that '<' is parsed correctly. (Since some dialects use angle -- brackets, e.g. ARRAY<INT64>, it's possible for a "<" in isolation to -- be parsed as an open angle bracket without a matching close bracket.) a < b sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_where_less_than.yml000066400000000000000000000020321451700765000270500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eba4d1754fab4c17ea2b0024c83b5adef079d6f15a550c565fe44b0c58d864f5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b select_with_date_literal_coercion_and_two_part_string_interval.sql000066400000000000000000000001611451700765000372160ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/bigquerySELECT GENERATE_DATE_ARRAY( DATE '2010-01-01', DATE '2010-01-31', INTERVAL '7' DAY ) AS my_array select_with_date_literal_coercion_and_two_part_string_interval.yml000066400000000000000000000023621451700765000372250ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/bigquery# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 67d8000d25556d82ce4a87a8fd35d04e7b62cbe7e056d9a6756e8f0683d8fd65 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: GENERATE_DATE_ARRAY bracketed: - start_bracket: ( - expression: keyword: DATE date_constructor_literal: "'2010-01-01'" - comma: ',' - expression: keyword: DATE date_constructor_literal: "'2010-01-31'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'7'" date_part: DAY - end_bracket: ) alias_expression: keyword: AS naked_identifier: my_array sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset.sql000066400000000000000000000006771451700765000262300ustar00rootroot00000000000000-- This has a table expression and also an offset value.
-- It also includes a nested SELECT SELECT SUM(CASE WHEN value != previous_value THEN 1.0 ELSE 0.0 END) FROM ( SELECT value, CASE WHEN ix != 0 THEN LAG(value) OVER (ORDER BY ix ASC) ELSE value END AS previous_value FROM UNNEST(sequence_validation_and_business_rules.sequence_validation_and_business_rules) AS value WITH OFFSET AS ix )sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset.yml000066400000000000000000000122121451700765000262200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 71e16dfa6259fe56b20ec3371bfdd21e73a17bdadb948b9e228fd8a29d5ad18d file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: value - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: naked_identifier: previous_value - keyword: THEN - expression: numeric_literal: '1.0' - else_clause: keyword: ELSE expression: numeric_literal: '0.0' - keyword: END end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: value - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ix comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - expression: function: function_name: function_name_identifier: LAG bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ix - keyword: ASC end_bracket: ) - else_clause: keyword: ELSE expression: column_reference: naked_identifier: value - keyword: END alias_expression: keyword: AS naked_identifier: previous_value from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: column_reference: - naked_identifier: sequence_validation_and_business_rules - dot: . - naked_identifier: sequence_validation_and_business_rules end_bracket: ) - alias_expression: keyword: AS naked_identifier: value - keyword: WITH - keyword: OFFSET - alias_expression: keyword: AS naked_identifier: ix end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset_2.sql000066400000000000000000000000751451700765000264430ustar00rootroot00000000000000SELECT ARRAY(SELECT a FROM foo WITH OFFSET WHERE OFFSET > 1) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset_2.yml000066400000000000000000000031521451700765000264440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 20d75284109ac15ae2a08ce59ea032046629c82efd45f33c5d9763b29d130a51 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: array_expression: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: foo - keyword: WITH - keyword: OFFSET where_clause: keyword: WHERE expression: column_reference: naked_identifier: OFFSET comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset_3.sql000066400000000000000000000000711451700765000264400ustar00rootroot00000000000000SELECT i, offset FROM UNNEST([1, 2, 3]) AS i WITH OFFSET sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_offset_3.yml000066400000000000000000000027461451700765000264550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8516fdd159466287ab817436d114773baa1d8e4e9e827c8208876ca8d0848b35 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: i - comma: ',' - select_clause_element: column_reference: naked_identifier: offset from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: function: function_name: function_name_identifier: UNNEST bracketed: start_bracket: ( expression: array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - alias_expression: keyword: AS naked_identifier: i - keyword: WITH - keyword: OFFSET sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_qualify.sql000066400000000000000000000025401451700765000264050ustar00rootroot00000000000000SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING); 
SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_qualify.yml000066400000000000000000000375321451700765000264200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5741b59dd9432d0d4713210dcb2a7bb01e9de0ad46f35e5426465d3adb4bd03b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_union_and_qualify.sql000066400000000000000000000004741451700765000304430ustar00rootroot00000000000000SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 UNION ALL SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_union_and_qualify.yml000066400000000000000000000106251451700765000304440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 17f0f80265b930088ba878c6f6bbd5d10633ceef5a760c5135d4b1e0763e135b file: statement: set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_window.sql000066400000000000000000000015671451700765000262520ustar00rootroot00000000000000SELECT item, purchases, category, LAST_VALUE(item) OVER (item_window) AS most_popular FROM Produce WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING); SELECT item, purchases, category, LAST_VALUE(item) OVER (d) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c); SELECT item, purchases, category, LAST_VALUE(item) OVER (c ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce WINDOW a AS (PARTITION BY category), b AS (a ORDER BY purchases), c AS b; select * , max(x) over (window_z) as max_x_over_z from raw_data_1 window window_z as (partition by z) union all select * , max(x) over (window_z) as max_x_over_z from raw_data_2 window window_z as (partition by z); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/select_with_window.yml000066400000000000000000000261401451700765000262460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 078f423fe0fa8ac12ea0aa72af5d13b5f50c251898e1cb1408e33805226f6a01 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: item_window end_bracket: ) alias_expression: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: 
function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: d end_bracket: ) alias_expression: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: c keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: b frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: d keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: column_reference: naked_identifier: purchases - comma: ',' - select_clause_element: column_reference: naked_identifier: category - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: naked_identifier: c frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) alias_expression: keyword: AS naked_identifier: most_popular from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: - keyword: WINDOW - named_window_expression: naked_identifier: a keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category end_bracket: ) - comma: ',' - named_window_expression: naked_identifier: b keyword: AS bracketed: start_bracket: ( window_specification: naked_identifier: a orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases end_bracket: ) - comma: ',' - named_window_expression: - naked_identifier: c - keyword: AS - naked_identifier: b - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: naked_identifier: window_z end_bracket: ) alias_expression: keyword: as 
naked_identifier: max_x_over_z from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_data_1 named_window: keyword: window named_window_expression: naked_identifier: window_z keyword: as bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: z end_bracket: ) - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: naked_identifier: window_z end_bracket: ) alias_expression: keyword: as naked_identifier: max_x_over_z from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_data_2 named_window: keyword: window named_window_expression: naked_identifier: window_z keyword: as bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: z end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/set_variable_multiple.sql000066400000000000000000000001171451700765000267120ustar00rootroot00000000000000set (var2, var3, var5) = ("y", (select "x"), DATE_TRUNC("2000-01-01", month)); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/set_variable_multiple.yml000066400000000000000000000025471451700765000267250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f485b1848e86164c6415af072f3f2d3eb43a47a39f108627398c54a32e8822c6 file: statement: set_segment: - keyword: set - bracketed: - start_bracket: ( - naked_identifier: var2 - comma: ',' - naked_identifier: var3 - comma: ',' - naked_identifier: var5 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: '"y"' - comma: ',' - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: quoted_literal: '"x"' end_bracket: ) - comma: ',' - function: function_name: function_name_identifier: DATE_TRUNC bracketed: start_bracket: ( expression: quoted_literal: '"2000-01-01"' comma: ',' date_part: month end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/set_variable_single.sql000066400000000000000000000000511451700765000263350ustar00rootroot00000000000000set var1 = 5; set var1 = ['one', 'two']; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/set_variable_single.yml000066400000000000000000000016371451700765000263520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
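# The same parse is reachable from Python's simple API without regenerating
# the fixture (a minimal sketch, assuming the `sqlfluff.parse` signature of
# recent releases, which raises on parse errors rather than returning a
# partial tree):
#
#     python -c "import sqlfluff; print(sqlfluff.parse('set var1 = 5;', dialect='bigquery'))"
#
# The statement is the first one in set_variable_single.sql, so the result
# should agree with the "file:" tree recorded below.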
_hash: 5b73183ebf8033c3781e1134e79dda3300baf1a5b990595c27be20e9a23b764f file: - statement: set_segment: keyword: set naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: set_segment: keyword: set naked_identifier: var1 comparison_operator: raw_comparison_operator: '=' array_literal: - start_square_bracket: '[' - quoted_literal: "'one'" - comma: ',' - quoted_literal: "'two'" - end_square_bracket: ']' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/string_literals.sql000066400000000000000000000011341451700765000255440ustar00rootroot00000000000000-- Examples from https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals SELECT "", '', "abc", "it's", 'it\'s', 'Title: "Boy"', "test \"escaped\"", 'test \'escaped\'', "test \\\"escaped", "test \"escaped\\\"", r"", r'', r"abc+", R"abc+", r'abc+', R'abc+', r'f\(abc, (.*),def\)', r"f\(abc, (.*),def\)", b'abc', B"abc", rb"abc*", rB"abc*", Rb'abc*', br'abc+', RB"abc+", r''' as foo '' bar ''', B""" triple ''' quoted '' "" string are """ as hard FROM dummy sqlfluff-2.3.5/test/fixtures/dialects/bigquery/string_literals.yml000066400000000000000000000061561451700765000255570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 529c379b58b13b0d1848eb9d59c1ab1e0b51da0ef143a06272e952fcb65635a9 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: '""' - comma: ',' - select_clause_element: quoted_literal: "''" - comma: ',' - select_clause_element: quoted_literal: '"abc"' - comma: ',' - select_clause_element: quoted_literal: "\"it's\"" - comma: ',' - select_clause_element: quoted_literal: "'it\\'s'" - comma: ',' - select_clause_element: quoted_literal: "'Title: \"Boy\"'" - comma: ',' - select_clause_element: quoted_literal: '"test \"escaped\""' - comma: ',' - select_clause_element: quoted_literal: "'test \\'escaped\\''" - comma: ',' - select_clause_element: quoted_literal: '"test \\\"escaped"' - comma: ',' - select_clause_element: quoted_literal: '"test \"escaped\\\""' - comma: ',' - select_clause_element: quoted_literal: r"" - comma: ',' - select_clause_element: quoted_literal: "r''" - comma: ',' - select_clause_element: quoted_literal: r"abc+" - comma: ',' - select_clause_element: quoted_literal: R"abc+" - comma: ',' - select_clause_element: quoted_literal: "r'abc+'" - comma: ',' - select_clause_element: quoted_literal: "R'abc+'" - comma: ',' - select_clause_element: quoted_literal: "r'f\\(abc, (.*),def\\)'" - comma: ',' - select_clause_element: quoted_literal: r"f\(abc, (.*),def\)" - comma: ',' - select_clause_element: quoted_literal: "b'abc'" - comma: ',' - select_clause_element: quoted_literal: B"abc" - comma: ',' - select_clause_element: quoted_literal: rb"abc*" - comma: ',' - select_clause_element: quoted_literal: rB"abc*" - comma: ',' - select_clause_element: quoted_literal: "Rb'abc*'" - comma: ',' - select_clause_element: quoted_literal: "br'abc+'" - comma: ',' - select_clause_element: quoted_literal: RB"abc+" - comma: ',' - select_clause_element: quoted_literal: "r''' as foo '' bar '''" - comma: ',' - select_clause_element: quoted_literal: "B\"\"\" triple ''' quoted 
'' \"\" string are \"\"\"" alias_expression: keyword: as naked_identifier: hard from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dummy sqlfluff-2.3.5/test/fixtures/dialects/bigquery/tablesample.sql000066400000000000000000000001001451700765000246200ustar00rootroot00000000000000SELECT * FROM dataset.my_table TABLESAMPLE SYSTEM (10 PERCENT); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/tablesample.yml000066400000000000000000000021711451700765000246340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4852fe883d0832f0f3878fcdbf9b303b3fdee4e1a60307c75f3b4c58ab4da621 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dataset - dot: . - naked_identifier: my_table sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' keyword: PERCENT end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/typeless_array.sql000066400000000000000000000001051451700765000254020ustar00rootroot00000000000000SELECT ARRAY(SELECT c FROM number1 UNION ALL SELECT c FROM number2); sqlfluff-2.3.5/test/fixtures/dialects/bigquery/typeless_array.yml000066400000000000000000000036611451700765000254160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c6723634f87c8e5406bd98b3183f988d341dc24cb8cdd62192eca126ee99c73f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: array_expression: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/typeless_struct.sql000066400000000000000000000004371451700765000256200ustar00rootroot00000000000000 SELECT IF( TRUE, STRUCT('hello' AS greeting, 'world' AS subject), STRUCT('hi' AS greeting, 'there' AS subject) ) AS salute FROM (SELECT 1); SELECT CASE WHEN a.xxx != b.xxx THEN STRUCT(a.xxx AS M, b.xxx AS N) END AS xxx FROM A JOIN B ON B.id = A.id; sqlfluff-2.3.5/test/fixtures/dialects/bigquery/typeless_struct.yml000066400000000000000000000121631451700765000256210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6e5ae6640110e593f6849562ffcd1161fe6ccefbadb97693bc80e003c4e935c8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: IF bracketed: - start_bracket: ( - expression: boolean_literal: 'TRUE' - comma: ',' - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'hello'" - alias_expression: keyword: AS naked_identifier: greeting - comma: ',' - quoted_literal: "'world'" - alias_expression: keyword: AS naked_identifier: subject - end_bracket: ) - comma: ',' - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - quoted_literal: "'hi'" - alias_expression: keyword: AS naked_identifier: greeting - comma: ',' - quoted_literal: "'there'" - alias_expression: keyword: AS naked_identifier: subject - end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: salute from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: xxx - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: xxx - keyword: THEN - expression: typed_struct_literal: struct_type: keyword: STRUCT struct_literal: bracketed: - start_bracket: ( - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: xxx - alias_expression: keyword: AS naked_identifier: M - comma: ',' - column_reference: - naked_identifier: b - dot: . - naked_identifier: xxx - alias_expression: keyword: AS naked_identifier: N - end_bracket: ) - keyword: END alias_expression: keyword: AS naked_identifier: xxx from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: B join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: B - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: A - dot: . - naked_identifier: id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/000077500000000000000000000000001451700765000221215ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/.sqlfluff000066400000000000000000000000401451700765000237360ustar00rootroot00000000000000[sqlfluff] dialect = clickhouse sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_database.sql000066400000000000000000000210271451700765000257330ustar00rootroot00000000000000CREATE DATABASE db_name; CREATE DATABASE db_name COMMENT 'SingleQuoted'; CREATE DATABASE db_name COMMENT "DoubleQuoted"; CREATE DATABASE db_name COMMENT 'SingleQuoted three words'; CREATE DATABASE db_name COMMENT "DoubleQuoted three words"; CREATE DATABASE db_name COMMENT 'Weird characters: !@#$%^&*()_+{}|:"<>?'; CREATE DATABASE db_name ON CLUSTER cluster; CREATE DATABASE db_name ON CLUSTER "cluster"; CREATE DATABASE db_name ON CLUSTER "underscore_cluster"; CREATE DATABASE db_name ON CLUSTER 'cluster'; CREATE DATABASE db_name ON CLUSTER 'underscore_cluster'; CREATE DATABASE db_name ENGINE = Lazy() COMMENT 'Comment'; CREATE DATABASE db_comment ENGINE = Lazy() COMMENT 'The temporary database'; SELECT name, comment FROM system.databases WHERE name = 'db_comment'; -- https://clickhouse.com/docs/en/engines/database-engines/atomic CREATE DATABASE test; CREATE DATABASE test ENGINE = Atomic; -- https://clickhouse.com/docs/en/engines/database-engines/lazy CREATE DATABASE testlazy; CREATE DATABASE testlazy ENGINE = Lazy(expiration_time_in_seconds); -- https://clickhouse.com/docs/en/engines/database-engines/replicated CREATE DATABASE testdb; CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name'); CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') SETTINGS key1 = value1; CREATE DATABASE testdb ENGINE = Replicated('zoo_path', 'shard_name', 'replica_name') SETTINGS key1 = 1, key2 = 2; CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','replica1'); CREATE DATABASE r ENGINE=Replicated('some/path/r','shard1','other_replica'); CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','{replica}'); CREATE DATABASE r ENGINE=Replicated('some/path/r','other_shard','r2'); -- https://clickhouse.com/docs/en/engines/database-engines/postgresql CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 'schema_name'); CREATE DATABASE test_database ENGINE = PostgreSQL('postgres1:5432', 'test_database', 'postgres', 'mysecretpassword', 
'schema_name', 1); -- https://clickhouse.com/docs/en/engines/database-engines/mysql CREATE DATABASE IF NOT EXISTS mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ON CLUSTER cluster ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', test, 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password'); CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000; CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100; -- https://clickhouse.com/docs/en/engines/database-engines/sqlite CREATE DATABASE sqlite_db ENGINE = SQLite('sqlite.db'); -- https://clickhouse.com/docs/en/engines/database-engines/materialized-postgresql CREATE DATABASE postgres_db ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password'); CREATE DATABASE postgres_database ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_tables_list = 'schema1.table1,schema2.table2,schema1.table3', materialized_postgresql_tables_list_with_schema = 1; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_schema_list = 'schema1,schema2,schema3'; CREATE DATABASE database1 ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_tables_list = 'table1,table2,table3'; CREATE DATABASE demodb ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgres_user', 'postgres_password') SETTINGS materialized_postgresql_replication_slot = 'clickhouse_sync', materialized_postgresql_snapshot = '0000000A-0000023F-3', materialized_postgresql_tables_list = 'table1,table2,table3'; CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password'); CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedPostgreSQL('host:port', 'database', 'user', 'password') SETTINGS materialized_postgresql_schema = 'postgres_schema'; -- https://clickhouse.com/docs/en/engines/database-engines/materialized-mysql CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE 
table1 (id UInt32, name String) TABLE OVERRIDE; CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE IF NOT EXISTS db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE IF NOT EXISTS db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***'); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB'; CREATE DATABASE db_name ON CLUSTER cluster ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS materialized_mysql_database_engine = 'InnoDB' TABLE OVERRIDE table1 (id UInt32, name String) TABLE OVERRIDE table2 (id UInt32, name String); CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***') SETTINGS allows_query_when_mysql_lost=true, max_wait_time_when_mysql_unavailable=10000; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_database.yml000066400000000000000000001544441451700765000257470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
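# Context note: the sibling create_database.sql groups its statements by
# ClickHouse database engine (Atomic, Lazy, Replicated, PostgreSQL, MySQL,
# SQLite, MaterializedPostgreSQL, MaterializedMySQL), following the
# clickhouse.com engine docs linked in that file, and the tree below mirrors
# that ordering. The dialect comes from the directory's .sqlfluff file
# ([sqlfluff] dialect = clickhouse). As a sketch — assuming SQLFluff's
# documented simple Python API — an equivalent parse can be driven from code:
#
#   import sqlfluff
#   # returns a parse artefact for one statement (shape may vary by version)
#   tree = sqlfluff.parse("CREATE DATABASE db_name ENGINE = Atomic;",
#                         dialect="clickhouse")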
_hash: 9e78f284bcd9f6853ca4d74d9fba0c6e2ad5e76d795a87763c4cf732346c8b8b file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'SingleQuoted'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: '"DoubleQuoted"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'SingleQuoted three words'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: '"DoubleQuoted three words"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - keyword: COMMENT - quoted_identifier: "'Weird characters: !@#$%^&*()_+{}|:\"<>?'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: '"cluster"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: '"underscore_cluster"' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'cluster'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - quoted_identifier: "'underscore_cluster'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Lazy bracketed: start_bracket: ( end_bracket: ) - keyword: COMMENT - quoted_identifier: "'Comment'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_comment - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Lazy bracketed: start_bracket: ( end_bracket: ) - keyword: COMMENT - quoted_identifier: "'The temporary database'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: comment from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: databases where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'db_comment'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Atomic - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testlazy - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testlazy - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Lazy bracketed: start_bracket: ( expression: column_reference: naked_identifier: expiration_time_in_seconds end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: key1 - comparison_operator: raw_comparison_operator: '=' - naked_identifier: value1 - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: testdb - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'zoo_path'" - comma: ',' - expression: quoted_literal: "'shard_name'" - comma: ',' - expression: quoted_literal: "'replica_name'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: key1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - naked_identifier: key2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: 
',' - expression: quoted_literal: "'shard1'" - comma: ',' - expression: quoted_literal: "'replica1'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'shard1'" - comma: ',' - expression: quoted_literal: "'other_replica'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'other_shard'" - comma: ',' - expression: quoted_literal: "'{replica}'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: r - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: Replicated bracketed: - start_bracket: ( - expression: quoted_literal: "'some/path/r'" - comma: ',' - expression: quoted_literal: "'other_shard'" - comma: ',' - expression: quoted_literal: "'r2'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - comma: ',' - expression: quoted_literal: "'schema_name'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: test_database - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: PostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: 
"'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'test_database'" - comma: ',' - expression: quoted_literal: "'postgres'" - comma: ',' - expression: quoted_literal: "'mysecretpassword'" - comma: ',' - expression: quoted_literal: "'schema_name'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: column_reference: naked_identifier: test - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - 
comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: read_write_timeout - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql_db - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'test'" - comma: ',' - expression: quoted_literal: "'my_user'" - comma: ',' - expression: quoted_literal: "'user_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: read_write_timeout - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - comma: ',' - naked_identifier: connect_timeout - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: sqlite_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: SQLite bracketed: start_bracket: ( expression: quoted_literal: "'sqlite.db'" end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: postgres_db - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: postgres_database - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_schema - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_tables_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'schema1.table1,schema2.table2,schema1.table3'" - comma: ',' - naked_identifier: 
materialized_postgresql_tables_list_with_schema - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_schema_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'schema1,schema2,schema3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: database1 - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_tables_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'table1,table2,table3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: demodb - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'postgres1:5432'" - comma: ',' - expression: quoted_literal: "'postgres_database'" - comma: ',' - expression: quoted_literal: "'postgres_user'" - comma: ',' - expression: quoted_literal: "'postgres_password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_replication_slot - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'clickhouse_sync'" - comma: ',' - naked_identifier: materialized_postgresql_snapshot - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'0000000A-0000023F-3'" - comma: ',' - naked_identifier: materialized_postgresql_tables_list - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'table1,table2,table3'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: 
ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_schema - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_schema - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedPostgreSQL bracketed: - start_bracket: ( - expression: quoted_literal: "'host:port'" - comma: ',' - expression: quoted_literal: "'database'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'password'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_postgresql_schema - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'postgres_schema'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - 
keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE 
comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' 
engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: 
data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - database_engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: 
SETTINGS - naked_identifier: materialized_mysql_database_engine - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'InnoDB'" - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - keyword: TABLE - keyword: OVERRIDE - table_reference: naked_identifier: table2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: UInt32 - comma: ',' - column_definition: naked_identifier: name data_type: data_type_identifier: String - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: mysql - database_engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' engine_function: keyword: MaterializedMySQL bracketed: - start_bracket: ( - expression: quoted_literal: "'localhost:3306'" - comma: ',' - expression: quoted_literal: "'db'" - comma: ',' - expression: quoted_literal: "'user'" - comma: ',' - expression: quoted_literal: "'***'" - end_bracket: ) - keyword: SETTINGS - naked_identifier: allows_query_when_mysql_lost - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'true' - comma: ',' - naked_identifier: max_wait_time_when_mysql_unavailable - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_materialized_view.sql000066400000000000000000000014361451700765000276750ustar00rootroot00000000000000CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv TO db.table AS SELECT column1, column2 FROM db.table_kafka; CREATE MATERIALIZED VIEW table_mv TO table AS SELECT column1, column2 FROM table_kafka; CREATE MATERIALIZED VIEW IF NOT EXISTS db.table_mv ON CLUSTER mycluster TO db.table AS SELECT column1, column2 FROM db.table_kafka; CREATE MATERIALIZED VIEW table_mv TO table ENGINE = MergeTree() AS SELECT column1, column2 FROM table_kafka; CREATE MATERIALIZED VIEW table_mv ENGINE = MergeTree() AS SELECT column1, column2 FROM table_kafka; CREATE MATERIALIZED VIEW table_mv ENGINE = MergeTree() POPULATE AS SELECT column1, column2 FROM table_kafka; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_materialized_view.yml000066400000000000000000000146671451700765000277110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b2c3cc157211631bf681fb486c2acfbe8a05f377b12eef199b5f5fe7e89d1653 file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_mv - keyword: TO - table_reference: - naked_identifier: db - dot: . 
- naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . - naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - keyword: TO - table_reference: naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_mv - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: mycluster - keyword: TO - table_reference: - naked_identifier: db - dot: . - naked_identifier: table - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: db - dot: . 
- naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - keyword: TO - table_reference: naked_identifier: table - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: table_mv - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - keyword: POPULATE - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_kafka - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_table.sql000066400000000000000000000040531451700765000252560ustar00rootroot00000000000000create table example1 ( a String, b String ) engine = MergeTree() order by (a, b); CREATE TABLE table_name ( u64 UInt64, i32 Int32, s String ) ENGINE = MergeTree() ORDER BY (CounterID, EventDate) PARTITION BY toYYYYMM(EventDate) SETTINGS index_granularity=8192; CREATE TABLE WatchLog_old(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree(date, (UserId, EventType), 8192); CREATE TABLE WatchLog_new(date Date, UserId Int64, EventType String, Cnt UInt64) ENGINE=MergeTree PARTITION BY date ORDER BY (UserId, EventType) SETTINGS index_granularity=8192; CREATE TABLE WatchLog as WatchLog_old ENGINE=Merge(currentDatabase(), '^WatchLog'); CREATE TABLE hits_all AS hits ENGINE = Distributed(logs, default, hits) SETTINGS fsync_after_insert=0, fsync_directories=0; CREATE TABLE IF NOT EXISTS db.table_name AS table_function(); CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1; CREATE TABLE codec_example ( timestamp DateTime CODEC(DoubleDelta), slow_values Float32 CODEC(Gorilla) ) ENGINE = MergeTree(); CREATE TABLE mytable ( x String Codec(Delta, LZ4, AES_128_GCM_SIV) ) ENGINE = 
MergeTree ORDER BY x; CREATE OR REPLACE TABLE base.t1 (n UInt64, s String) ENGINE = MergeTree ORDER BY n; CREATE OR REPLACE TABLE base.t1 (n UInt64, s Nullable(String)) ENGINE = MergeTree ORDER BY n; CREATE TABLE t1 (x String) ENGINE = Memory COMMENT 'The temporary table'; CREATE TABLE IF NOT EXISTS all_hits ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(cluster, default, hits); CREATE TABLE table_name ( name1 String, CONSTRAINT constraint_name_1 CHECK (name1 = 'test') ) ENGINE = engine; CREATE TABLE example_table ( d DateTime, a Int TTL d + INTERVAL 1 MONTH, b Int TTL d + INTERVAL 1 MONTH, c String ) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY d; CREATE TABLE example_table ( d DateTime, a Int ) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY d TTL d + INTERVAL 1 MONTH DELETE, d + INTERVAL 1 WEEK TO VOLUME 'aaa', d + INTERVAL 2 WEEK TO DISK 'bbb'; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/create_table.yml000066400000000000000000000513071451700765000252640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 612f5a6269c6f3b3edfb7afeee15e095dce9d4e4d6adfaeaab1ccab45eb525ce file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: example1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: engine - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - keyword: order - keyword: by - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: u64 data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: i32 data_type: data_type_identifier: Int32 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: CounterID - comma: ',' - column_reference: naked_identifier: EventDate - end_bracket: ) - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM bracketed: start_bracket: ( expression: column_reference: naked_identifier: EventDate end_bracket: ) - keyword: SETTINGS - naked_identifier: index_granularity - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8192' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog_old - bracketed: - start_bracket: ( - column_definition: 
naked_identifier: date data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: UserId data_type: data_type_identifier: Int64 - comma: ',' - column_definition: naked_identifier: EventType data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: Cnt data_type: data_type_identifier: UInt64 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: date - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: UserId - comma: ',' - column_reference: naked_identifier: EventType - end_bracket: ) - comma: ',' - expression: numeric_literal: '8192' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog_new - bracketed: - start_bracket: ( - column_definition: naked_identifier: date data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: UserId data_type: data_type_identifier: Int64 - comma: ',' - column_definition: naked_identifier: EventType data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: Cnt data_type: data_type_identifier: UInt64 - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: date - keyword: ORDER - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: UserId - comma: ',' - column_reference: naked_identifier: EventType - end_bracket: ) - keyword: SETTINGS - naked_identifier: index_granularity - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8192' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: WatchLog - keyword: as - table_reference: naked_identifier: WatchLog_old - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Merge bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: currentDatabase bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: quoted_literal: "'^WatchLog'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: hits_all - keyword: AS - table_reference: naked_identifier: hits - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: Distributed bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: logs - comma: ',' - expression: column_reference: naked_identifier: default - comma: ',' - expression: column_reference: naked_identifier: hits - end_bracket: ) - keyword: SETTINGS - naked_identifier: fsync_after_insert - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - comma: ',' - naked_identifier: fsync_directories - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - 
keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: db - dot: . - naked_identifier: table_name - keyword: AS - function: function_name: function_name_identifier: table_function bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Memory - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: codec_example - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp data_type: data_type_identifier: DateTime column_constraint_segment: keyword: CODEC expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: DoubleDelta end_bracket: ) - comma: ',' - column_definition: naked_identifier: slow_values data_type: data_type_identifier: Float32 column_constraint_segment: keyword: CODEC expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: Gorilla end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: MergeTree bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String column_constraint_segment: keyword: Codec expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: Delta - comma: ',' - column_reference: naked_identifier: LZ4 - comma: ',' - column_reference: naked_identifier: AES_128_GCM_SIV - end_bracket: ) end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: ORDER - keyword: BY - column_reference: naked_identifier: x - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: base - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: n data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: ORDER - keyword: BY - column_reference: naked_identifier: n - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: base - dot: . 
- naked_identifier: t1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: n data_type: data_type_identifier: UInt64 - comma: ',' - column_definition: naked_identifier: s data_type: data_type_identifier: Nullable bracketed_arguments: bracketed: start_bracket: ( data_type_identifier: String end_bracket: ) - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: ORDER - keyword: BY - column_reference: naked_identifier: n - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: naked_identifier: x data_type: data_type_identifier: String end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Memory - keyword: COMMENT - quoted_identifier: "'The temporary table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: all_hits - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - bracketed: - start_bracket: ( - column_definition: naked_identifier: p data_type: data_type_identifier: Date - comma: ',' - column_definition: naked_identifier: i data_type: data_type_identifier: Int32 - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: Distributed bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: cluster - comma: ',' - expression: column_reference: naked_identifier: default - comma: ',' - expression: column_reference: naked_identifier: hits - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_definition: naked_identifier: name1 data_type: data_type_identifier: String - comma: ',' - column_definition: naked_identifier: CONSTRAINT data_type: data_type_identifier: constraint_name_1 column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: name1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'test'" end_bracket: ) - end_bracket: ) - engine: keyword: ENGINE comparison_operator: raw_comparison_operator: '=' table_engine_function: function_name: function_name_identifier: engine - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: d data_type: data_type_identifier: DateTime - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: Int column_constraint_segment: column_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: Int column_constraint_segment: column_ttl_segment: keyword: TTL expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - 
comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: String - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM bracketed: start_bracket: ( expression: column_reference: naked_identifier: d end_bracket: ) - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: d data_type: data_type_identifier: DateTime - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: Int - end_bracket: ) - engine: - keyword: ENGINE - comparison_operator: raw_comparison_operator: '=' - table_engine_function: function_name: function_name_identifier: MergeTree - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: toYYYYMM bracketed: start_bracket: ( expression: column_reference: naked_identifier: d end_bracket: ) - keyword: ORDER - keyword: BY - column_reference: naked_identifier: d - table_ttl_segment: - keyword: TTL - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: MONTH - keyword: DELETE - comma: ',' - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: WEEK - keyword: TO - keyword: VOLUME - quoted_literal: "'aaa'" - comma: ',' - expression: column_reference: naked_identifier: d binary_operator: + interval_expression: keyword: INTERVAL numeric_literal: '2' date_part: WEEK - keyword: TO - keyword: DISK - quoted_literal: "'bbb'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/cte.sql000066400000000000000000000016211451700765000234150ustar00rootroot00000000000000with ( select 1 as p ) as test_param select toString(1) as Test_string, toDateTime64('2022-05-25', 3) as Test_dateTime64, ifNull(null, 'TestNull') as testIf, JSONExtractString('{"abc": "hello"}', 'abc') as testJSON, test_param as param; WITH '2019-08-01 15:23:00' as ts_upper_bound SELECT * FROM hits WHERE EventDate = toDate(ts_upper_bound) AND EventTime <= ts_upper_bound; WITH sum(bytes) as s SELECT formatReadableSize(s), table FROM system.parts GROUP BY table ORDER BY s; /* this example would return TOP 10 of most huge tables */ WITH ( SELECT sum(bytes) FROM system.parts WHERE active ) AS total_disk_usage SELECT (sum(bytes) / total_disk_usage) * 100 AS table_disk_usage, table FROM system.parts GROUP BY table ORDER BY table_disk_usage DESC LIMIT 10; WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM test1; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/cte.yml000066400000000000000000000247711451700765000234320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5f5efb217212b97c6ea3f52e5a265c8dc25e078729131d82262c8a1cbd33285d file: - statement: with_compound_statement: keyword: with common_table_expression: expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' alias_expression: keyword: as naked_identifier: p end_bracket: ) keyword: as naked_identifier: test_param select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: toString bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) alias_expression: keyword: as naked_identifier: Test_string - comma: ',' - select_clause_element: function: function_name: function_name_identifier: toDateTime64 bracketed: - start_bracket: ( - expression: quoted_literal: "'2022-05-25'" - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) alias_expression: keyword: as naked_identifier: Test_dateTime64 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ifNull bracketed: - start_bracket: ( - expression: null_literal: 'null' - comma: ',' - expression: quoted_literal: "'TestNull'" - end_bracket: ) alias_expression: keyword: as naked_identifier: testIf - comma: ',' - select_clause_element: function: function_name: function_name_identifier: JSONExtractString bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"abc\": \"hello\"}'" - comma: ',' - expression: quoted_literal: "'abc'" - end_bracket: ) alias_expression: keyword: as naked_identifier: testJSON - comma: ',' - select_clause_element: column_reference: naked_identifier: test_param alias_expression: keyword: as naked_identifier: param - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: quoted_literal: "'2019-08-01 15:23:00'" keyword: as naked_identifier: ts_upper_bound select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: hits where_clause: keyword: WHERE expression: - column_reference: naked_identifier: EventDate - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: toDate bracketed: start_bracket: ( expression: column_reference: naked_identifier: ts_upper_bound end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: EventTime - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: ts_upper_bound - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) keyword: as naked_identifier: s select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: formatReadableSize bracketed: start_bracket: ( expression: column_reference: naked_identifier: s end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: table from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . 
- naked_identifier: parts groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: s - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts where_clause: keyword: WHERE expression: column_reference: naked_identifier: active end_bracket: ) keyword: AS naked_identifier: total_disk_usage select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: bytes end_bracket: ) binary_operator: / column_reference: naked_identifier: total_disk_usage end_bracket: ) binary_operator: '*' numeric_literal: '100' alias_expression: keyword: AS naked_identifier: table_disk_usage - comma: ',' - select_clause_element: column_reference: naked_identifier: table from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: system - dot: . - naked_identifier: parts groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: table_disk_usage - keyword: DESC limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: test1 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: i binary_operator: + numeric_literal: '1' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: j binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/cte_columns.sql000066400000000000000000000000761451700765000251600ustar00rootroot00000000000000WITH t(col1, col2) AS (SELECT 1, 2 FROM foo) SELECT * from t; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/cte_columns.yml000066400000000000000000000033561451700765000251660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b53fa22bf40f811851284c89c29204c8a30460e8248bc1b5c45d29b252db203d
file:
  statement:
    with_compound_statement:
      keyword: WITH
      common_table_expression:
        naked_identifier: t
        cte_column_list:
          bracketed:
            start_bracket: (
            identifier_list:
            - naked_identifier: col1
            - comma: ','
            - naked_identifier: col2
            end_bracket: )
        keyword: AS
        bracketed:
          start_bracket: (
          select_statement:
            select_clause:
            - keyword: SELECT
            - select_clause_element:
                numeric_literal: '1'
            - comma: ','
            - select_clause_element:
                numeric_literal: '2'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: foo
          end_bracket: )
      select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t
  statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/dollar_quoted_literal.sql000066400000000000000000000001271451700765000272140ustar00rootroot00000000000000
SELECT * FROM foo WHERE col1 = $$bar$$;

SELECT * FROM foo WHERE col1 = $baz$bar$baz$;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/dollar_quoted_literal.yml000066400000000000000000000032621451700765000272210ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: db62483fc2007d81cce45cf5cb073a25261d92065231c8344f0fbafff69db6ff
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: foo
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: col1
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: $$bar$$
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: foo
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: col1
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: $baz$bar$baz$
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/drop_statement.sql000066400000000000000000000025721451700765000257000ustar00rootroot00000000000000
-- DROP DATABASE
DROP DATABASE db;
DROP DATABASE IF EXISTS db;
DROP DATABASE db ON CLUSTER cluster;
DROP DATABASE db SYNC;

-- DROP TABLE
DROP TABLE db_name;
DROP TABLE db.name;
DROP TABLE IF EXISTS db_name;
DROP TABLE db_name ON CLUSTER cluster;
DROP TABLE db_name SYNC;
DROP TEMPORARY TABLE db_name;

-- DROP DICTIONARY
DROP DICTIONARY dict_name;
DROP DICTIONARY IF EXISTS dict_name;
DROP DICTIONARY dict_name SYNC;

-- DROP USER
DROP USER user_name;
DROP USER IF EXISTS user_name;
DROP USER user_name ON CLUSTER cluster_name;

-- DROP ROLE
DROP ROLE role_name;
DROP ROLE IF EXISTS role_name;
DROP ROLE role_name ON CLUSTER cluster_name;

-- DROP ROW POLICY
-- DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]

-- DROP QUOTA
DROP QUOTA quota_name;
DROP QUOTA IF EXISTS quota_name;
DROP QUOTA quota_name ON CLUSTER cluster_name;

-- DROP SETTINGS PROFILE
DROP setting_name PROFILE profile_name;
DROP setting_name PROFILE IF EXISTS profile_name;
DROP setting_name PROFILE profile_name ON CLUSTER cluster_name;
DROP setting_name1,setting_name2 PROFILE profile_name;

-- DROP VIEW
DROP VIEW view_name;
DROP VIEW db.view_name;
DROP VIEW IF EXISTS view_name;
DROP VIEW view_name ON CLUSTER cluster;
DROP VIEW view_name SYNC;

-- DROP FUNCTION
DROP FUNCTION function_name;
DROP FUNCTION IF EXISTS function_name;
DROP FUNCTION function_name on CLUSTER cluster;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/drop_statement.yml000066400000000000000000000161201451700765000256740ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2009929ac9fd7f306cb4749f88dbdd183dba6fbe1ae1e98b347863f6a4a34301
file:
- statement:
    drop_database_statement:
    - keyword: DROP
    - keyword: DATABASE
    - database_reference:
        naked_identifier: db
- statement_terminator: ;
- statement:
    drop_database_statement:
    - keyword: DROP
    - keyword: DATABASE
    - keyword: IF
    - keyword: EXISTS
    - database_reference:
        naked_identifier: db
- statement_terminator: ;
- statement:
    drop_database_statement:
    - keyword: DROP
    - keyword: DATABASE
    - database_reference:
        naked_identifier: db
    - on_cluster_clause:
      - keyword: 'ON'
      - keyword: CLUSTER
      - naked_identifier: cluster
- statement_terminator: ;
- statement:
    drop_database_statement:
    - keyword: DROP
    - keyword: DATABASE
    - database_reference:
        naked_identifier: db
    - keyword: SYNC
- statement_terminator: ;
- statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - table_reference:
        naked_identifier: db_name
- statement_terminator: ;
- statement:
    drop_table_statement:
    - keyword: DROP
    - keyword: TABLE
    - table_reference:
      - naked_identifier: db
      - dot: .
- naked_identifier: name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: db_name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: db_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: db_name - keyword: SYNC - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: db_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - naked_identifier: dict_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - keyword: IF - keyword: EXISTS - naked_identifier: dict_name - statement_terminator: ; - statement: drop_dictionary_statement: - keyword: DROP - keyword: DICTIONARY - naked_identifier: dict_name - keyword: SYNC - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - naked_identifier: user_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - naked_identifier: user_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - naked_identifier: user_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - naked_identifier: role_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - naked_identifier: role_name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: ROLE - naked_identifier: role_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - naked_identifier: quota_name - statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - keyword: IF - keyword: EXISTS - naked_identifier: quota_name - statement_terminator: ; - statement: drop_quota_statement: - keyword: DROP - keyword: QUOTA - naked_identifier: quota_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - naked_identifier: profile_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - keyword: IF - keyword: EXISTS - naked_identifier: profile_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name - keyword: PROFILE - naked_identifier: profile_name - on_cluster_clause: - keyword: 'ON' - keyword: CLUSTER - naked_identifier: cluster_name - statement_terminator: ; - statement: drop_setting_profile_statement: - keyword: DROP - naked_identifier: setting_name1 - comma: ',' - naked_identifier: setting_name2 - keyword: PROFILE - naked_identifier: profile_name - statement_terminator: ; - statement: 
    drop_view_statement:
    - keyword: DROP
    - keyword: VIEW
    - table_reference:
        naked_identifier: view_name
- statement_terminator: ;
- statement:
    drop_view_statement:
    - keyword: DROP
    - keyword: VIEW
    - table_reference:
      - naked_identifier: db
      - dot: .
      - naked_identifier: view_name
- statement_terminator: ;
- statement:
    drop_view_statement:
    - keyword: DROP
    - keyword: VIEW
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: view_name
- statement_terminator: ;
- statement:
    drop_view_statement:
    - keyword: DROP
    - keyword: VIEW
    - table_reference:
        naked_identifier: view_name
    - on_cluster_clause:
      - keyword: 'ON'
      - keyword: CLUSTER
      - naked_identifier: cluster
- statement_terminator: ;
- statement:
    drop_view_statement:
    - keyword: DROP
    - keyword: VIEW
    - table_reference:
        naked_identifier: view_name
    - keyword: SYNC
- statement_terminator: ;
- statement:
    drop_function_statement:
    - keyword: DROP
    - keyword: FUNCTION
    - naked_identifier: function_name
- statement_terminator: ;
- statement:
    drop_function_statement:
    - keyword: DROP
    - keyword: FUNCTION
    - keyword: IF
    - keyword: EXISTS
    - naked_identifier: function_name
- statement_terminator: ;
- statement:
    drop_function_statement:
    - keyword: DROP
    - keyword: FUNCTION
    - naked_identifier: function_name
    - on_cluster_clause:
      - keyword: 'on'
      - keyword: CLUSTER
      - naked_identifier: cluster
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/final.sql000066400000000000000000000001621451700765000237320ustar00rootroot00000000000000
select a from my_table final where a > 0;
SELECT sum(bytes) FROM system.parts as table_alias final WHERE active;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/final.yml000066400000000000000000000036771451700765000237470ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 21a9fa765028853342c1cbe03c2ec6ba97de4c5e0a570257f92c3e333a7eb1f8
file:
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
            naked_identifier: a
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: my_table
            keyword: final
      where_clause:
        keyword: where
        expression:
          column_reference:
            naked_identifier: a
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '0'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: sum
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: bytes
              end_bracket: )
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: system
              - dot: .
              - naked_identifier: parts
            alias_expression:
              keyword: as
              naked_identifier: table_alias
            keyword: final
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: active
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/join.sql000066400000000000000000000071771451700765000236110ustar00rootroot00000000000000
-- no type join
SELECT * FROM test1 ALL JOIN test2 ON test2.ty1=test1.ty1;
SELECT * FROM test1 ANY JOIN test2 ON test2.ty1=test1.ty1;
SELECT * FROM test1 JOIN test2 ON test2.ty1=test1.ty1;

-- INNER join
SELECT * FROM test1 INNER JOIN test2 ON test2.ty1=test1.ty1;

-- INNER join ...
SELECT * FROM test1 INNER ALL JOIN test2 ON test2.ty1=test1.ty1;
SELECT * FROM test1 INNER ANY JOIN test2 ON test2.ty1=test1.ty1;

-- ... INNER join
SELECT * FROM test1 ALL INNER JOIN test2 ON test2.ty1=test1.ty1;
SELECT * FROM test1 ANY INNER JOIN test2 ON test2.ty1=test1.ty1;

-- LEFT JOIN
SELECT * FROM test1 LEFT JOIN test2 ON test2.ty1=test1.ty1;

-- LEFT join ...
SELECT tbl1.id FROM tbl1 LEFT ANTI join tbl2 on tbl1.id = tbl2.id;
SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ANY JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ALL JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ASOF JOIN test2 USING ty1,ty2;

-- ... LEFT join
select tbl1.id from tbl1 ANTI LEFT join tbl2 on tbl1.id = tbl2.id;
SELECT * FROM test1 as t1 SEMI LEFT JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 ANY LEFT JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 ALL LEFT JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 ASOF LEFT JOIN test2 USING (ty1,ty2);

-- LEFT join test case OUTER
SELECT * FROM test1 as t1 LEFT OUTER JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2;
SELECT tbl1.id FROM tbl1 LEFT ANTI OUTER join tbl2 on tbl1.id = tbl2.id;
SELECT * FROM test1 as t1 LEFT SEMI OUTER JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ANY OUTER JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ALL OUTER JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 LEFT ASOF OUTER JOIN test2 USING ty1,ty2;

-- RIGHT JOIN
SELECT * FROM test1 RIGHT JOIN test2 ON test2.ty1=test1.ty1;

-- RIGHT join ...
SELECT tbl1.id FROM tbl1 RIGHT ANTI join tbl2 on tbl1.id = tbl2.id;
SELECT * FROM test1 as t1 RIGHT SEMI JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 RIGHT ANY JOIN test2 USING ty1,ty2;
SELECT * FROM test1 as t1 RIGHT ALL JOIN test2 USING ty1,ty2;

-- ...
RIGHT join select tbl1.id from tbl1 ANTI RIGHT join tbl2 on tbl1.id = tbl2.id; SELECT * FROM test1 as t1 SEMI RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ANY RIGHT JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 ALL RIGHT JOIN test2 USING ty1,ty2; -- RIGHT join test case OUTER SELECT * FROM test1 as t1 RIGHT OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ANTI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT SEMI OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ANY OUTER JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 RIGHT ALL OUTER JOIN test2 USING ty1,ty2; -- ASOF join select tbl1.id from tbl1 ASOF JOIN tbl2 on tbl1.id = tbl2.id; -- CROSS join SELECT * FROM test1 CROSS JOIN test2; -- FULL join SELECT * FROM test1 as t1 FULL ALL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 FULL JOIN test2 USING ty1,ty2; SELECT * FROM test1 as t1 FULL ALL OUTER JOIN test2 USING ty1,ty2; -- ARRAY join SELECT col FROM (SELECT arr FROM test1) AS t2 ARRAY JOIN arr AS col; SELECT col FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr AS col; SELECT * FROM (SELECT [1, 2] AS arr) AS t1 ARRAY JOIN arr; SELECT * FROM (SELECT [1, 2] AS arr) AS t1 LEFT ARRAY JOIN arr; SELECT * FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr, arr2; SELECT x, y FROM (SELECT [1, 2] AS arr, [3, 4] AS arr2) AS t1 ARRAY JOIN arr AS x, arr2 AS y; SELECT *,ch,cg FROM (SELECT 1) ARRAY JOIN ['1','2'] as cg, splitByChar(',','1,2') as ch; SELECT * FROM (SELECT [1,2] x) AS t1 ARRAY JOIN t1.*; sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/join.yml000066400000000000000000001557061451700765000236210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 691193ed2bf48b17d52586c0663c81b562d7f2c58769fcd4349b4cb2cdb06511 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . 
- naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: test2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: ALL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: INNER - keyword: ANY - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . - naked_identifier: ty1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 join_clause: - keyword: ALL - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: test2 - dot: . - naked_identifier: ty1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: test1 - dot: . 
                - naked_identifier: ty1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
          join_clause:
          - keyword: ANY
          - keyword: INNER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: test2
                - dot: .
                - naked_identifier: ty1
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: test1
                - dot: .
                - naked_identifier: ty1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
          join_clause:
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: test2
                - dot: .
                - naked_identifier: ty1
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: test1
                - dot: .
                - naked_identifier: ty1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
          join_clause:
          - keyword: LEFT
          - keyword: ANTI
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ANY
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ALL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ASOF
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
          join_clause:
          - keyword: ANTI
          - keyword: LEFT
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: SEMI
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: ANY
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: ALL
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: ASOF
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - bracketed:
            - start_bracket: (
            - naked_identifier: ty1
            - comma: ','
            - naked_identifier: ty2
            - end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ASOF
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
          join_clause:
          - keyword: LEFT
          - keyword: ANTI
          - keyword: OUTER
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: SEMI
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ANY
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ALL
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: LEFT
          - keyword: ASOF
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
          join_clause:
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: test2
                - dot: .
                - naked_identifier: ty1
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: test1
                - dot: .
                - naked_identifier: ty1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
          join_clause:
          - keyword: RIGHT
          - keyword: ANTI
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: ANY
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: ALL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
          join_clause:
          - keyword: ANTI
          - keyword: RIGHT
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: SEMI
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: ANY
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: ALL
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: ANTI
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: SEMI
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: ANY
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: RIGHT
          - keyword: ALL
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
          - naked_identifier: tbl1
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
            alias_expression:
              naked_identifier: ASOF
          join_clause:
            keyword: JOIN
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl2
            join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: tbl1
                - dot: .
                - naked_identifier: id
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: tbl2
                - dot: .
                - naked_identifier: id
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
          join_clause:
          - keyword: CROSS
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: FULL
          - keyword: ALL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: FULL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: test1
            alias_expression:
              keyword: as
              naked_identifier: t1
          join_clause:
          - keyword: FULL
          - keyword: ALL
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test2
          - keyword: USING
          - naked_identifier: ty1
          - comma: ','
          - naked_identifier: ty2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      column_reference:
                        naked_identifier: arr
                  from_clause:
                    keyword: FROM
                    from_expression:
                      from_expression_element:
                        table_expression:
                          table_reference:
                            naked_identifier: test1
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t2
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
              alias_expression:
                keyword: AS
                naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            naked_identifier: col
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: LEFT
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
              alias_expression:
                keyword: AS
                naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: LEFT
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                  - keyword: SELECT
                  - select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr
                  - comma: ','
                  - select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '3'
                      - comma: ','
                      - numeric_literal: '4'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr2
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: arr2
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: x
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: y
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                  - keyword: SELECT
                  - select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr
                  - comma: ','
                  - select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '3'
                      - comma: ','
                      - numeric_literal: '4'
                      - end_square_bracket: ']'
                      alias_expression:
                        keyword: AS
                        naked_identifier: arr2
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              column_reference:
                naked_identifier: arr
              alias_expression:
                keyword: AS
                naked_identifier: x
          - comma: ','
          - select_clause_element:
              column_reference:
                naked_identifier: arr2
              alias_expression:
                keyword: AS
                naked_identifier: y
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: ch
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: cg
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      numeric_literal: '1'
                end_bracket: )
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              array_literal:
              - start_square_bracket: '['
              - quoted_literal: "'1'"
              - comma: ','
              - quoted_literal: "'2'"
              - end_square_bracket: ']'
              alias_expression:
                keyword: as
                naked_identifier: cg
          - comma: ','
          - select_clause_element:
              function:
                function_name:
                  function_name_identifier: splitByChar
                bracketed:
                - start_bracket: (
                - expression:
                    quoted_literal: "','"
                - comma: ','
                - expression:
                    quoted_literal: "'1,2'"
                - end_bracket: )
              alias_expression:
                keyword: as
                naked_identifier: ch
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              bracketed:
                start_bracket: (
                select_statement:
                  select_clause:
                    keyword: SELECT
                    select_clause_element:
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '2'
                      - end_square_bracket: ']'
                      alias_expression:
                        naked_identifier: x
                end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t1
          array_join_clause:
          - keyword: ARRAY
          - keyword: JOIN
          - select_clause_element:
              wildcard_expression:
                wildcard_identifier:
                  naked_identifier: t1
                  dot: .
                  star: '*'
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/lambda_function.sql
SELECT arrayFirst(x -> x = 2, [1, 1, 2, 2]);
SELECT arrayFirst(x, y -> x != y, [1, 1, 2, 2], [1, 2, 2, 3]);

sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/lambda_function.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a6c721a05f4a158ae4714ce75d9126a350791a238eaaf6e33beae82779e27d3d
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: arrayFirst
            bracketed:
            - start_bracket: (
            - expression:
              - column_reference:
                  naked_identifier: x
              - lambda: ->
              - column_reference:
                  naked_identifier: x
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '2'
            - comma: ','
            - expression:
                array_literal:
                - start_square_bracket: '['
                - numeric_literal: '1'
                - comma: ','
                - numeric_literal: '1'
                - comma: ','
                - numeric_literal: '2'
                - comma: ','
                - numeric_literal: '2'
                - end_square_bracket: ']'
            - end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          function:
            function_name:
              function_name_identifier: arrayFirst
            bracketed:
            - start_bracket: (
            - expression:
                column_reference:
                  naked_identifier: x
            - comma: ','
            - expression:
              - column_reference:
                  naked_identifier: y
              - lambda: ->
              - column_reference:
                  naked_identifier: x
              - comparison_operator:
                - raw_comparison_operator: '!'
                - raw_comparison_operator: '='
              - column_reference:
                  naked_identifier: y
            - comma: ','
            - expression:
                array_literal:
                - start_square_bracket: '['
                - numeric_literal: '1'
                - comma: ','
                - numeric_literal: '1'
                - comma: ','
                - numeric_literal: '2'
                - comma: ','
                - numeric_literal: '2'
                - end_square_bracket: ']'
            - comma: ','
            - expression:
                array_literal:
                - start_square_bracket: '['
                - numeric_literal: '1'
                - comma: ','
                - numeric_literal: '2'
                - comma: ','
                - numeric_literal: '2'
                - comma: ','
                - numeric_literal: '3'
                - end_square_bracket: ']'
            - end_bracket: )
- statement_terminator: ;
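As the fixture headers above note, these YML files are regenerated with `python test/generate_parse_fixture_yml.py` rather than edited by hand. A minimal sketch of inspecting the same parse tree programmatically, assuming sqlfluff is installed and that the simple API's `sqlfluff.parse` returns a JSON-style record of the tree (the exact return shape is an assumption here, not confirmed by this archive):

```python
# Minimal sketch: parse one of the ClickHouse fixture statements and print
# the resulting parse record, which mirrors the YAML fixtures in this archive.
import sqlfluff

sql = "SELECT arrayFirst(x -> x = 2, [1, 1, 2, 2]);"
# dialect="clickhouse" selects the same dialect the fixtures above exercise.
record = sqlfluff.parse(sql, dialect="clickhouse")
print(record)  # nested structure of keywords, literals, and references
```

The equivalent CLI check is `sqlfluff parse <file.sql> --dialect clickhouse`, which prints the tree that the YAML fixtures serialize.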
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/system_statement.sql
-- RELOAD DICTIONARY
SELECT name, status FROM system.dictionaries;
-- RELOAD MODELS
SYSTEM RELOAD MODELS;
SYSTEM RELOAD MODELS ON CLUSTER cluster_name;
-- RELOAD MODEL
SYSTEM RELOAD MODEL /model/path;
SYSTEM RELOAD MODEL ON CLUSTER cluster_name /model/path;
-- DROP REPLICA
SYSTEM DROP REPLICA 'replica_name' FROM TABLE table;
SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table;
SYSTEM DROP REPLICA 'replica_name' FROM DATABASE database;
SYSTEM DROP REPLICA 'replica_name';
SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
-- Managing Distributed Tables
--
-- STOP DISTRIBUTED SENDS
SYSTEM STOP DISTRIBUTED SENDS distributed_table_name;
SYSTEM STOP DISTRIBUTED SENDS db.distributed_table_name;
--
-- FLUSH DISTRIBUTED
SYSTEM FLUSH DISTRIBUTED distributed_table_name;
SYSTEM FLUSH DISTRIBUTED db.distributed_table_name;
--
-- START DISTRIBUTED SENDS
SYSTEM START DISTRIBUTED SENDS distributed_table_name;
SYSTEM START DISTRIBUTED SENDS db.distributed_table_name;
-- Managing MergeTree Tables
--
-- STOP MERGES
SYSTEM STOP MERGES ON VOLUME volume_name;
SYSTEM STOP MERGES merge_tree_family_table_name;
SYSTEM STOP MERGES db.merge_tree_family_table_name;
--
-- START MERGES
SYSTEM START MERGES ON VOLUME volume_name;
SYSTEM START MERGES merge_tree_family_table_name;
SYSTEM START MERGES db.merge_tree_family_table_name;
--
-- STOP TTL MERGES
SYSTEM STOP TTL MERGES;
SYSTEM STOP TTL MERGES db.merge_tree_family_table_name;
SYSTEM STOP TTL MERGES merge_tree_family_table_name;
--
-- START TTL MERGES
SYSTEM START TTL MERGES;
SYSTEM START TTL MERGES merge_tree_family_table_name;
SYSTEM START TTL MERGES db.merge_tree_family_table_name;
--
-- STOP MOVES
SYSTEM STOP MOVES;
SYSTEM STOP MOVES merge_tree_family_table_name;
SYSTEM STOP MOVES db.merge_tree_family_table_name;
--
-- START MOVES
SYSTEM START MOVES;
SYSTEM START MOVES merge_tree_family_table_name;
SYSTEM START MOVES db.merge_tree_family_table_name;
--
-- SYSTEM UNFREEZE
SYSTEM UNFREEZE WITH NAME backup_name;
-- Managing ReplicatedMergeTree Tables
--
-- STOP FETCHES
SYSTEM STOP FETCHES;
SYSTEM STOP FETCHES replicated_merge_tree_family_table_name;
SYSTEM STOP FETCHES db.replicated_merge_tree_family_table_name;
--
-- START FETCHES
SYSTEM START FETCHES;
SYSTEM START FETCHES replicated_merge_tree_family_table_name;
SYSTEM START FETCHES db.replicated_merge_tree_family_table_name;
--
-- STOP REPLICATED SENDS
SYSTEM STOP REPLICATED SENDS;
SYSTEM STOP REPLICATED SENDS replicated_merge_tree_family_table_name;
SYSTEM STOP REPLICATED SENDS db.replicated_merge_tree_family_table_name;
--
-- START REPLICATED SENDS
SYSTEM START REPLICATED SENDS;
SYSTEM START REPLICATED SENDS replicated_merge_tree_family_table_name;
SYSTEM START REPLICATED SENDS db.replicated_merge_tree_family_table_name;
--
-- STOP REPLICATION QUEUES
SYSTEM STOP REPLICATION QUEUES;
SYSTEM STOP REPLICATION QUEUES replicated_merge_tree_family_table_name;
SYSTEM STOP REPLICATION QUEUES db.replicated_merge_tree_family_table_name;
--
-- START REPLICATION QUEUES
SYSTEM START REPLICATION QUEUES;
SYSTEM START REPLICATION QUEUES replicated_merge_tree_family_table_name;
SYSTEM START REPLICATION QUEUES db.replicated_merge_tree_family_table_name;
--
-- SYNC REPLICA
SYSTEM SYNC REPLICA replicated_merge_tree_family_table_name;
SYSTEM SYNC REPLICA db.replicated_merge_tree_family_table_name;
SYSTEM SYNC REPLICA replicated_merge_tree_family_table_name STRICT;
SYSTEM SYNC REPLICA ON CLUSTER cluster_name replicated_merge_tree_family_table_name;
SYSTEM SYNC REPLICA ON CLUSTER cluster_name replicated_merge_tree_family_table_name STRICT;
SYSTEM SYNC REPLICA ON CLUSTER cluster_name db.replicated_merge_tree_family_table_name;
SYSTEM SYNC REPLICA ON CLUSTER cluster_name db.replicated_merge_tree_family_table_name STRICT;
--
-- RESTART REPLICA
SYSTEM RESTART REPLICA replicated_merge_tree_family_table_name;
SYSTEM RESTART REPLICA db.replicated_merge_tree_family_table_name;
--
-- RESTORE REPLICA
SYSTEM RESTORE REPLICA replicated_merge_tree_family_table_name;
SYSTEM RESTORE REPLICA db.replicated_merge_tree_family_table_name;
SYSTEM RESTORE REPLICA replicated_merge_tree_family_table_name ON CLUSTER cluster_name;
SYSTEM RESTORE REPLICA db.replicated_merge_tree_family_table_name ON CLUSTER cluster_name;
--
-- DROP FILESYSTEM CACHE
SYSTEM DROP FILESYSTEM CACHE;
--
-- SYNC FILE CACHE
SYSTEM SYNC FILE CACHE;
sqlfluff-2.3.5/test/fixtures/dialects/clickhouse/system_statement.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 987ac8b8f29bfe092c81571ac50c3e3a9658807051484df125a605c3893d2488
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: status
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: system
              - dot: .
              - naked_identifier: dictionaries
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_model_segment:
      - keyword: RELOAD
      - keyword: MODELS
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_model_segment:
      - keyword: RELOAD
      - keyword: MODELS
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_model_segment:
      - keyword: RELOAD
      - keyword: MODEL
      - path_segment:
        - slash: /
        - path_segment: model
        - slash: /
        - path_segment: path
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_model_segment:
      - keyword: RELOAD
      - keyword: MODEL
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
      - path_segment:
        - slash: /
        - path_segment: model
        - slash: /
        - path_segment: path
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: DROP
      - keyword: REPLICA
      - quoted_identifier: "'replica_name'"
      - keyword: FROM
      - keyword: TABLE
      - table_reference:
          naked_identifier: table
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: DROP
      - keyword: REPLICA
      - quoted_identifier: "'replica_name'"
      - keyword: FROM
      - keyword: TABLE
      - table_reference:
        - naked_identifier: database
        - dot: .
        - naked_identifier: table
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: DROP
      - keyword: REPLICA
      - quoted_identifier: "'replica_name'"
      - keyword: FROM
      - keyword: DATABASE
      - object_reference:
          naked_identifier: database
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: DROP
      - keyword: REPLICA
      - quoted_identifier: "'replica_name'"
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: DROP
      - keyword: REPLICA
      - quoted_identifier: "'replica_name'"
      - keyword: FROM
      - keyword: ZKPATH
      - path_segment:
          quoted_literal: "'/path/to/table/in/zk'"
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: STOP
      - keyword: DISTRIBUTED
      - keyword: SENDS
      - table_reference:
          naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: STOP
      - keyword: DISTRIBUTED
      - keyword: SENDS
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: FLUSH
      - keyword: DISTRIBUTED
      - table_reference:
          naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: FLUSH
      - keyword: DISTRIBUTED
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: START
      - keyword: DISTRIBUTED
      - keyword: SENDS
      - table_reference:
          naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_distributed_segment:
      - keyword: START
      - keyword: DISTRIBUTED
      - keyword: SENDS
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: distributed_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: STOP
      - keyword: MERGES
      - keyword: 'ON'
      - keyword: VOLUME
      - object_reference:
          naked_identifier: volume_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: STOP
      - keyword: MERGES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: STOP
      - keyword: MERGES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: START
      - keyword: MERGES
      - keyword: 'ON'
      - keyword: VOLUME
      - object_reference:
          naked_identifier: volume_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: START
      - keyword: MERGES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_merges_segment:
      - keyword: START
      - keyword: MERGES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: STOP
      - keyword: TTL
      - keyword: MERGES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: STOP
      - keyword: TTL
      - keyword: MERGES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: STOP
      - keyword: TTL
      - keyword: MERGES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: START
      - keyword: TTL
      - keyword: MERGES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: START
      - keyword: TTL
      - keyword: MERGES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_ttl_merges_segment:
      - keyword: START
      - keyword: TTL
      - keyword: MERGES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: STOP
      - keyword: MOVES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: STOP
      - keyword: MOVES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: STOP
      - keyword: MOVES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: START
      - keyword: MOVES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: START
      - keyword: MOVES
      - table_reference:
          naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_moves_segment:
      - keyword: START
      - keyword: MOVES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_unfreeze_segment:
      - keyword: UNFREEZE
      - keyword: WITH
      - keyword: NAME
      - object_reference:
          naked_identifier: backup_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: STOP
      - keyword: FETCHES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: STOP
      - keyword: FETCHES
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: STOP
      - keyword: FETCHES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: START
      - keyword: FETCHES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: START
      - keyword: FETCHES
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_fetches_segment:
      - keyword: START
      - keyword: FETCHES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: STOP
      - keyword: REPLICATED
      - keyword: SENDS
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: STOP
      - keyword: REPLICATED
      - keyword: SENDS
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: STOP
      - keyword: REPLICATED
      - keyword: SENDS
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: START
      - keyword: REPLICATED
      - keyword: SENDS
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: START
      - keyword: REPLICATED
      - keyword: SENDS
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replicated_segment:
      - keyword: START
      - keyword: REPLICATED
      - keyword: SENDS
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: STOP
      - keyword: REPLICATION
      - keyword: QUEUES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: STOP
      - keyword: REPLICATION
      - keyword: QUEUES
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: STOP
      - keyword: REPLICATION
      - keyword: QUEUES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: START
      - keyword: REPLICATION
      - keyword: QUEUES
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: START
      - keyword: REPLICATION
      - keyword: QUEUES
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replication_segment:
      - keyword: START
      - keyword: REPLICATION
      - keyword: QUEUES
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
      - keyword: STRICT
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
      - keyword: STRICT
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: SYNC
      - keyword: REPLICA
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
      - keyword: STRICT
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTART
      - keyword: REPLICA
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTART
      - keyword: REPLICA
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTORE
      - keyword: REPLICA
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTORE
      - keyword: REPLICA
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTORE
      - keyword: REPLICA
      - table_reference:
          naked_identifier: replicated_merge_tree_family_table_name
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_replica_segment:
      - keyword: RESTORE
      - keyword: REPLICA
      - table_reference:
        - naked_identifier: db
        - dot: .
        - naked_identifier: replicated_merge_tree_family_table_name
      - on_cluster_clause:
        - keyword: 'ON'
        - keyword: CLUSTER
        - naked_identifier: cluster_name
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_filesystem_segment:
      - keyword: DROP
      - keyword: FILESYSTEM
      - keyword: CACHE
- statement_terminator: ;
- statement:
    system_statement:
      keyword: SYSTEM
      system_file_segment:
      - keyword: SYNC
      - keyword: FILE
      - keyword: CACHE
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/
sqlfluff-2.3.5/test/fixtures/dialects/databricks/.sqlfluff
[sqlfluff]
dialect = databricks
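The per-directory `.sqlfluff` file above pins the dialect for every fixture in the directory, so the test harness does not need to pass a dialect explicitly. A minimal sketch of the same idea via the simple API, assuming sqlfluff is installed and that `sqlfluff.lint` accepts a dialect name and returns a list of violation dicts:

```python
# Minimal sketch: lint a Databricks-dialect statement directly, mirroring
# what the directory-level .sqlfluff configuration sets up for the fixtures.
import sqlfluff

violations = sqlfluff.lint("SELECT 1", dialect="databricks")
print(violations)  # a list of violation dicts; empty when the SQL is clean
```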
sqlfluff-2.3.5/test/fixtures/dialects/databricks/alter_catalog.sql
-- Transfer ownership of the catalog to another user
ALTER CATALOG some_cat OWNER TO `alf@melmak.et`;

-- SET is allowed as an optional keyword
ALTER CATALOG some_cat SET OWNER TO `alf@melmak.et`;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/alter_catalog.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 40b364e358e1643fde75134acd974436a1bb53e904e64284bbf1a163b6490b40
file:
- statement:
    alter_catalog_statement:
    - keyword: ALTER
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: some_cat
    - keyword: OWNER
    - keyword: TO
    - quoted_identifier: '`alf@melmak.et`'
- statement_terminator: ;
- statement:
    alter_catalog_statement:
    - keyword: ALTER
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: some_cat
    - keyword: SET
    - keyword: OWNER
    - keyword: TO
    - quoted_identifier: '`alf@melmak.et`'
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/create_catalog.sql
-- Create catalog `customer_cat`.
-- This throws exception if catalog with name customer_cat already exists.
CREATE CATALOG customer_cat;

-- Create catalog `customer_cat` only if catalog with same name doesn't exist.
CREATE CATALOG IF NOT EXISTS customer_cat;

-- Create catalog `customer_cat` only if catalog with same name doesn't exist, with a comment.
CREATE CATALOG IF NOT EXISTS customer_cat COMMENT 'This is customer catalog';

sqlfluff-2.3.5/test/fixtures/dialects/databricks/create_catalog.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a2d4e1bf9fa351097aede205a0244d067dc5a05213354eff1fdc94160fc227db
file:
- statement:
    create_catalog_statement:
    - keyword: CREATE
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: customer_cat
- statement_terminator: ;
- statement:
    create_catalog_statement:
    - keyword: CREATE
    - keyword: CATALOG
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - catalog_reference:
        naked_identifier: customer_cat
- statement_terminator: ;
- statement:
    create_catalog_statement:
    - keyword: CREATE
    - keyword: CATALOG
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - catalog_reference:
        naked_identifier: customer_cat
    - keyword: COMMENT
    - quoted_literal: "'This is customer catalog'"
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/drop_catalog.sql
-- Drop the catalog and its schemas
DROP CATALOG vaccine CASCADE;

-- Drop the catalog using IF EXISTS and only if it is empty.
DROP CATALOG IF EXISTS vaccine RESTRICT;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/drop_catalog.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7ea0dd1142a5697ad4fedde41ded6e228ee8f187261cbe617e7e9f231fe5a7e6
file:
- statement:
    drop_catalog_statement:
    - keyword: DROP
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: vaccine
    - keyword: CASCADE
- statement_terminator: ;
- statement:
    drop_catalog_statement:
    - keyword: DROP
    - keyword: CATALOG
    - keyword: IF
    - keyword: EXISTS
    - catalog_reference:
        naked_identifier: vaccine
    - keyword: RESTRICT
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/select.sql
select * from shopify_cz.order
;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/select.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: bf986346fed8101687984158446015a57adda6a314601c4bd98977bd5a5c3a8b
file:
  statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: shopify_cz
              - dot: .
              - naked_identifier: order
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/set_time_zone.sql
SET TIME ZONE LOCAL;
SET TIME ZONE 'America/Los_Angeles';
SET TIME ZONE '+08:00';
SET TIME ZONE INTERVAL 1 HOUR 30 MINUTES;
SET TIME ZONE INTERVAL '08:30:00' HOUR TO SECOND;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/set_time_zone.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 34b8602b50cd1a742888a49293b066a3d19c413df01ca983419426fe46d1cd63
file:
- statement:
    set_timezone_statement:
    - keyword: SET
    - keyword: TIME
    - keyword: ZONE
    - keyword: LOCAL
- statement_terminator: ;
- statement:
    set_timezone_statement:
    - keyword: SET
    - keyword: TIME
    - keyword: ZONE
    - quoted_literal: "'America/Los_Angeles'"
- statement_terminator: ;
- statement:
    set_timezone_statement:
    - keyword: SET
    - keyword: TIME
    - keyword: ZONE
    - quoted_literal: "'+08:00'"
- statement_terminator: ;
- statement:
    set_timezone_statement:
    - keyword: SET
    - keyword: TIME
    - keyword: ZONE
    - interval_expression:
      - keyword: INTERVAL
      - interval_literal:
          numeric_literal: '1'
          date_part: HOUR
      - interval_literal:
          numeric_literal: '30'
          date_part: MINUTES
- statement_terminator: ;
- statement:
    set_timezone_statement:
    - keyword: SET
    - keyword: TIME
    - keyword: ZONE
    - interval_expression:
        keyword: INTERVAL
        interval_literal:
        - signed_quoted_literal: "'08:30:00'"
        - date_part: HOUR
        - keyword: TO
        - date_part: SECOND
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/use_catalog.sql
USE CATALOG catalog_name;
-- Use the 'hive_metastore' .
USE CATALOG hive_metastore;
USE CATALOG 'hive_metastore';

-- Use the 'some_catalog'
USE CATALOG `some_catalog`;
USE CATALOG some_cat;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/use_catalog.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ff36608a5d372437c1110254e09a2af934a263c0e5b4c0e9a6611df9ccc4e9a7
file:
- statement:
    use_catalog_statement:
    - keyword: USE
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: catalog_name
- statement_terminator: ;
- statement:
    use_catalog_statement:
    - keyword: USE
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: hive_metastore
- statement_terminator: ;
- statement:
    use_catalog_statement:
    - keyword: USE
    - keyword: CATALOG
    - catalog_reference:
        quoted_identifier: "'hive_metastore'"
- statement_terminator: ;
- statement:
    use_catalog_statement:
    - keyword: USE
    - keyword: CATALOG
    - catalog_reference:
        quoted_identifier: '`some_catalog`'
- statement_terminator: ;
- statement:
    use_catalog_statement:
    - keyword: USE
    - keyword: CATALOG
    - catalog_reference:
        naked_identifier: some_cat
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/use_database.sql
USE database_name;

-- Use the 'userdb'
USE userdb;

-- Use the 'userdb1'
USE userdb1;

-- Keywords SCHEMA and DATABASE are interchangeable.
USE DATABASE database_name;
USE SCHEMA database_name;

sqlfluff-2.3.5/test/fixtures/dialects/databricks/use_database.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e0a4c00d164b502c2bde84b7825b98a56478c8eed83c13cd9dc9dcd6f0e2bbb2 file: - statement: use_statement: keyword: USE database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: naked_identifier: userdb1 - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: DATABASE - database_reference: naked_identifier: database_name - statement_terminator: ; - statement: use_database_statement: - keyword: USE - keyword: SCHEMA - database_reference: naked_identifier: database_name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/db2/000077500000000000000000000000001451700765000204375ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/db2/.sqlfluff000066400000000000000000000000311451700765000222540ustar00rootroot00000000000000[sqlfluff] dialect = db2 sqlfluff-2.3.5/test/fixtures/dialects/db2/case.sql000066400000000000000000000001461451700765000220740ustar00rootroot00000000000000SELECT CASE WHEN ROLL = 1 THEN DAG WHEN ROLL > 1 THEN DAG_MOD - 1 DAYS END FROM MY_TABLE; sqlfluff-2.3.5/test/fixtures/dialects/db2/case.yml000066400000000000000000000034221451700765000220760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3d61bb2d36284751454c0b350bb2ff492fec31530e7c3672c64f1caac175325 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG_MOD binary_operator: '-' numeric_literal: '1' keyword: DAYS - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.sql000066400000000000000000000002221451700765000312110ustar00rootroot00000000000000-- Valid field names with # pound/hash sign CREATE TABLE test ( my_field_1# decimal(2,0), #my_field_1 decimal(2,0), # decimal(2,0) ); sqlfluff-2.3.5/test/fixtures/dialects/db2/create_table_field_name_with_pound_sign.yml000066400000000000000000000032621451700765000312220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 13105ac0dbc90718b62d6e22bace4ccb56603dbc9629cffda3add9014a2307a1 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_definition: naked_identifier: my_field_1# data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: '#my_field_1' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_definition: naked_identifier: '#' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '0' - end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/db2/day_unit.sql000066400000000000000000000001451451700765000227740ustar00rootroot00000000000000SELECT CASE WHEN ROLL = 1 THEN DAG WHEN ROLL > 1 THEN DAG_MOD - 1 DAY END FROM MY_TABLE; sqlfluff-2.3.5/test/fixtures/dialects/db2/day_unit.yml000066400000000000000000000034211451700765000227760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac1d01c1f619dd06023730cbe95c7e1485af36e400fb5488abe29aaf84f65736 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: ROLL comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' - keyword: THEN - expression: column_reference: naked_identifier: DAG_MOD binary_operator: '-' numeric_literal: '1' keyword: DAY - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/db2/function_within_group.sql000066400000000000000000000001521451700765000256010ustar00rootroot00000000000000SELECT LISTAGG(A_COLUMN_NAME, 'X') WITHIN GROUP(ORDER BY A_COLUMN_NAME) AS MY_COLUMN FROM A_TABLE sqlfluff-2.3.5/test/fixtures/dialects/db2/function_within_group.yml000066400000000000000000000030051451700765000256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e52ad2a7bebefe270132e4f297ad537f3a8b1ef1f76c0e5a39757f83c170eeca file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LISTAGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: A_COLUMN_NAME - comma: ',' - expression: quoted_literal: "'X'" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: A_COLUMN_NAME end_bracket: ) alias_expression: keyword: AS naked_identifier: MY_COLUMN from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A_TABLE sqlfluff-2.3.5/test/fixtures/dialects/db2/over.sql000066400000000000000000000001261451700765000221320ustar00rootroot00000000000000SELECT RANK() OVER (PARTITION BY ABCD ORDER BY EFGH DESC) AS A_RANK FROM A_TABLE; sqlfluff-2.3.5/test/fixtures/dialects/db2/over.yml000066400000000000000000000031311451700765000221330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 787b10f868c97797f33bd2fba027084919e3ab3beaa5f94f526a7bdd71d37f29 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ABCD orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: EFGH - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: A_RANK from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A_TABLE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/db2/where_like.sql000066400000000000000000000000561451700765000232770ustar00rootroot00000000000000SELECT col1 FROM test WHERE col1 LIKE '%sql'; sqlfluff-2.3.5/test/fixtures/dialects/db2/where_like.yml000066400000000000000000000017241451700765000233040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 667e91a4d51c15009dc03ca939a17f8a46ea3e744e8578f12d842541892ab8e9 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 keyword: LIKE quoted_literal: "'%sql'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/000077500000000000000000000000001451700765000212245ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/duckdb/.sqlfluff000066400000000000000000000000341451700765000230440ustar00rootroot00000000000000[sqlfluff] dialect = duckdb sqlfluff-2.3.5/test/fixtures/dialects/duckdb/group_order_by_all.sql000066400000000000000000000010341451700765000256140ustar00rootroot00000000000000SELECT systems, planets, cities, cantinas, SUM(scum + villainy) as total_scum_and_villainy FROM star_wars_locations GROUP BY ALL ; SELECT * EXCLUDE (cantinas, booths, scum, villainy), SUM(scum + villainy) as total_scum_and_villainy FROM star_wars_locations GROUP BY ALL ; SELECT age, sum(civility) as total_civility FROM star_wars_universe GROUP BY ALL ORDER BY ALL ; SELECT x_wing, proton_torpedoes, --targeting_computer FROM luke_whats_wrong GROUP BY x_wing, proton_torpedoes, ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/group_order_by_all.yml000066400000000000000000000120011451700765000256120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2669ec22912c0564cc026fb2472887b407e941b4790df8a2407f45fc0611f10a file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: systems - comma: ',' - select_clause_element: column_reference: naked_identifier: planets - comma: ',' - select_clause_element: column_reference: naked_identifier: cities - comma: ',' - select_clause_element: column_reference: naked_identifier: cantinas - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: - column_reference: naked_identifier: scum - binary_operator: + - column_reference: naked_identifier: villainy end_bracket: ) alias_expression: keyword: as naked_identifier: total_scum_and_villainy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_locations groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: cantinas - comma: ',' - column_reference: naked_identifier: booths - comma: ',' - column_reference: naked_identifier: scum - comma: ',' - column_reference: naked_identifier: villainy - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: - column_reference: naked_identifier: scum - binary_operator: + - column_reference: naked_identifier: villainy end_bracket: ) alias_expression: keyword: as naked_identifier: total_scum_and_villainy from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_locations groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: civility end_bracket: ) alias_expression: keyword: as naked_identifier: total_civility from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_universe groupby_clause: - keyword: GROUP - keyword: BY - keyword: ALL orderby_clause: - keyword: ORDER - keyword: BY - keyword: ALL - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: x_wing - comma: ',' - select_clause_element: column_reference: naked_identifier: proton_torpedoes - comma: ',' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: luke_whats_wrong groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: x_wing - comma: ',' - column_reference: naked_identifier: proton_torpedoes - comma: ',' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/list_struct.sql000066400000000000000000000007611451700765000243300ustar00rootroot00000000000000SELECT ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list, {name: 'Star Destroyer', common_misconceptions: 'Can''t in fact destroy a star'} as 
star_destroyer_facts ; SELECT starfighter_list[2:2] as dont_forget_the_b_wing FROM (SELECT ['A-Wing', 'B-Wing', 'X-Wing', 'Y-Wing'] as starfighter_list); SELECT 'I love you! I know'[:-3] as nearly_soloed; SELECT planet.name, planet."Amount of sand" FROM (SELECT {name: 'Tatooine', 'Amount of sand': 'High'} as planet) ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/list_struct.yml000066400000000000000000000117161451700765000243340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b65a1a966a8dd1bcc2bd1e9db40e00d2103e47e8ba47c4f5af5e3c957007a140 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: array_literal: - start_square_bracket: '[' - quoted_literal: "'A-Wing'" - comma: ',' - quoted_literal: "'B-Wing'" - comma: ',' - quoted_literal: "'X-Wing'" - comma: ',' - quoted_literal: "'Y-Wing'" - end_square_bracket: ']' alias_expression: keyword: as naked_identifier: starfighter_list - comma: ',' - select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: naked_identifier: name colon: ':' quoted_literal: "'Star Destroyer'" - comma: ',' - object_literal_element: naked_identifier: common_misconceptions colon: ':' quoted_literal: "'Can''t in fact destroy a star'" - end_curly_bracket: '}' alias_expression: keyword: as naked_identifier: star_destroyer_facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: starfighter_list array_accessor: - start_square_bracket: '[' - numeric_literal: '2' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' alias_expression: keyword: as naked_identifier: dont_forget_the_b_wing from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: array_literal: - start_square_bracket: '[' - quoted_literal: "'A-Wing'" - comma: ',' - quoted_literal: "'B-Wing'" - comma: ',' - quoted_literal: "'X-Wing'" - comma: ',' - quoted_literal: "'Y-Wing'" - end_square_bracket: ']' alias_expression: keyword: as naked_identifier: starfighter_list end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'I love you! I know'" array_accessor: start_square_bracket: '[' slice: ':' numeric_literal: sign_indicator: '-' numeric_literal: '3' end_square_bracket: ']' alias_expression: keyword: as naked_identifier: nearly_soloed - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: planet - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: planet dot: . 
quoted_identifier: '"Amount of sand"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: naked_identifier: name colon: ':' quoted_literal: "'Tatooine'" - comma: ',' - object_literal_element: - quoted_literal: "'Amount of sand'" - colon: ':' - quoted_literal: "'High'" - end_curly_bracket: '}' alias_expression: keyword: as naked_identifier: planet end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select.sql000066400000000000000000000002161451700765000232230ustar00rootroot00000000000000select 10 // 5; SELECT * FROM capitals UNION BY NAME SELECT * FROM weather; SELECT * FROM capitals UNION ALL BY NAME SELECT * FROM weather; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select.yml000066400000000000000000000051131451700765000232260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b336d1dcea0dab4bda87eab355124f897a8ebf8cc5181cd161ed477da253a0af file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - numeric_literal: '10' - binary_operator: // - numeric_literal: '5' - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: capitals - set_operator: - keyword: UNION - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: weather - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: capitals - set_operator: - keyword: UNION - keyword: ALL - keyword: BY - keyword: NAME - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: weather - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_exclude.sql000066400000000000000000000003211451700765000247310ustar00rootroot00000000000000SELECT * EXCLUDE (jar_jar_binks, midichlorians) FROM star_wars; SELECT sw.* EXCLUDE (jar_jar_binks, midichlorians), ff.* EXCLUDE cancellation FROM star_wars sw, firefly ff ; SELECT * FROM star_wars; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_exclude.yml000066400000000000000000000055311451700765000247430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1d2d32acf940f030e8c387c2b2f487c10e99d359a2bfad44face163eb61b380 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: jar_jar_binks - comma: ',' - column_reference: naked_identifier: midichlorians - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: sw dot: . star: '*' keyword: EXCLUDE bracketed: - start_bracket: ( - column_reference: naked_identifier: jar_jar_binks - comma: ',' - column_reference: naked_identifier: midichlorians - end_bracket: ) - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: ff dot: . star: '*' keyword: EXCLUDE column_reference: naked_identifier: cancellation from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars alias_expression: naked_identifier: sw - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: firefly alias_expression: naked_identifier: ff - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_quoted.sql000066400000000000000000000001311451700765000246000ustar00rootroot00000000000000SELECT count(*) FROM 'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet'; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_quoted.yml000066400000000000000000000017451451700765000246160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41a79f9b8c4f36ac63b71783587e096a124c2d673dbc6a62081db7f6e06589a1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: "'https://shell.duckdb.org/data/tpch/0_01/parquet/lineitem.parquet'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_replace.sql000066400000000000000000000001641451700765000247200ustar00rootroot00000000000000SELECT * REPLACE (movie_count+3 as movie_count, show_count*1000 as show_count) FROM star_wars_owned_by_disney ; sqlfluff-2.3.5/test/fixtures/dialects/duckdb/select_replace.yml000066400000000000000000000027471451700765000247330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ec9e5ef917d8d10cde31c8c09c7bfa140ec0982fbbfd96e6da705eced0c579c0 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' keyword: REPLACE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: movie_count binary_operator: + numeric_literal: '3' - alias_expression: keyword: as naked_identifier: movie_count - comma: ',' - expression: column_reference: naked_identifier: show_count binary_operator: '*' numeric_literal: '1000' - alias_expression: keyword: as naked_identifier: show_count - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: star_wars_owned_by_disney statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/000077500000000000000000000000001451700765000212635ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/exasol/.sqlfluff000066400000000000000000000000341451700765000231030ustar00rootroot00000000000000[sqlfluff] dialect = exasol sqlfluff-2.3.5/test/fixtures/dialects/exasol/Add_Days.sql000066400000000000000000000001441451700765000234530ustar00rootroot00000000000000SELECT ADD_DAYS(DATE '2000-02-28', 1) AD1, ADD_DAYS(TIMESTAMP '2001-02-28 12:00:00', 1) AD2; sqlfluff-2.3.5/test/fixtures/dialects/exasol/Add_Days.yml000066400000000000000000000027071451700765000234640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 95cbd63d33edd7c0a0848cbdf092d886a618176c61f6680e17db1b6e432dc1e3 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ADD_DAYS bracketed: - start_bracket: ( - expression: keyword: DATE date_constructor_literal: "'2000-02-28'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) alias_expression: naked_identifier: AD1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ADD_DAYS bracketed: - start_bracket: ( - expression: keyword: TIMESTAMP date_constructor_literal: "'2001-02-28 12:00:00'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) alias_expression: naked_identifier: AD2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/access_statement.sql000066400000000000000000000022641451700765000253350ustar00rootroot00000000000000-- System privileges GRANT CREATE SCHEMA TO role1; GRANT SELECT ANY TABLE TO user1 WITH ADMIN OPTION; -- Object privileges GRANT INSERT ON my_schema.my_table TO user1, role2; GRANT SELECT ON VIEW my_schema.my_view TO user1; -- Access on my_view for all users GRANT SELECT ON my_schema.my_view TO PUBLIC; -- Roles GRANT role1 TO user1, user2 WITH ADMIN OPTION; GRANT role2 TO role1; -- Impersonation GRANT IMPERSONATION ON user2 TO user1; GRANT IMPERSONATION ON "user2" TO user1; GRANT IMPERSONATION ON user2 TO "user1"; -- Connection GRANT CONNECTION my_connection TO user1; GRANT CONNECTION my_connection TO "ADMIN"; -- Access to connection details for certain script GRANT ACCESS ON CONNECTION my_connection FOR SCRIPT script1 TO user1; GRANT ACCESS ON CONNECTION "my_connection" FOR SCRIPT "script1" TO "user1"; REVOKE CREATE SCHEMA FROM role1,user3; -- Object privileges REVOKE SELECT, INSERT ON my_schema.my_table FROM user1, role2; REVOKE ALL PRIVILEGES ON VIEW my_schema.my_view FROM PUBLIC; -- Role REVOKE role1 FROM user1, user2; -- Impersonation REVOKE IMPERSONATION ON user2 FROM user1; -- Connections REVOKE CONNECTION my_connection FROM user1; REVOKE CONNECTION my_connection FROM "ADMIN"; sqlfluff-2.3.5/test/fixtures/dialects/exasol/access_statement.yml000066400000000000000000000155441451700765000253440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2bb545f4adeb85060bff12a0f95565113146d63e1b8dcd7f456fe621a241f985 file: - statement: access_statement: keyword: GRANT grant_revoke_system_privileges: system_privilege: - keyword: CREATE - keyword: SCHEMA keyword: TO naked_identifier: role1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_system_privileges: - system_privilege: - keyword: SELECT - keyword: ANY - keyword: TABLE - keyword: TO - naked_identifier: user1 - keyword: WITH - keyword: ADMIN - keyword: OPTION - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: INSERT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - keyword: TO - naked_identifier: user1 - comma: ',' - naked_identifier: role2 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: SELECT - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_object_privileges: - object_privilege: keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: TO - naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_roles: - role_reference: naked_identifier: role1 - keyword: TO - role_reference: naked_identifier: user1 - comma: ',' - role_reference: naked_identifier: user2 - keyword: WITH - keyword: ADMIN - keyword: OPTION - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_roles: - role_reference: naked_identifier: role2 - keyword: TO - role_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - quoted_identifier: '"user2"' - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: TO - quoted_identifier: '"user1"' - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: TO - quoted_identifier: '"ADMIN"' - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection_restricted: - keyword: ACCESS - keyword: 'ON' - keyword: CONNECTION - naked_identifier: my_connection - keyword: FOR - keyword: SCRIPT - naked_identifier: script1 - keyword: TO - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: GRANT grant_revoke_connection_restricted: - keyword: ACCESS - keyword: 'ON' - keyword: CONNECTION - quoted_identifier: '"my_connection"' - keyword: FOR - keyword: SCRIPT - quoted_identifier: '"script1"' - keyword: TO - quoted_identifier: '"user1"' - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_system_privileges: - system_privilege: - keyword: CREATE - keyword: SCHEMA - keyword: FROM - naked_identifier: role1 - comma: ',' - naked_identifier: user3 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_object_privileges: - object_privilege: keyword: SELECT - comma: ',' - object_privilege: keyword: INSERT - keyword: 'ON' - object_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - keyword: FROM - naked_identifier: user1 - comma: ',' - naked_identifier: role2 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_object_privileges: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - keyword: VIEW - object_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_view - keyword: FROM - naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_roles: - role_reference: naked_identifier: role1 - keyword: FROM - role_reference: naked_identifier: user1 - comma: ',' - role_reference: naked_identifier: user2 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_impersonation: - keyword: IMPERSONATION - keyword: 'ON' - naked_identifier: user2 - keyword: FROM - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: FROM - naked_identifier: user1 - statement_terminator: ; - statement: access_statement: keyword: REVOKE grant_revoke_connection: - keyword: CONNECTION - naked_identifier: my_connection - keyword: FROM - quoted_identifier: '"ADMIN"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_connection.sql000066400000000000000000000000741451700765000253330ustar00rootroot00000000000000ALTER CONNECTION exa_connection TO '192.168.6.11..14:8564'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_connection.yml000066400000000000000000000011761451700765000253410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bb83dd281f2b9719c7e55dd6a8bbf1d0cf1228b098ef48a4372c4f699d5c0d09 file: statement: alter_connection: - keyword: ALTER - keyword: CONNECTION - naked_identifier: exa_connection - keyword: TO - connection_definition: quoted_literal: "'192.168.6.11..14:8564'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_consumer_group.sql000066400000000000000000000003641451700765000262450ustar00rootroot00000000000000ALTER CONSUMER GROUP TEST_TEAM SET PRECEDENCE = '800', CPU_WEIGHT = '150', GROUP_TEMP_DB_RAM_LIMIT = '10G', SESSION_TEMP_DB_RAM_LIMIT = '5G', QUERY_TIMEOUT = 60, IDLE_TIMEOUT = 3600; ALTER CONSUMER GROUP "ADMIN" SET PRECEDENCE = '800'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_consumer_group.yml000066400000000000000000000037421451700765000262520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 82fc2f28c5517d75f43744d817dc64b5d047d7cc3590bea26013a71265f97050 file: - statement: alter_consumer_group_statement: - keyword: ALTER - keyword: CONSUMER - keyword: GROUP - naked_identifier: TEST_TEAM - keyword: SET - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'150'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10G'" - comma: ',' - consumer_group_parameter: keyword: SESSION_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'5G'" - comma: ',' - consumer_group_parameter: keyword: QUERY_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - comma: ',' - consumer_group_parameter: keyword: IDLE_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '3600' - statement_terminator: ; - statement: alter_consumer_group_statement: - keyword: ALTER - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - keyword: SET - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_role.sql000066400000000000000000000002061451700765000241320ustar00rootroot00000000000000ALTER ROLE role1 SET CONSUMER_GROUP = CEO; ALTER ROLE role2 SET CONSUMER_GROUP = NULL; ALTER ROLE "TABLE" SET CONSUMER_GROUP = "DaY"; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_role.yml000066400000000000000000000024251451700765000241410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b81ce1404060a5c313267addf68a77067bb678e07eef7b7b095baf9cab8a5d97 file: - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: role1 - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - naked_identifier: CEO - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: naked_identifier: role2 - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"TABLE"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"DaY"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_schema_statement.sql000066400000000000000000000002421451700765000265150ustar00rootroot00000000000000ALTER SCHEMA s1 CHANGE OWNER user1; ALTER SCHEMA s1 CHANGE OWNER role1; ALTER SCHEMA s1 SET RAW_SIZE_LIMIT = 128*1024*1024; ALTER SCHEMA s1 CHANGE OWNER "role1"; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_schema_statement.yml000066400000000000000000000027401451700765000265240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6caef570ecdf6ec635d22eccd23434c239efca88596bf79cd99ab8f7db1be3ba file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: user1 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: role1 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: SET - keyword: RAW_SIZE_LIMIT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '128' - star: '*' - numeric_literal: '1024' - star: '*' - numeric_literal: '1024' - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - quoted_identifier: '"role1"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_session.sql000066400000000000000000000004731451700765000246620ustar00rootroot00000000000000ALTER SESSION SET TIME_ZONE='EUROPE/BERLIN'; ALTER SESSION SET QUERY_TIMEOUT=120; ALTER SESSION SET NLS_DATE_FORMAT='DDD-YYYY'; ALTER SESSION SET SESSION_TEMP_DB_RAM_LIMIT = '10240M'; ALTER SESSION SET SNAPSHOT_MODE = 'OFF'; ALTER SESSION SET SNAPSHOT_MODE = 'SYSTEM TABLES'; ALTER SESSION SET IDLE_TIMEOUT = 2400; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_session.yml000066400000000000000000000043461451700765000246670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f3be0ce4a7bdbfe38682bbbcca6f44e8eec5dd1ce10ceb56e0cfd3056f4acd74 file: - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: TIME_ZONE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'EUROPE/BERLIN'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: QUERY_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '120' - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: NLS_DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DDD-YYYY'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SESSION_TEMP_DB_RAM_LIMIT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'10240M'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'OFF'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SYSTEM TABLES'" - statement_terminator: ; - statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - keyword: SET - session_parameter: IDLE_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2400' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_system.sql000066400000000000000000000003351451700765000245200ustar00rootroot00000000000000ALTER SYSTEM SET NLS_DATE_LANGUAGE='DEU'; ALTER SYSTEM SET NLS_FIRST_DAY_OF_WEEK=1; ALTER SYSTEM SET SNAPSHOT_MODE = 'SYSTEM TABLES'; ALTER SYSTEM SET IDLE_TIMEOUT = 3600; ALTER SYSTEM SET USER_TEMP_DB_RAM_LIMIT = '50G'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_system.yml000066400000000000000000000032761451700765000245310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a1439fe14d3633a0a6010879e8f07aef5f4dccfb13b299b4825c224eec080f9e file: - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: NLS_DATE_LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DEU'" - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: NLS_FIRST_DAY_OF_WEEK - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: SNAPSHOT_MODE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SYSTEM TABLES'" - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: IDLE_TIMEOUT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3600' - statement_terminator: ; - statement: alter_system_statement: - keyword: ALTER - keyword: SYSTEM - keyword: SET - system_parameter: USER_TEMP_DB_RAM_LIMIT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'50G'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_column.sql000066400000000000000000000010631451700765000256370ustar00rootroot00000000000000ALTER TABLE t ADD COLUMN IF NOT EXISTS new_dec DECIMAL(18,0); ALTER TABLE t ADD (new_char CHAR(10) DEFAULT 'some text'); ALTER TABLE myschema.t DROP COLUMN i; ALTER TABLE t DROP j; ALTER TABLE t MODIFY (i DECIMAL(10,2)); ALTER TABLE t MODIFY (j VARCHAR(5) DEFAULT 'text'); ALTER TABLE t MODIFY k INTEGER IDENTITY(1000); ALTER TABLE t RENAME COLUMN i TO j; ALTER TABLE t ALTER COLUMN v SET DEFAULT CURRENT_USER; ALTER TABLE "SCHEMA"."TABLE" ALTER COLUMN v DROP DEFAULT; ALTER TABLE t ALTER COLUMN id SET IDENTITY 1000; ALTER TABLE t ALTER COLUMN id DROP IDENTITY; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_column.yml000066400000000000000000000146121451700765000256450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2b5949c81506c6ef3ba0649f65e32fd812eb7682e6080c9e181520ba389c9635 file: - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_add_column: - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: column_datatype_definition: naked_identifier: new_dec data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_add_column: keyword: ADD bracketed: start_bracket: ( column_definition: column_datatype_definition: naked_identifier: new_char data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'some text'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: t - alter_table_drop_column: - keyword: DROP - keyword: COLUMN - naked_identifier: i - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_drop_column: keyword: DROP naked_identifier: j - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY bracketed: start_bracket: ( naked_identifier: i data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY bracketed: start_bracket: ( naked_identifier: j data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'text'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_modify_column: keyword: MODIFY naked_identifier: k data_type: keyword: INTEGER column_constraint_segment: keyword: IDENTITY bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_rename_column: - keyword: RENAME - keyword: COLUMN - naked_identifier: i - keyword: TO - naked_identifier: j - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: v - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_USER - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: 
TABLE - table_reference: - quoted_identifier: '"SCHEMA"' - dot: . - quoted_identifier: '"TABLE"' - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: v - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: id - keyword: SET - keyword: IDENTITY - numeric_literal: '1000' - statement_terminator: ; - statement: alter_table_statement: alter_table_column_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t - alter_table_alter_column: - keyword: ALTER - keyword: COLUMN - naked_identifier: id - keyword: DROP - keyword: IDENTITY - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_constraint.sql000066400000000000000000000005221451700765000265250ustar00rootroot00000000000000ALTER TABLE t1 ADD CONSTRAINT my_primary_key PRIMARY KEY (a); ALTER TABLE t2 ADD CONSTRAINT my_foreign_key FOREIGN KEY (x) REFERENCES t1; ALTER TABLE t2 MODIFY CONSTRAINT my_foreign_key DISABLE; ALTER TABLE t2 RENAME CONSTRAINT my_foreign_key TO my_fk; ALTER TABLE t2 DROP CONSTRAINT my_fk; ALTER TABLE t1 DROP CONSTRAINT IF EXISTS PK_X; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_constraint.yml000066400000000000000000000054361451700765000265400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cb4515d6700cb2e56cf988b801092be38560e0ea53536094c561ecf5d0b68d98 file: - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ADD - table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: my_primary_key - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: ADD - table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: x end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: MODIFY - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: DISABLE - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: RENAME - keyword: CONSTRAINT - naked_identifier: my_foreign_key - keyword: TO - naked_identifier: my_fk - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t2 - keyword: DROP - keyword: CONSTRAINT - naked_identifier: my_fk - statement_terminator: ; - statement: alter_table_statement: alter_table_constraint_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: DROP - keyword: CONSTRAINT - keyword: IF - keyword: EXISTS - naked_identifier: PK_X - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_distribute_partition.sql000066400000000000000000000005421451700765000306120ustar00rootroot00000000000000ALTER TABLE my_table DROP DISTRIBUTION KEYS; ALTER TABLE my_table DROP DISTRIBUTION AND PARTITION KEYS; ALTER TABLE my_table DISTRIBUTE BY shop_id, PARTITION BY order_date; ALTER TABLE my_table PARTITION BY order_date, DISTRIBUTE BY shop_id, branch_no; ALTER TABLE my_table PARTITION BY order_date; ALTER TABLE my_table DISTRIBUTE BY shop_id, branch_no; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_table_distribute_partition.yml000066400000000000000000000057501451700765000306220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 70378d3650e57431fc47fb48dc6e68eae04241f3a5e101bc929d22134e967574 file: - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: DISTRIBUTION - keyword: KEYS - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: DROP - keyword: DISTRIBUTION - keyword: AND - keyword: PARTITION - keyword: KEYS - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - comma: ',' - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - column_reference: naked_identifier: branch_no - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - statement_terminator: ; - statement: alter_table_statement: alter_table_distribute_partition_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: shop_id - comma: ',' - column_reference: naked_identifier: branch_no - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_user.sql000066400000000000000000000012021451700765000241440ustar00rootroot00000000000000ALTER USER user_1 IDENTIFIED BY "h22_xhz" REPLACE "h12_xhz"; ALTER USER user_1 IDENTIFIED BY "h12_xhz"; ALTER USER user_2 IDENTIFIED AT LDAP AS 'cn=user_2,dc=authorization,dc=exasol,dc=com'; ALTER USER user_3 PASSWORD_EXPIRY_POLICY = '42 days'; ALTER USER user_4 PASSWORD EXPIRE; ALTER USER user_5 RESET FAILED LOGIN ATTEMPTS; ALTER USER userx SET CONSUMER_GROUP = CEO; ALTER USER userx SET CONSUMER_GROUP = NULL; ALTER USER "ADMIN" SET CONSUMER_GROUP = "TABLE"; ALTER USER [admin] SET CONSUMER_GROUP = "DAY"; ALTER USER "ADMIN" SET CONSUMER_GROUP = [day]; ALTER USER oidctestuser IDENTIFIED BY OPENID SUBJECT 'database-user@exasol.example'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_user.yml000066400000000000000000000075331451700765000241630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e7a2f2ea950caab25ed06f8ec8d3055d846e1b0639a6f55d0fa4789665b3a050 file: - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h22_xhz"' - keyword: REPLACE - password_literal: '"h12_xhz"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h12_xhz"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_2 - keyword: IDENTIFIED - ldap_auth: - keyword: AT - keyword: LDAP - keyword: AS - quoted_literal: "'cn=user_2,dc=authorization,dc=exasol,dc=com'" - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_3 - keyword: PASSWORD_EXPIRY_POLICY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42 days'" - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_4 - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: user_5 - keyword: RESET - keyword: FAILED - keyword: LOGIN - keyword: ATTEMPTS - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: userx - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - naked_identifier: CEO - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: userx - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"TABLE"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: identifier: '[admin]' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: '"DAY"' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - keyword: SET - keyword: CONSUMER_GROUP - comparison_operator: raw_comparison_operator: '=' - identifier: '[day]' - statement_terminator: ; - statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: oidctestuser - keyword: IDENTIFIED - openid_auth: - keyword: BY - keyword: OPENID - keyword: SUBJECT - quoted_literal: "'database-user@exasol.example'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_virtual_schema_statement.sql000066400000000000000000000003271451700765000302670ustar00rootroot00000000000000ALTER VIRTUAL SCHEMA s2 SET CONNECTION_STRING = 'jdbc:hive2://localhost:10000/default'; ALTER VIRTUAL SCHEMA s2 REFRESH; ALTER VIRTUAL SCHEMA s1 CHANGE OWNER "role1"; ALTER VIRTUAL SCHEMA s1 CHANGE OWNER role1; 
sqlfluff-2.3.5/test/fixtures/dialects/exasol/alter_virtual_schema_statement.yml000066400000000000000000000030261451700765000302700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a43f172c076893a09ec340877e05d2bfe8881c298e7fd4bd5614e0545818f817 file: - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s2 - keyword: SET - column_reference: naked_identifier: CONNECTION_STRING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'jdbc:hive2://localhost:10000/default'" - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s2 - keyword: REFRESH - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - quoted_identifier: '"role1"' - statement_terminator: ; - statement: alter_virtual_schema_statement: - keyword: ALTER - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: CHANGE - keyword: OWNER - naked_identifier: role1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/comment_statement.sql000066400000000000000000000004071451700765000255330ustar00rootroot00000000000000COMMENT ON SCHEMA s1 IS 'My first schema'; COMMENT ON TABLE a_schema.t1 IS 'My first table'; COMMENT ON t1 (id IS 'Identity column', zip IS 'Zip code'); COMMENT ON SCRIPT script1 IS 'My first script'; COMMENT ON CONSUMER GROUP admin_group IS 'VERY important!!!'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/comment_statement.yml000066400000000000000000000035051451700765000255370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 15da5ea0970e7931416e4cf4927a4a3556b20193c95dff5a6bbc958475813059 file: - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: s1 - keyword: IS - quoted_literal: "'My first schema'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: - naked_identifier: a_schema - dot: . 
- naked_identifier: t1 - keyword: IS - quoted_literal: "'My first table'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - naked_identifier: id - keyword: IS - quoted_literal: "'Identity column'" - comma: ',' - naked_identifier: zip - keyword: IS - quoted_literal: "'Zip code'" - end_bracket: ) - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: SCRIPT - object_reference: naked_identifier: script1 - keyword: IS - quoted_literal: "'My first script'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: CONSUMER - keyword: GROUP - object_reference: naked_identifier: admin_group - keyword: IS - quoted_literal: "'VERY important!!!'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_adapter_script_statement.sql000066400000000000000000000004421451700765000304170ustar00rootroot00000000000000CREATE JAVA ADAPTER SCRIPT my_script AS %jar hive_jdbc_adapter.jar; / CREATE OR REPLACE PYTHON ADAPTER SCRIPT test.adapter_dummy AS def adapter_call(in_json): return "BLABLA" / CREATE OR REPLACE LUA ADAPTER SCRIPT test.adapter_dummy AS function adapter_call(in_json): return 'BLABLA' / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_adapter_script_statement.yml000066400000000000000000000035661451700765000304330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 075ae3b127ab6cfb870b46b0576a1edfd9d58a34702b1003f265a86cdd59f932 file: - statement: create_adapter_script: - keyword: CREATE - keyword: JAVA - keyword: ADAPTER - keyword: SCRIPT - script_reference: naked_identifier: my_script - keyword: AS - script_content: - percent: '%' - word: jar - word: hive_jdbc_adapter - dot: . - word: jar - semicolon: ; - function_script_terminator: / - statement: create_adapter_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PYTHON - keyword: ADAPTER - keyword: SCRIPT - script_reference: - naked_identifier: test - dot: . - naked_identifier: adapter_dummy - keyword: AS - script_content: - word: def - word: adapter_call - bracketed: start_bracket: ( word: in_json end_bracket: ) - colon: ':' - word: return - double_quote: '"BLABLA"' - function_script_terminator: / - statement: create_adapter_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: ADAPTER - keyword: SCRIPT - script_reference: - naked_identifier: test - dot: . 
- naked_identifier: adapter_dummy - keyword: AS - script_content: - word: function - word: adapter_call - bracketed: start_bracket: ( word: in_json end_bracket: ) - colon: ':' - word: return - single_quote: "'BLABLA'" - function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_connection.sql000066400000000000000000000010571451700765000254710ustar00rootroot00000000000000CREATE CONNECTION ftp_connection TO 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret'; ---- CREATE CONNECTION exa_connection TO '192.168.6.11..14:8563'; ---- CREATE CONNECTION ora_connection TO '(DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.6.54)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = orcl)))'; ---- CREATE CONNECTION jdbc_connection_1 TO 'jdbc:mysql://192.168.6.1/my_db'; ---- CREATE CONNECTION jdbc_connection_2 TO 'jdbc:postgresql://192.168.6.2:5432/my_db?stringtype=unspecified'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_connection.yml000066400000000000000000000036501451700765000254740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1b30adf9c54a4aae2b5ccca87f898e2a5c6f7167c6e0b0da80b8a506a067a590 file: - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: ftp_connection - keyword: TO - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: exa_connection - keyword: TO - connection_definition: quoted_literal: "'192.168.6.11..14:8563'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: ora_connection - keyword: TO - connection_definition: quoted_literal: "'(DESCRIPTION =\n (ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.6.54)(PORT\ \ = 1521))\n (CONNECT_DATA = (SERVER = DEDICATED)(SERVICE_NAME = orcl)))'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: jdbc_connection_1 - keyword: TO - connection_definition: quoted_literal: "'jdbc:mysql://192.168.6.1/my_db'" - statement_terminator: ; - statement: create_connection: - keyword: CREATE - keyword: CONNECTION - naked_identifier: jdbc_connection_2 - keyword: TO - connection_definition: quoted_literal: "'jdbc:postgresql://192.168.6.2:5432/my_db?stringtype=unspecified'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_consumer_group.sql000066400000000000000000000007541451700765000264040ustar00rootroot00000000000000CREATE CONSUMER GROUP CEO WITH PRECEDENCE = '1000', CPU_WEIGHT = '900'; CREATE CONSUMER GROUP BI_TEAM WITH PRECEDENCE = '900', CPU_WEIGHT = '500', GROUP_TEMP_DB_RAM_LIMIT = '200G', USER_TEMP_DB_RAM_LIMIT = '100G'; CREATE CONSUMER GROUP TEST_TEAM WITH PRECEDENCE = '800', CPU_WEIGHT = '100', GROUP_TEMP_DB_RAM_LIMIT = '10G', SESSION_TEMP_DB_RAM_LIMIT = '5G', QUERY_TIMEOUT = 60, IDLE_TIMEOUT = 3600; CREATE CONSUMER GROUP "ADMIN" WITH PRECEDENCE = '1000', CPU_WEIGHT = '900'; 
sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_consumer_group.yml000066400000000000000000000070741451700765000264100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d861350ac870b76d40cd5f810b294da273c5218402d8210df4b1bd3b7b9fd416 file: - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: CEO - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1000'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: BI_TEAM - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'500'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'200G'" - comma: ',' - consumer_group_parameter: keyword: USER_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'100G'" - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - naked_identifier: TEST_TEAM - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'800'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'100'" - comma: ',' - consumer_group_parameter: keyword: GROUP_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10G'" - comma: ',' - consumer_group_parameter: keyword: SESSION_TEMP_DB_RAM_LIMIT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'5G'" - comma: ',' - consumer_group_parameter: keyword: QUERY_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - comma: ',' - consumer_group_parameter: keyword: IDLE_TIMEOUT comparison_operator: raw_comparison_operator: '=' numeric_literal: '3600' - statement_terminator: ; - statement: create_consumer_group_statement: - keyword: CREATE - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - keyword: WITH - consumer_group_parameter: keyword: PRECEDENCE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'1000'" - comma: ',' - consumer_group_parameter: keyword: CPU_WEIGHT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'900'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_function_statement.sql000066400000000000000000000040551451700765000272440ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION percentage ( fraction DECIMAL, entirety DECIMAL) RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := (100*fraction)/entirety; RETURN res || ' %'; END percentage; / ---- CREATE FUNCTION hello () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := 
hello.world("no"); RETURN 'HELLO'; END hello; / ---- CREATE FUNCTION case_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := CASE WHEN input_variable < 0 THEN 0 ELSE input_variable END; RETURN res; END case_function; / ---- CREATE FUNCTION assignment_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN res := 'Hello World'; RETURN res; END assignment_function; / ---- CREATE FUNCTION if_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN IF input_variable = 0 THEN res := NULL; ELSEIF input_variable = 1 THEN res := 'HELLO'; ELSEIF input_variable = 2 THEN res := 'HALLO'; ELSE res := input_variable; END IF; RETURN res; END if_function; / ---- CREATE FUNCTION for_loop_function () RETURN VARCHAR(10) AS res DECIMAL; BEGIN FOR cnt := 1 TO input_variable DO res := res*2; END FOR; RETURN res; END for_loop_function; / ---- CREATE FUNCTION for_loop_function2 () RETURN VARCHAR(10) AS res DECIMAL; BEGIN FOR cnt IN 1..10 LOOP res := res*2; END LOOP; RETURN res; END for_loop_function2; / ---- CREATE FUNCTION for_loop_function3 () RETURN VARCHAR(10) AS res DECIMAL; BEGIN WHILE cnt <= input_variable DO res := res*2; cnt := cnt+1; END WHILE; RETURN res; END for_loop_function3; / CREATE FUNCTION schem.func ( p1 VARCHAR(6), p2 VARCHAR(10) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 IS NOT NULL AND p2 IS NOT NULL THEN IF p1 = 1 THEN res:= 'Hello World'; ELSE IF p2 = 3 THEN res:= 'ABC'; END IF; res:= 'WOHOOOO'; END IF; END IF; RETURN res; END schem.func; / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_function_statement.yml000066400000000000000000000416631451700765000272540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f33f127aa0ec656ffa63587a2620ba4bdcbca0870d610d867b8b656a567b6784 file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_reference: naked_identifier: percentage - bracketed: - start_bracket: ( - naked_identifier: fraction - data_type: keyword: DECIMAL - comma: ',' - naked_identifier: entirety - data_type: keyword: DECIMAL - end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := expression: bracketed: start_bracket: ( expression: numeric_literal: '100' binary_operator: '*' column_reference: naked_identifier: fraction end_bracket: ) binary_operator: / column_reference: naked_identifier: entirety statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res binary_operator: - pipe: '|' - pipe: '|' quoted_literal: "' %'" - statement_terminator: ; - keyword: END - function_reference: naked_identifier: percentage - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: hello - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := function: function_name: naked_identifier: hello dot: . 
function_name_identifier: world bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '"no"' end_bracket: ) statement_terminator: ; - keyword: RETURN - expression: quoted_literal: "'HELLO'" - statement_terminator: ; - keyword: END - function_reference: naked_identifier: hello - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: case_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: < numeric_literal: '0' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: naked_identifier: input_variable - keyword: END statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: case_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: assignment_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'Hello World'" statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: assignment_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: if_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - keyword: THEN - function_body: function_assignment: - variable: res - assignment_operator: := - variable: 'NULL' - statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'HELLO'" statement_terminator: ; - keyword: ELSEIF - expression: column_reference: naked_identifier: input_variable comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: 
"'HALLO'" statement_terminator: ; - keyword: ELSE - function_body: function_assignment: - variable: res - assignment_operator: := - variable: input_variable - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: if_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_for_loop: - keyword: FOR - naked_identifier: cnt - assignment_operator: := - expression: numeric_literal: '1' - keyword: TO - expression: column_reference: naked_identifier: input_variable - keyword: DO - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - keyword: END - keyword: FOR - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function2 - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_for_loop: - keyword: FOR - naked_identifier: cnt - keyword: IN - expression: numeric_literal: '1' - range_operator: .. 
- expression: numeric_literal: '10' - keyword: LOOP - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - keyword: END - keyword: LOOP - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function2 - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: naked_identifier: for_loop_function3 - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - keyword: AS - variable: res - data_type: keyword: DECIMAL - statement_terminator: ; - keyword: BEGIN - function_body: function_while_loop: - keyword: WHILE - expression: - column_reference: naked_identifier: cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: input_variable - keyword: DO - function_body: function_assignment: variable: res assignment_operator: := expression: column_reference: naked_identifier: res binary_operator: '*' numeric_literal: '2' statement_terminator: ; - function_body: function_assignment: variable: cnt assignment_operator: := expression: column_reference: naked_identifier: cnt binary_operator: + numeric_literal: '1' statement_terminator: ; - keyword: END - keyword: WHILE - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: naked_identifier: for_loop_function3 - statement_terminator: ; - function_script_terminator: / - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_reference: - naked_identifier: schem - dot: . 
- naked_identifier: func - bracketed: - start_bracket: ( - naked_identifier: p1 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - comma: ',' - naked_identifier: p2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_bracket: ) - keyword: RETURN - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - keyword: IS - variable: res - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - statement_terminator: ; - keyword: BEGIN - function_body: function_if_branch: - keyword: IF - expression: - column_reference: naked_identifier: p1 - keyword: IS - keyword: NOT - keyword: 'NULL' - binary_operator: AND - column_reference: naked_identifier: p2 - keyword: IS - keyword: NOT - keyword: 'NULL' - keyword: THEN - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: p1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'Hello World'" statement_terminator: ; - keyword: ELSE - function_body: function_if_branch: - keyword: IF - expression: column_reference: naked_identifier: p2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - keyword: THEN - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'ABC'" statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - function_body: function_assignment: variable: res assignment_operator: := quoted_literal: "'WOHOOOO'" statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: END - keyword: IF - statement_terminator: ; - keyword: RETURN - expression: column_reference: naked_identifier: res - statement_terminator: ; - keyword: END - function_reference: - naked_identifier: schem - dot: . - naked_identifier: func - statement_terminator: ; - function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_lua_script_bracket.sql000066400000000000000000000003451451700765000271710ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT BRACKET.SCRIPT_EXAMPLE RETURNS ROWCOUNT AS local _stmt = [[SOME ASSIGNMENT WITH OPEN BRACKET ( ]] x = 1 local _stmt = _stmt .. [[ ) ]] local _nsted = [=[one ([[two]] one]=] return 1 / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_lua_script_bracket.yml000066400000000000000000000024051451700765000271720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 63bc4f1ece0f16208c2155dffd001eb6fc1a592a49e5655d7fdad60cb8a23ec6 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: BRACKET - dot: . 
- naked_identifier: SCRIPT_EXAMPLE - keyword: RETURNS - keyword: ROWCOUNT - keyword: AS - script_content: - word: local - word: _stmt - equals: '=' - lua_multiline_quotes: '[[SOME ASSIGNMENT WITH OPEN BRACKET ( ]]' - word: x - equals: '=' - numeric_literal: '1' - word: local - word: _stmt - equals: '=' - word: _stmt - range_operator: .. - lua_multiline_quotes: '[[ ) ]]' - word: local - word: _nsted - equals: '=' - lua_nested_quotes: '[=[one ([[two]] one]=]' - word: return - numeric_literal: '1' function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_python_scalar_script.sql000066400000000000000000000010341451700765000275570ustar00rootroot00000000000000CREATE OR REPLACE PYTHON3 SCALAR SCRIPT MYSCHEMA.MYPYTHONSCRIPT( JSON_STR VARCHAR(2000000), LANGUAGE_KEY VARCHAR(50), TXT_KEY VARCHAR(50) ) EMITS ( X VARCHAR(2000000) ) AS """ /*==================================================================== e.g.: SELECT MYSCHEMA.MYPYTHONSCRIPT( '[{"@lang":"de-DE","$":"Krztxt"}, {"@lang":"en-GB","$":"Shrttxt"}]', '@lang', '$' ); ====================================================================*/ """ def run (ctx): pass / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_python_scalar_script.yml000066400000000000000000000060741451700765000275720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7039839de22522353c84a619e578e24afe3df5546c65e7058a3f8fd4ee1affd9 file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - naked_identifier: PYTHON3 - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: MYSCHEMA - dot: . 
- naked_identifier: MYPYTHONSCRIPT - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: JSON_STR data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000000' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: LANGUAGE_KEY data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: TXT_KEY data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( column_datatype_definition: naked_identifier: X data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000000' end_bracket: ) end_bracket: ) - keyword: AS - script_content: - double_quote: "\"\"\"\n/*====================================================================\n\ \ e.g.:\n SELECT MYSCHEMA.MYPYTHONSCRIPT(\n '[{\"" - at_sign_literal: '@lang' - double_quote: '":"' - word: de - minus: '-' - word: DE - double_quote: '","' - dollar_literal: $ - double_quote: '":"' - word: Krztxt - double_quote: '"}, {"' - at_sign_literal: '@lang' - double_quote: '":"' - word: en - minus: '-' - word: GB - double_quote: '","' - dollar_literal: $ - double_quote: '":"' - word: Shrttxt - double_quote: "\"}]',\n '@lang',\n '$'\n );\n\ \ ====================================================================*/\n\ \"\"\"" - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: pass function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_role.sql000066400000000000000000000001051451700765000242640ustar00rootroot00000000000000CREATE ROLE test_role; CREATE ROLE "test_role"; CREATE ROLE [admin]; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_role.yml000066400000000000000000000015731451700765000243000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bd7ffab0539bafd2abf5b7da5f9aaba82cf5c5bcb4056b0facfa1f3518cb1861 file: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: test_role - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: identifier: '[admin]' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_schema.sql000066400000000000000000000000621451700765000245650ustar00rootroot00000000000000CREATE SCHEMA s1; CREATE SCHEMA IF NOT EXISTS s2; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_schema.yml000066400000000000000000000014171451700765000245740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 39c90362dd81f364e7ea8dead40e4456c183e67d257b4a15f6cf83378ce117cb file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement1.sql000066400000000000000000000002321451700765000317200ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT aschema.hello AS return 'HELLO' / -- and a second one CREATE OR REPLACE LUA SCRIPT aschema.world AS return 'WORLD' / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement1.yml000066400000000000000000000022021451700765000317210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 08fde850eb3b6248d17fb899e859f6035fdaa61f73534686cb83339536b5ac3d file: - statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . - naked_identifier: hello - keyword: AS - script_content: word: return single_quote: "'HELLO'" - function_script_terminator: / - statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . - naked_identifier: world - keyword: AS - script_content: word: return single_quote: "'WORLD'" - function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement2.sql000066400000000000000000000001051451700765000317200ustar00rootroot00000000000000CREATE OR REPLACE LUA SCRIPT aschema.hello() AS return 'HELLO' / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement2.yml000066400000000000000000000015071451700765000317310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f49ff2d1bba51bc997e3ab0293cdef908a67c19ef90a886d4d94109e76ae9180 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: LUA - keyword: SCRIPT - script_reference: - naked_identifier: aschema - dot: . 
- naked_identifier: hello - bracketed: start_bracket: ( end_bracket: ) - keyword: AS - script_content: word: return single_quote: "'HELLO'" function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement3.sql000066400000000000000000000003751451700765000317320ustar00rootroot00000000000000CREATE SCRIPT insert_low_high (param1, param2, param3) AS import('function_lib') -- accessing external function lowest, highest = function_lib.min_max(param1, param2, param3) query([[INSERT INTO t VALUES (:x, :y)]], {x=lowest, y=highest}) / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_scripting_lua_script_statement3.yml000066400000000000000000000032211451700765000317250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2dd5ac7bc28181687913476d87e9f155da9a3f082c0386ef1341526f964a5d52 file: statement: create_scripting_lua_script: - keyword: CREATE - keyword: SCRIPT - script_reference: naked_identifier: insert_low_high - bracketed: - start_bracket: ( - naked_identifier: param1 - comma: ',' - naked_identifier: param2 - comma: ',' - naked_identifier: param3 - end_bracket: ) - keyword: AS - script_content: - word: import - bracketed: start_bracket: ( single_quote: "'function_lib'" end_bracket: ) - word: lowest - comma: ',' - word: highest - equals: '=' - word: function_lib - dot: . - word: min_max - bracketed: - start_bracket: ( - word: param1 - comma: ',' - word: param2 - comma: ',' - word: param3 - end_bracket: ) - word: query - bracketed: - start_bracket: ( - lua_multiline_quotes: '[[INSERT INTO t VALUES (:x, :y)]]' - comma: ',' - start_curly_bracket: '{' - word: x - equals: '=' - word: lowest - comma: ',' - word: y - equals: '=' - word: highest - end_curly_bracket: '}' - end_bracket: ) function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_table_statement.sql000066400000000000000000000034051451700765000265040ustar00rootroot00000000000000CREATE TABLE myschema.t1 ( a VARCHAR(20) UTF8, b DECIMAL(24,4) NOT NULL COMMENT IS 'The B column', c DECIMAL DEFAULT 122, d DOUBLE, e TIMESTAMP DEFAULT CURRENT_TIMESTAMP, f BOOL); ---- CREATE TABLE "MYSCHEMA"."T2" AS (SELECT * FROM t1) WITH NO DATA; ---- CREATE OR REPLACE TABLE "MYSCHEMA".T2 AS SELECT a,b,c+1 AS c FROM t1; ---- CREATE TABLE t3 AS (SELECT count(*) AS my_count FROM t1) WITH NO DATA; ---- CREATE TABLE t4 LIKE t1; ---- CREATE TABLE t5 ( id int IDENTITY PRIMARY KEY DISABLE, LIKE t1 INCLUDING DEFAULTS, g DOUBLE, DISTRIBUTE BY a,b ); ---- CREATE TABLE t6 ( order_id INT, sales_id INT, order_price DOUBLE, order_date DATE, country VARCHAR(40), CONSTRAINT t6_pk PRIMARY KEY (order_id,sales_id), DISTRIBUTE BY order_id, PARTITION BY order_date) COMMENT IS 'a great table'; ---- CREATE OR REPLACE TABLE t8 (ref_id int CONSTRAINT FK_T5 REFERENCES t5 (id) DISABLE, b VARCHAR(20)); ---- CREATE TABLE IF NOT EXISTS SCHEM.TAB ( ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name' ) COMMENT IS 'a nice table'; ---- CREATE TABLE SCHEM.TAB ( ID DECIMAL(18, 0), C1 CHAR(1), CONSTRAINT PRIMARY KEY (id) ); ---- CREATE TABLE SCHEM.TAB ( ID DECIMAL(18, 0), C1 CHAR(1), CONSTRAINT "ADMIN" PRIMARY KEY (id) ); ---- CREATE TABLE SCHEM.TAB ( C1 CHAR(1) CONSTRAINT "ADMIN" PRIMARY KEY 
); ---- CREATE TABLE T AS SELECT * FROM A COMMENT IS 'BLABLA'; ---- CREATE TABLE "MYSCHEMA"."T2" AS SELECT * FROM t1 WITH NO DATA; ---- CREATE TABLE "MYSCHEMA"."T2" AS SELECT * FROM t1 WITH NO DATA COMMENT IS 'ABC'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_table_statement.yml000066400000000000000000000441131451700765000265070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 899769991a0703ecd90c18a7162dc7f624521b1cebf46d87f2d18368c7461560 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: myschema - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: a data_type: - keyword: VARCHAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - keyword: UTF8 - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: b data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '24' - comma: ',' - numeric_literal: '4' - end_bracket: ) column_constraint_segment: table_constraint_definition: - keyword: NOT - keyword: 'NULL' comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'The B column'" - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: c data_type: keyword: DECIMAL column_constraint_segment: keyword: DEFAULT numeric_literal: '122' - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: d data_type: keyword: DOUBLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: e data_type: keyword: TIMESTAMP column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: f data_type: keyword: BOOL - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: quoted_identifier: '"MYSCHEMA"' dot: . 
naked_identifier: T2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: c binary_operator: + numeric_literal: '1' alias_expression: keyword: AS naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t3 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t4 - table_like_clause: keyword: LIKE table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t5 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: id data_type: keyword: int column_constraint_segment: keyword: IDENTITY table_constraint_definition: - keyword: PRIMARY - keyword: KEY - keyword: DISABLE - comma: ',' - table_content_definition: table_like_clause: - keyword: LIKE - table_reference: naked_identifier: t1 - keyword: INCLUDING - keyword: DEFAULTS - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: g data_type: keyword: DOUBLE - comma: ',' - table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t6 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_id data_type: keyword: INT - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: sales_id data_type: keyword: INT - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_price data_type: keyword: DOUBLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: order_date data_type: keyword: DATE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: country data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: t6_pk - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: order_id - comma: ',' - column_reference: naked_identifier: sales_id - end_bracket: ) - comma: ',' - 
table_distribution_partition_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: order_id - comma: ',' - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: order_date - end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'a great table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: naked_identifier: t8 - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ref_id data_type: keyword: int column_constraint_segment: table_constraint_definition: - keyword: CONSTRAINT - naked_identifier: FK_T5 - keyword: REFERENCES - table_reference: naked_identifier: t5 - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DISABLE - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: b data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) column_constraint_segment: keyword: IDENTITY table_constraint_definition: - keyword: CONSTRAINT - keyword: PRIMARY - keyword: KEY - keyword: DISABLE comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'without constraint name'" end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'a nice table'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . 
- naked_identifier: TAB - bracketed: - start_bracket: ( - table_content_definition: column_definition: column_datatype_definition: naked_identifier: ID data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '18' - comma: ',' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - comma: ',' - table_content_definition: table_constraint_definition: - keyword: CONSTRAINT - quoted_identifier: '"ADMIN"' - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEM - dot: . - naked_identifier: TAB - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: C1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: table_constraint_definition: - keyword: CONSTRAINT - quoted_identifier: '"ADMIN"' - keyword: PRIMARY - keyword: KEY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: T - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'BLABLA'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '"MYSCHEMA"' - dot: . - quoted_identifier: '"T2"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'ABC'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_dot_syntax.sql000066400000000000000000000001461451700765000276070ustar00rootroot00000000000000CREATE PYTHON SCALAR SCRIPT sample_simple (...) EMITS (...) AS def run(ctx): ctx.emit(True, False) / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_dot_syntax.yml000066400000000000000000000023111451700765000276050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d331951eb3a334aad835fd5a4bc37ce40faf6927445dc9e90b48b67ad452fe9 file: statement: create_udf_script: - keyword: CREATE - keyword: PYTHON - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: sample_simple - bracketed: start_bracket: ( identifier: '...' end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( identifier: '...' end_bracket: ) - keyword: AS - script_content: - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: ctx - dot: . - word: emit - bracketed: - start_bracket: ( - word: 'True' - comma: ',' - word: 'False' - end_bracket: ) function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement1.sql000066400000000000000000000010331451700765000274740ustar00rootroot00000000000000CREATE LUA SCALAR SCRIPT my_average (a DOUBLE, b DOUBLE ORDER BY 1 desc) RETURNS DOUBLE AS function run(ctx) if ctx.a == nil or ctx.b==nil then return NULL end return (ctx.a+ctx.b)/2 end / CREATE LUA SCALAR SCRIPT my_average (a DOUBLE, b DOUBLE ORDER BY 1 desc) RETURNS DOUBLE AS function run(ctx) if ctx.a == nil or ctx.b==nil then return NULL end x = 10 / 2 return (ctx.a+ctx.b) / 2 end / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement1.yml000066400000000000000000000066671451700765000275200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0035382ef47bde6ebf1104b4b6b199d904ed536514329a25e2355d4a48b84cfb file: - statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: my_average - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: a data_type: keyword: DOUBLE - comma: ',' - column_datatype_definition: naked_identifier: b data_type: keyword: DOUBLE - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - keyword: desc - end_bracket: ) - keyword: RETURNS - data_type: keyword: DOUBLE - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: if - word: ctx - dot: . - word: a - equals: '=' - equals: '=' - word: nil - word: or - word: ctx - dot: . - word: b - equals: '=' - equals: '=' - word: nil - word: then - word: return - word: 'NULL' - word: end - word: return - bracketed: - start_bracket: ( - word: ctx - dot: . - word: a - plus: + - word: ctx - dot: . 
- word: b - end_bracket: ) - divide: / - numeric_literal: '2' - word: end - function_script_terminator: / - statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: my_average - bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: a data_type: keyword: DOUBLE - comma: ',' - column_datatype_definition: naked_identifier: b data_type: keyword: DOUBLE - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - keyword: desc - end_bracket: ) - keyword: RETURNS - data_type: keyword: DOUBLE - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: if - word: ctx - dot: . - word: a - equals: '=' - equals: '=' - word: nil - word: or - word: ctx - dot: . - word: b - equals: '=' - equals: '=' - word: nil - word: then - word: return - word: 'NULL' - word: end - word: x - equals: '=' - numeric_literal: '10' - divide: / - numeric_literal: '2' - word: return - bracketed: - start_bracket: ( - word: ctx - dot: . - word: a - plus: + - word: ctx - dot: . - word: b - end_bracket: ) - divide: / - numeric_literal: '2' - word: end - function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement2.sql000066400000000000000000000004171451700765000275020ustar00rootroot00000000000000CREATE LUA SCALAR SCRIPT map_words(w varchar(10000)) EMITS (words varchar(100)) AS function run(ctx) local word = ctx.w if (word ~= null) then for i in unicode.utf8.gmatch(word,'([%w%p]+)') do ctx.emit(i) end end end / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement2.yml000066400000000000000000000044611451700765000275070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 04bc713c05ce7ccbe9970017917dd1ee7b939d02e56b50a38eb6f06722952751 file: statement: create_udf_script: - keyword: CREATE - keyword: LUA - keyword: SCALAR - keyword: SCRIPT - script_reference: naked_identifier: map_words - bracketed: start_bracket: ( column_datatype_definition: naked_identifier: w data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10000' end_bracket: ) end_bracket: ) - emits_segment: keyword: EMITS bracketed: start_bracket: ( column_datatype_definition: naked_identifier: words data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) end_bracket: ) - keyword: AS - script_content: - word: function - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - word: local - word: word - equals: '=' - word: ctx - dot: . - word: w - word: if - bracketed: - start_bracket: ( - word: word - like_operator: '~' - equals: '=' - word: 'null' - end_bracket: ) - word: then - word: for - word: i - word: in - word: unicode - dot: . - word: utf8 - dot: . - word: gmatch - bracketed: start_bracket: ( word: word comma: ',' single_quote: "'([%w%p]+)'" end_bracket: ) - word: do - word: ctx - dot: . 
- word: emit - bracketed: start_bracket: ( word: i end_bracket: ) - word: end - word: end - word: end function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement3.sql000066400000000000000000000001711451700765000275000ustar00rootroot00000000000000CREATE OR REPLACE PYTHON3 SCALAR SCRIPT LIB.MYLIB() RETURNS INT AS def helloWorld(): return "Hello Python3 World!" / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement3.yml000066400000000000000000000020551451700765000275050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 30a734ddc52c073e74df7b0740c178aa2be3a7e4a512ee8fa5d1362bab60984b file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - naked_identifier: PYTHON3 - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: LIB - dot: . - naked_identifier: MYLIB - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: INT - keyword: AS - script_content: - word: def - word: helloWorld - bracketed: start_bracket: ( end_bracket: ) - colon: ':' - word: return - double_quote: '"Hello Python3 World!"' function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement4.sql000066400000000000000000000002411451700765000274770ustar00rootroot00000000000000CREATE OR REPLACE PYTHON SCALAR SCRIPT TEST.MYHELLOWORLD() RETURNS VARCHAR(2000) AS l = exa.import_script('LIB.MYLIB') def run(ctx): return l.helloWorld() / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement4.yml000066400000000000000000000027501451700765000275100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3720cc2b0cd196b78b1b491299c5b6a662668b7ddb44461b09f7aaea7133957a file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PYTHON - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: TEST - dot: . - naked_identifier: MYHELLOWORLD - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: AS - script_content: - word: l - equals: '=' - word: exa - dot: . - word: import_script - bracketed: start_bracket: ( single_quote: "'LIB.MYLIB'" end_bracket: ) - word: def - word: run - bracketed: start_bracket: ( word: ctx end_bracket: ) - colon: ':' - word: return - word: l - dot: . 
- word: helloWorld - bracketed: start_bracket: ( end_bracket: ) function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement5.sql000066400000000000000000000002521451700765000275020ustar00rootroot00000000000000CREATE OR REPLACE JAVA SCALAR SCRIPT LIB.MYLIB() RETURNS VARCHAR(2000) AS class MYLIB { static String helloWorld(){ return "Hello Java World!"; } } / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_udfscript_statement5.yml000066400000000000000000000025621451700765000275120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eda93f93740e93130882f680bf0e81885097e099232b612ead8f976e26c15e7c file: statement: create_udf_script: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: JAVA - keyword: SCALAR - keyword: SCRIPT - script_reference: - naked_identifier: LIB - dot: . - naked_identifier: MYLIB - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: AS - script_content: - word: class - word: MYLIB - start_curly_bracket: '{' - word: static - word: String - word: helloWorld - bracketed: start_bracket: ( end_bracket: ) - start_curly_bracket: '{' - word: return - double_quote: '"Hello Java World!"' - semicolon: ; - end_curly_bracket: '}' - end_curly_bracket: '}' function_script_terminator: / sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_user.sql000066400000000000000000000004401451700765000243030ustar00rootroot00000000000000CREATE USER user_1 IDENTIFIED BY "h12_xhz"; CREATE USER user_2 IDENTIFIED AT LDAP AS 'cn=user_2,dc=authorization,dc=exasol,dc=com'; CREATE USER user_3 IDENTIFIED BY KERBEROS PRINCIPAL '<user>@<realm>'; CREATE USER oidctestuser IDENTIFIED BY OPENID SUBJECT 'database-user@exasol.example'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_user.yml000066400000000000000000000032041451700765000243060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: e4a7904a8b469d0738ce520cc4d8bfda8ae032d25e5d9c46f3fd630217cf68dd file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_1 - keyword: IDENTIFIED - password_auth: keyword: BY password_literal: '"h12_xhz"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_2 - keyword: IDENTIFIED - ldap_auth: - keyword: AT - keyword: LDAP - keyword: AS - quoted_literal: "'cn=user_2,dc=authorization,dc=exasol,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user_3 - keyword: IDENTIFIED - kerberos_auth: - keyword: BY - keyword: KERBEROS - keyword: PRINCIPAL - quoted_literal: "'<user>@<realm>'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: oidctestuser - keyword: IDENTIFIED - openid_auth: - keyword: BY - keyword: OPENID - keyword: SUBJECT - quoted_literal: "'database-user@exasol.example'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_view_statement.sql000066400000000000000000000007331451700765000263700ustar00rootroot00000000000000CREATE VIEW my_view as (select x from t) COMMENT IS 'nice view'; CREATE VIEW my_view (col1 ) as (select x from t); CREATE OR REPLACE FORCE VIEW my_view as select y from t; CREATE OR REPLACE VIEW my_view (col_1 COMMENT IS 'something important',col2) as select max(y) from t; CREATE VIEW schem.few (col1 ) /* some view header */ as (select x from t); CREATE VIEW schem.few (col1 ) --- single line as (select x from t); --- CREATE VIEW T AS SELECT * FROM A COMMENT IS 'BLABLA'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_view_statement.yml000066400000000000000000000136421451700765000263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 651b70f72362f42ca96206b4b51f7f42bbbbed4d17d670b24b6fef8ec7a7e0b4 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: my_view - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'nice view'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: my_view - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FORCE - keyword: VIEW - view_reference: naked_identifier: my_view - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: y from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - view_reference: naked_identifier: my_view - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'something important'" - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: - naked_identifier: schem - dot: . - naked_identifier: few - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: - naked_identifier: schem - dot: . 
- naked_identifier: few - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: as - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: x from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - view_reference: naked_identifier: T - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A - comment_clause: - keyword: COMMENT - keyword: IS - quoted_literal: "'BLABLA'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_virtual_schema_statement.sql000066400000000000000000000003721451700765000304230ustar00rootroot00000000000000CREATE VIRTUAL SCHEMA hive USING adapter.jdbc_adapter WITH SQL_DIALECT = 'HIVE' CONNECTION_STRING = 'jdbc:hive2://localhost:10000/default' SCHEMA_NAME = 'default' USERNAME = 'hive-usr' PASSWORD = 'hive-pwd'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/create_virtual_schema_statement.yml000066400000000000000000000026001451700765000304210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c9015c1cd3d7019aa2a1a344618fb599ad50b0da8060990046598c927fb8350c file: statement: create_virtual_schema_statement: - keyword: CREATE - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: hive - keyword: USING - object_reference: - naked_identifier: adapter - dot: . 
- naked_identifier: jdbc_adapter - keyword: WITH - parameter: SQL_DIALECT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'HIVE'" - parameter: CONNECTION_STRING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'jdbc:hive2://localhost:10000/default'" - parameter: SCHEMA_NAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: USERNAME - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive-usr'" - parameter: PASSWORD - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hive-pwd'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/data_type_test.sql000066400000000000000000000024441451700765000250210ustar00rootroot00000000000000CREATE TABLE t (c1 DECIMAL); CREATE TABLE t (c1 DECIMAL(10)); CREATE TABLE t (c1 DECIMAL(10,2)); CREATE TABLE t (c1 DEC(10,2)); CREATE TABLE t (c1 NUMERIC(10)); CREATE TABLE t (c1 NUMBER(10,2)); CREATE TABLE t (c1 BIGINT); CREATE TABLE t (c1 DOUBLE); CREATE TABLE t (c1 DOUBLE PRECISION); CREATE TABLE t (c1 FLOAT); CREATE TABLE t (c1 INT); CREATE TABLE t (c1 INTEGER); CREATE TABLE t (c1 REAL); CREATE TABLE t (c1 SHORTINT); CREATE TABLE t (c1 TINYINT); CREATE TABLE t (c1 SMALLINT); CREATE TABLE t (c1 BOOL); CREATE TABLE t (c1 BOOLEAN); CREATE TABLE t (c1 DATE); CREATE TABLE t (c1 TIMESTAMP); CREATE TABLE t (c1 TIMESTAMP WITH LOCAL TIME ZONE); CREATE TABLE t (c1 INTERVAL YEAR (1) TO MONTH); CREATE TABLE t (c1 INTERVAL DAY (2) TO SECOND(1)); CREATE TABLE t (c1 GEOMETRY(1000)); CREATE TABLE t (c1 HASHTYPE); CREATE TABLE t (c1 HASHTYPE(8 BYTE)); CREATE TABLE t (c1 HASHTYPE(8 BIT)); CREATE TABLE t (c1 CHAR(1)); CREATE TABLE t (c1 CHAR VARYING (1)); CREATE TABLE t (c1 VARCHAR(2000 CHAR)); CREATE TABLE t (c1 VARCHAR2(2000)); CREATE TABLE t (c1 VARCHAR(2000 BYTE)); CREATE TABLE t (c1 LONG VARCHAR); CREATE TABLE t (c1 CHARACTER LARGE OBJECT(1000)); CREATE TABLE t (c1 CHARACTER VARYING(1000)); CREATE TABLE t (c1 CLOB(2000)); CREATE TABLE t (c1 CLOB(2000) ASCII); CREATE TABLE t (c1 VARCHAR(2000 CHAR) UTF8); sqlfluff-2.3.5/test/fixtures/dialects/exasol/data_type_test.yml000066400000000000000000000512261451700765000250250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1145859a4e145cf9c986fec7cf06740385f18279926d2013bca49b17377c8f26 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DEC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: NUMERIC bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BIGINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DOUBLE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: DOUBLE - keyword: PRECISION end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: 
column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: FLOAT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: INT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: INTEGER end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: REAL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: SHORTINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: TINYINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: SMALLINT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BOOL end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: BOOLEAN end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: DATE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: TIMESTAMP - keyword: WITH - keyword: LOCAL - keyword: TIME - keyword: ZONE end_bracket: ) - statement_terminator: ; - statement: 
create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: INTERVAL - keyword: YEAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - keyword: TO - keyword: MONTH end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: INTERVAL - keyword: DAY - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - keyword: TO - keyword: SECOND - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: GEOMETRY bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' keyword: BYTE end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: HASHTYPE bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' keyword: BIT end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHAR - keyword: VARYING - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR bracketed_arguments: bracketed: 
start_bracket: ( numeric_literal: '2000' keyword: CHAR end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' keyword: BYTE end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: LONG - keyword: VARCHAR end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHARACTER - keyword: LARGE - keyword: OBJECT - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CHARACTER - keyword: VARYING - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: keyword: CLOB bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: CLOB - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - keyword: ASCII end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( table_content_definition: column_definition: column_datatype_definition: naked_identifier: c1 data_type: - keyword: VARCHAR - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' keyword: CHAR end_bracket: ) - keyword: UTF8 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/delete_statement.sql000066400000000000000000000001771451700765000253370ustar00rootroot00000000000000DELETE FROM staff WHERE name='SMITH'; DELETE * FROM staff; DELETE FROM staff PREFERRING (LOW 
change_date) PARTITION BY emp_no; sqlfluff-2.3.5/test/fixtures/dialects/exasol/delete_statement.yml000066400000000000000000000030021451700765000253270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41636b79e762da07127b4dc7bf5bb01db2dcac3263cafcf5b33fbb96a03a8e0e file: - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: staff - where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SMITH'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - star: '*' - keyword: FROM - table_reference: naked_identifier: staff - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: staff - preferring_clause: keyword: PREFERRING bracketed: start_bracket: ( preference_term: keyword: LOW column_reference: naked_identifier: change_date end_bracket: ) partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: emp_no - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_cascade_restrict_statement.sql000066400000000000000000000001111451700765000304070ustar00rootroot00000000000000DROP VIEW IF EXISTS my_view RESTRICT; DROP FUNCTION my_function CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_cascade_restrict_statement.yml000066400000000000000000000014641451700765000304250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0e330e5f9012ac663311a479e2de220a366ba3e3b494fc360efbdf734d33a5d file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - view_reference: naked_identifier: my_view - keyword: RESTRICT - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: my_function - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_cascade_statement.sql000066400000000000000000000001561451700765000265010ustar00rootroot00000000000000DROP USER test_user1;DROP USER test_user2 CASCADE; DROP ROLE myrole; DROP USER "ADMIN"; DROP ROLE "Important" sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_cascade_statement.yml000066400000000000000000000022661451700765000265070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d9fd30c72dc770f52417ead9cd818a71d9634e0c07cd5c668b91b5bce95d9c6c file: - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: test_user1 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: test_user2 - keyword: CASCADE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - role_reference: naked_identifier: myrole - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: quoted_identifier: '"ADMIN"' - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - role_reference: quoted_identifier: '"Important"' sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_connection_statement.sql000066400000000000000000000001411451700765000272470ustar00rootroot00000000000000DROP CONNECTION my_connection; DROP CONNECTION IF EXISTS my_connection; DROP CONNECTION "myCon"; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_connection_statement.yml000066400000000000000000000015751451700765000272650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be64518afe0906f729329eabb5c72bea9de42b01a1f0f30421213d9b1073ffee file: - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - naked_identifier: my_connection - statement_terminator: ; - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - naked_identifier: my_connection - statement_terminator: ; - statement: drop_connection_statement: - keyword: DROP - keyword: CONNECTION - quoted_identifier: '"myCon"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_consumer_group.sql000066400000000000000000000000661451700765000261010ustar00rootroot00000000000000DROP CONSUMER GROUP CEO; DROP CONSUMER GROUP "ADMIN"; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_consumer_group.yml000066400000000000000000000013371451700765000261050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a883a865ea068fef2ebbb05389eee862981840f9dd1ff7c602154440c7f9988d file: - statement: drop_consumer_group_statement: - keyword: DROP - keyword: CONSUMER - keyword: GROUP - naked_identifier: CEO - statement_terminator: ; - statement: drop_consumer_group_statement: - keyword: DROP - keyword: CONSUMER - keyword: GROUP - quoted_identifier: '"ADMIN"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_schema_statement.sql000066400000000000000000000002041451700765000263500ustar00rootroot00000000000000DROP FORCE SCHEMA my_schema; DROP SCHEMA IF EXISTS my_schema; DROP SCHEMA my_schema CASCADE; DROP VIRTUAL SCHEMA my_virtual_schema; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_schema_statement.yml000066400000000000000000000022351451700765000263600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c929968024c9d11e1906f33bb69d97f8fdfde93c2de59a3ca4f4c3895234ac48 file: - statement: drop_schema_statement: - keyword: DROP - keyword: FORCE - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: VIRTUAL - keyword: SCHEMA - schema_reference: naked_identifier: my_virtual_schema - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_script_statement.sql000066400000000000000000000001171451700765000264170ustar00rootroot00000000000000DROP SCRIPT my_script; DROP ADAPTER SCRIPT IF EXISTS my_schema.ADAPTER_SCRIPT; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_script_statement.yml000066400000000000000000000015211451700765000264210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cc2412a6f4b64cadb5905abeb2c69cfdd39803647f0ed2f75c2a2bc4957c3bb3 file: - statement: drop_script_statement: - keyword: DROP - keyword: SCRIPT - script_reference: naked_identifier: my_script - statement_terminator: ; - statement: drop_script_statement: - keyword: DROP - keyword: ADAPTER - keyword: SCRIPT - keyword: IF - keyword: EXISTS - script_reference: - naked_identifier: my_schema - dot: . - naked_identifier: ADAPTER_SCRIPT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_table_statement.sql000066400000000000000000000001361451700765000262030ustar00rootroot00000000000000DROP TABLE my_table; DROP TABLE IF EXISTS "MY_SCHEMA"."MY_TABLE" CASCADE CASCADE CONSTRAINTS; sqlfluff-2.3.5/test/fixtures/dialects/exasol/drop_table_statement.yml000066400000000000000000000016001451700765000262020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 822913cd2e24616f0fced92ef4c7e9950759d8c638e53e6c556f8afb4362cc6d file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - quoted_identifier: '"MY_SCHEMA"' - dot: . - quoted_identifier: '"MY_TABLE"' - keyword: CASCADE - keyword: CASCADE - keyword: CONSTRAINTS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/execute_script.sql000066400000000000000000000002271451700765000250330ustar00rootroot00000000000000EXECUTE SCRIPT script_1; EXECUTE SCRIPT script_1 WITH OUTPUT; EXECUTE SCRIPT script_2 (1,3,'ABC') WITH OUTPUT; EXECUTE SCRIPT script_3 (ARRAY(3,4,5)); sqlfluff-2.3.5/test/fixtures/dialects/exasol/execute_script.yml000066400000000000000000000036111451700765000250350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e4ba845f29b7865fdff23fb31eb5570dee3f66d9bfaa6cb66eab375eb8bdccfb file: - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_1 - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_1 - keyword: WITH - keyword: OUTPUT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_2 - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'ABC'" - end_bracket: ) - keyword: WITH - keyword: OUTPUT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - keyword: SCRIPT - script_reference: naked_identifier: script_3 - bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/explain_virtual.sql000066400000000000000000000003241451700765000252110ustar00rootroot00000000000000SELECT pushdown_id, pushdown_involved_tables, pushdown_sql FROM (EXPLAIN VIRTUAL SELECT * FROM vs_impala.sample_07 WHERE total_emp>10000); EXPLAIN VIRTUAL SELECT * FROM vs_impala.sample_07 WHERE total_emp>10000; sqlfluff-2.3.5/test/fixtures/dialects/exasol/explain_virtual.yml000066400000000000000000000060021451700765000252120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d88e69d20b6a87900b65adc3bb71901858ca2796d0deedf0bf8cff02b159bee file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: pushdown_id - comma: ',' - select_clause_element: column_reference: naked_identifier: pushdown_involved_tables - comma: ',' - select_clause_element: column_reference: naked_identifier: pushdown_sql from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: explain_virtual_statement: - keyword: EXPLAIN - keyword: VIRTUAL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: vs_impala - dot: . - naked_identifier: sample_07 where_clause: keyword: WHERE expression: column_reference: naked_identifier: total_emp comparison_operator: raw_comparison_operator: '>' numeric_literal: '10000' end_bracket: ) - statement_terminator: ; - statement: explain_virtual_statement: - keyword: EXPLAIN - keyword: VIRTUAL - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: vs_impala - dot: . - naked_identifier: sample_07 where_clause: keyword: WHERE expression: column_reference: naked_identifier: total_emp comparison_operator: raw_comparison_operator: '>' numeric_literal: '10000' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/export_statement.sql000066400000000000000000000025411451700765000254130ustar00rootroot00000000000000EXPORT tab1 INTO CSV AT 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1.csv' COLUMN SEPARATOR = ';' ENCODING = 'Latin1' WITH COLUMN NAMES; ---- EXPORT tab1 INTO CSV AT 'ftp://192.168.1.1/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1.csv' ( 1 FORMAT='DD.MM.YYYY', 2..3 DELIMIT=NEVER ) COLUMN SEPARATOR = ';' ENCODING = 'Latin1' WITH COLUMN NAMES; ---- EXPORT (SELECT * FROM T WHERE id=3295) INTO FBV AT my_connection FILE 't1.fbv' FILE 't2.fbv' REPLACE; ---- EXPORT (SELECT * FROM my_view) INTO EXA AT '192.168.6.11..14:8563' USER 'my_user' IDENTIFIED BY 'my_secret' TABLE my_schema.my_table CREATED BY 'CREATE TABLE my_table(order_id INT, price DEC(18,2))'; ---- EXPORT tab1 INTO JDBC DRIVER='MSSQL' AT 'jdbc:sqlserver://dbserver;databaseName=testdb' USER 'agent_007' IDENTIFIED BY 'secret' TABLE my_schema.tab1; ---- EXPORT tab1 INTO CSV AT 'http://HadoopNode:50070/webhdfs/v1/tmp' FILE 'file.csv?op=CREATE&user.name=user'; ---- EXPORT tab1 INTO CSV AT 'https://testbucket.s3.amazonaws.com' USER '' IDENTIFIED BY '' FILE 'file.csv'; ---- EXPORT tab1 INTO SCRIPT etl.export_hcat_table WITH HCAT_DB = 'default' HCAT_TABLE = 'my_hcat_table' HCAT_ADDRESS = 'hcatalog-server:50111' HDFS_USER = 'hdfs'; ---- EXPORT tab1 INTO LOCAL CSV FILE '/tmp/my_table.csv' COLUMN SEPARATOR = ';'; ---- sqlfluff-2.3.5/test/fixtures/dialects/exasol/export_statement.yml000066400000000000000000000210031451700765000254070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ff7361bc091cc9599a91f186e7b3cee7edee5f949a8cea05a25827bf36f91b73 file: - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Latin1'" - keyword: WITH - keyword: COLUMN - keyword: NAMES - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'ftp://192.168.1.1/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1.csv'" - csv_cols: bracketed: - start_bracket: ( - numeric_literal: '1' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD.MM.YYYY'" - comma: ',' - numeric_literal: '2' - range_operator: .. - numeric_literal: '3' - keyword: DELIMIT - comparison_operator: raw_comparison_operator: '=' - keyword: NEVER - end_bracket: ) - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Latin1'" - keyword: WITH - keyword: COLUMN - keyword: NAMES - statement_terminator: ; - statement: export_statement: keyword: EXPORT bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '3295' end_bracket: ) export_into_clause: keyword: INTO import_file: - keyword: FBV - keyword: AT - connection_definition: naked_identifier: my_connection - keyword: FILE - quoted_literal: "'t1.fbv'" - keyword: FILE - quoted_literal: "'t2.fbv'" - file_opts: keyword: REPLACE - statement_terminator: ; - statement: export_statement: keyword: EXPORT bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_view end_bracket: ) export_into_clause: keyword: INTO import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: - quoted_literal: "'192.168.6.11..14:8563'" - keyword: USER - quoted_literal: "'my_user'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'my_secret'" - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - keyword: CREATED - keyword: BY - quoted_literal: "'CREATE TABLE my_table(order_id INT, price DEC(18,2))'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_export_dbsrc: - keyword: JDBC - keyword: DRIVER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MSSQL'" - keyword: AT - connection_definition: - quoted_literal: "'jdbc:sqlserver://dbserver;databaseName=testdb'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: tab1 - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: quoted_literal: "'http://HadoopNode:50070/webhdfs/v1/tmp'" - keyword: FILE - quoted_literal: "'file.csv?op=CREATE&user.name=user'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'https://testbucket.s3.amazonaws.com'" - keyword: USER - quoted_literal: "''" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "''" - keyword: FILE - quoted_literal: "'file.csv'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_script: - keyword: SCRIPT - object_reference: - naked_identifier: etl - dot: . - naked_identifier: export_hcat_table - keyword: WITH - parameter: HCAT_DB - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: HCAT_TABLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_hcat_table'" - parameter: HCAT_ADDRESS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hcatalog-server:50111'" - parameter: HDFS_USER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs'" - statement_terminator: ; - statement: export_statement: keyword: EXPORT table_reference: naked_identifier: tab1 export_into_clause: keyword: INTO import_file: - keyword: LOCAL - keyword: CSV - keyword: FILE - quoted_literal: "'/tmp/my_table.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/flush_statistics.sql000066400000000000000000000000221451700765000253710ustar00rootroot00000000000000FLUSH STATISTICS; sqlfluff-2.3.5/test/fixtures/dialects/exasol/flush_statistics.yml000066400000000000000000000010001451700765000253700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cf3dae76baed74833328c2789c63a1361844b4f116d93ce1dfe781f9969f0f53 file: statement: flush_statistics_statement: - keyword: FLUSH - keyword: STATISTICS statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/import_statement.sql000066400000000000000000000035431451700765000254070ustar00rootroot00000000000000IMPORT INTO table_3 (col1, col2, col4) FROM ORA AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' STATEMENT ' SELECT * FROM orders WHERE order_state=''OK'' ' ERRORS INTO error_table (CURRENT_TIMESTAMP) REJECT LIMIT 10 ; ---- IMPORT INTO table_3 (col1, col2, col4) FROM ORA AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' TABLE a.tab (c1,c2,c3) ERRORS INTO error_table (CURRENT_TIMESTAMP) REJECT LIMIT 10 ; ---- IMPORT INTO table_1 FROM CSV AT 'http://192.168.1.1:8080/' USER 'agent_007' IDENTIFIED BY 'secret' FILE 'tab1_part1.csv' FILE 'tab1_part2.csv' ( 1 FORMAT='DD-MM-YYYY', 2..4 FORMAT='YYYYMMDD' ) COLUMN SEPARATOR = ';' SKIP = 5; ---- IMPORT INTO table_2 FROM FBV AT my_fileserver FILE 'tab2_part1.fbv' ( SIZE=8 PADDING='+' ALIGN=RIGHT, SIZE=4, SIZE=8, SIZE=32 FORMAT='DD-MM-YYYY' ) TRIM ; ---- IMPORT INTO table_7 FROM SCRIPT etl.import_hcat_table AT my_oracle USER 'agent_008' IDENTIFIED BY 'secret' WITH HCAT_DB = 'default' HCAT_TABLE = 'my_hcat_table' HCAT_ADDRESS = 'hcatalog-server:50111' HDFS_USER = 'hdfs'; ---- IMPORT INTO table_4 FROM JDBC DRIVER='MSSQL' AT 'jdbc:sqlserver://dbserver;databaseName=testdb' USER 'agent_008' IDENTIFIED BY 'secret' STATEMENT ' SELECT * FROM orders WHERE order_state=''OK'' '; ---- IMPORT INTO table_5 FROM CSV AT 'http://HadoopNode:50070/webhdfs/v1/tmp' FILE 'file.csv?op=OPEN&user.name=user'; ---- IMPORT INTO table_6 FROM EXA AT my_exasol TABLE MY_SCHEMA.MY_TABLE; ---- IMPORT INTO (LIKE CAT) FROM EXA AT my_exa_conn STATEMENT ' SELECT OBJECT_NAME, OBJECT_TYPE FROM EXA_USER_OBJECTS WHERE OBJECT_TYPE IN (''TABLE'', ''VIEW'') '; ---- IMPORT INTO table_8 FROM LOCAL CSV FILE '~/my_table.csv' COLUMN SEPARATOR = ';' SKIP = 5; ---- IMPORT INTO table_1 FROM CSV AT 'https://<bucketname>.s3-<region>.amazonaws.com/' USER '<AccessKeyID>' IDENTIFIED BY '<SecretAccessKey>' FILE 'file.csv'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/import_statement.yml000066400000000000000000000264001451700765000254060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 78eb4b7335b845c9d9533762c7710a67691acd763acbeede3a6807a5b21ec22c file: - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_3 - bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col4 end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: ORA - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: STATEMENT - quoted_literal: "' SELECT * FROM orders WHERE order_state=''OK'' '" import_errors_clause: - keyword: ERRORS - keyword: INTO - import_error_destination: table_reference: naked_identifier: error_table - bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - reject_clause: - keyword: REJECT - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_3 - bracketed: start_bracket: ( identifier_list: - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col4 end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: ORA - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: TABLE - table_reference: - naked_identifier: a - dot: . - naked_identifier: tab - bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - comma: ',' - naked_identifier: c3 end_bracket: ) import_errors_clause: - keyword: ERRORS - keyword: INTO - import_error_destination: table_reference: naked_identifier: error_table - bracketed: start_bracket: ( expression: bare_function: CURRENT_TIMESTAMP end_bracket: ) - reject_clause: - keyword: REJECT - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_1 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'http://192.168.1.1:8080/'" - keyword: USER - quoted_literal: "'agent_007'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: FILE - quoted_literal: "'tab1_part1.csv'" - keyword: FILE - quoted_literal: "'tab1_part2.csv'" - csv_cols: bracketed: - start_bracket: ( - numeric_literal: '1' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD-MM-YYYY'" - comma: ',' - numeric_literal: '2' - range_operator: .. 
- numeric_literal: '4' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYYMMDD'" - end_bracket: ) - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: SKIP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_2 - import_from_clause: keyword: FROM import_file: - keyword: FBV - keyword: AT - connection_definition: naked_identifier: my_fileserver - keyword: FILE - quoted_literal: "'tab2_part1.fbv'" - fbv_cols: bracketed: - start_bracket: ( - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8' - keyword: PADDING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'+'" - keyword: ALIGN - comparison_operator: raw_comparison_operator: '=' - keyword: RIGHT - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '8' - comma: ',' - keyword: SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '32' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'DD-MM-YYYY'" - end_bracket: ) - file_opts: keyword: TRIM - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_7 - import_from_clause: keyword: FROM import_script: - keyword: SCRIPT - object_reference: - naked_identifier: etl - dot: . - naked_identifier: import_hcat_table - keyword: AT - connection_definition: - naked_identifier: my_oracle - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: WITH - parameter: HCAT_DB - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'default'" - parameter: HCAT_TABLE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_hcat_table'" - parameter: HCAT_ADDRESS - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hcatalog-server:50111'" - parameter: HDFS_USER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hdfs'" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_4 - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: JDBC - keyword: DRIVER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MSSQL'" - keyword: AT - connection_definition: - quoted_literal: "'jdbc:sqlserver://dbserver;databaseName=testdb'" - keyword: USER - quoted_literal: "'agent_008'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'secret'" - keyword: STATEMENT - quoted_literal: "' SELECT * FROM orders WHERE order_state=''OK'' '" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_5 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: quoted_literal: "'http://HadoopNode:50070/webhdfs/v1/tmp'" - keyword: FILE - quoted_literal: "'file.csv?op=OPEN&user.name=user'" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_6 - import_from_clause: keyword: FROM 
import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_exasol - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_TABLE - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - bracketed: start_bracket: ( import_columns: table_like_clause: keyword: LIKE table_reference: naked_identifier: CAT end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_exa_conn - keyword: STATEMENT - quoted_literal: "' SELECT OBJECT_NAME, OBJECT_TYPE FROM EXA_USER_OBJECTS\ \ WHERE OBJECT_TYPE IN (''TABLE'', ''VIEW'') '" - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_8 - import_from_clause: keyword: FROM import_file: - keyword: LOCAL - keyword: CSV - keyword: FILE - quoted_literal: "'~/my_table.csv'" - file_opts: - keyword: COLUMN - keyword: SEPARATOR - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "';'" - keyword: SKIP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: import_statement: - keyword: IMPORT - keyword: INTO - table_reference: naked_identifier: table_1 - import_from_clause: keyword: FROM import_file: - keyword: CSV - keyword: AT - connection_definition: - quoted_literal: "'https://.s3-.amazonaws.com/'" - keyword: USER - quoted_literal: "''" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "''" - keyword: FILE - quoted_literal: "'file.csv'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/insert_statement.sql000066400000000000000000000007731451700765000254030ustar00rootroot00000000000000INSERT INTO t (n1, n2, t1) VALUES (1, 2.34, 'abc'); INSERT INTO t VALUES (2, 1.56, 'ghi'), (3, 5.92, 'pqr'); INSERT INTO t VALUES (4, DEFAULT, 'xyz'); INSERT INTO t (i,k) SELECT * FROM u; INSERT INTO t (i) SELECT max(j) FROM u; INSERT INTO t DEFAULT VALUES; INSERT INTO t (SELECT * FROM u); INSERT INTO s.t(c1, c2, c3) VALUES((SELECT x FROM y), 'val1', 'val2'); INSERT INTO t (adate) values(current_timestamp); INSERT INTO t VALUES BETWEEN 1 AND 100; INSERT INTO t (i) VALUES BETWEEN 1 AND 100 WITH STEP 4; sqlfluff-2.3.5/test/fixtures/dialects/exasol/insert_statement.yml000066400000000000000000000157611451700765000254100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ca0a589ad98d8a2f91b99db2f29bc962416ea5cef4eaaee8fb4d1d07bd1e7c3 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: n1 - comma: ',' - column_reference: naked_identifier: n2 - comma: ',' - column_reference: naked_identifier: t1 - end_bracket: ) - values_insert_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2.34' - comma: ',' - quoted_literal: "'abc'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_insert_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '1.56' - comma: ',' - quoted_literal: "'ghi'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5.92' - comma: ',' - quoted_literal: "'pqr'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_insert_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - keyword: DEFAULT - comma: ',' - quoted_literal: "'xyz'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: i - comma: ',' - column_reference: naked_identifier: k - end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: j end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s - dot: . 
- naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_insert_clause: keyword: VALUES bracketed: - start_bracket: ( - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: y end_bracket: ) - comma: ',' - quoted_literal: "'val1'" - comma: ',' - quoted_literal: "'val2'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: adate end_bracket: ) - values_insert_clause: keyword: values bracketed: start_bracket: ( bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '100' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '100' - keyword: WITH - keyword: STEP - numeric_literal: '4' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/kill_statement.sql000066400000000000000000000002771451700765000250310ustar00rootroot00000000000000KILL STATEMENT IN SESSION 7792436882684342285; KILL SESSION 7792436882684342285; KILL SESSION CURRENT_SESSION; KILL STATEMENT 1234 IN SESSION 7792436882684342285 WITH MESSAGE 'not allowed!'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/kill_statement.yml000066400000000000000000000022161451700765000250260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c1a4373db22db074b7786018802971d2ddad47caa2e178cc8c204728654f9186 file: - statement: kill_statement: - keyword: KILL - keyword: STATEMENT - keyword: IN - keyword: SESSION - numeric_literal: '7792436882684342285' - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: SESSION - numeric_literal: '7792436882684342285' - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: SESSION - keyword: CURRENT_SESSION - statement_terminator: ; - statement: kill_statement: - keyword: KILL - keyword: STATEMENT - numeric_literal: '1234' - keyword: IN - keyword: SESSION - numeric_literal: '7792436882684342285' - keyword: WITH - keyword: MESSAGE - quoted_literal: "'not allowed!'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/merge_statement.sql000066400000000000000000000010051451700765000251630ustar00rootroot00000000000000MERGE INTO staff T USING changes U ON T.name = U.name WHEN MATCHED THEN UPDATE SET T.salary = U.salary, T.lastChange = CURRENT_DATE WHERE T.salary < U.salary WHEN NOT MATCHED THEN INSERT VALUES (U.name,U.salary,CURRENT_DATE); ---- MERGE INTO staff T USING (SELECT name FROM X) U ON T.name = U.name WHEN MATCHED THEN DELETE; --- MERGE INTO staff T USING (SELECT name FROM X) U ON T.name = U.name WHEN NOT MATCHED THEN INSERT VALUES (1,2,3) WHEN MATCHED THEN DELETE; sqlfluff-2.3.5/test/fixtures/dialects/exasol/merge_statement.yml000066400000000000000000000145641451700765000252030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4d138880f1919176af50be3073357c3b570fd5e695c4e10bceafe0075c0e799 file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - table_reference: naked_identifier: changes - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: T - dot: . - naked_identifier: salary comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary - comma: ',' - set_clause: column_reference: - naked_identifier: T - dot: . - naked_identifier: lastChange comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_DATE where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: salary - comparison_operator: raw_comparison_operator: < - column_reference: - naked_identifier: U - dot: . - naked_identifier: salary merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: U - dot: . 
- naked_identifier: name - comma: ',' - expression: column_reference: - naked_identifier: U - dot: . - naked_identifier: salary - comma: ',' - expression: bare_function: CURRENT_DATE - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: X end_bracket: ) - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: staff - alias_expression: naked_identifier: T - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: X end_bracket: ) - alias_expression: naked_identifier: U - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: U - dot: . - naked_identifier: name - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/open_close_schema.sql000066400000000000000000000000641451700765000254520ustar00rootroot00000000000000OPEN SCHEMA test; OPEN SCHEMA "test"; CLOSE SCHEMA; sqlfluff-2.3.5/test/fixtures/dialects/exasol/open_close_schema.yml000066400000000000000000000015031451700765000254530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c35b9191fd265edef08cbb13dbc510e4c3887e8f5ddb58a18e60337dedd4c446 file: - statement: open_schema_statement: - keyword: OPEN - keyword: SCHEMA - schema_reference: naked_identifier: test - statement_terminator: ; - statement: open_schema_statement: - keyword: OPEN - keyword: SCHEMA - schema_reference: quoted_identifier: '"test"' - statement_terminator: ; - statement: close_schema_statement: - keyword: CLOSE - keyword: SCHEMA - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/preload_statement.sql000066400000000000000000000003141451700765000255140ustar00rootroot00000000000000PRELOAD TABLE t(i); PRELOAD DATABASE; PRELOAD TABLES t1,t2; PRELOAD SCHEMAS s1,s2; PRELOAD SCHEMA s1; TRUNCATE AUDIT LOGS; TRUNCATE AUDIT LOGS KEEP LAST MONTH; TRUNCATE AUDIT LOGS KEEP FROM '2019-01-01'; sqlfluff-2.3.5/test/fixtures/dialects/exasol/preload_statement.yml000066400000000000000000000036431451700765000255260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 748f74032b62b60eec6d0fc4fb7e71b6d26c0659bdacca0f33b22f1e64425b84 file: - statement: preload_statement: - keyword: PRELOAD - keyword: TABLE - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: DATABASE - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: TABLES - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: SCHEMAS - schema_reference: naked_identifier: s1 - comma: ',' - schema_reference: naked_identifier: s2 - statement_terminator: ; - statement: preload_statement: - keyword: PRELOAD - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - keyword: KEEP - keyword: LAST - keyword: MONTH - statement_terminator: ; - statement: truncate_audit_logs_statement: - keyword: TRUNCATE - keyword: AUDIT - keyword: LOGS - keyword: KEEP - keyword: FROM - quoted_literal: "'2019-01-01'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/recompress_statement.sql000066400000000000000000000001011451700765000262420ustar00rootroot00000000000000RECOMPRESS TABLE t1 (column_1); RECOMPRESS TABLES t2,t3 ENFORCE; sqlfluff-2.3.5/test/fixtures/dialects/exasol/recompress_statement.yml000066400000000000000000000017051451700765000262570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b87f2a72fec13d09f3cbdb1612300c8e0a8180987f9d1dfcdcf15ca928899fa1 file: - statement: recompress_reorganize_statement: - keyword: RECOMPRESS - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_reference: naked_identifier: column_1 end_bracket: ) - statement_terminator: ; - statement: recompress_reorganize_statement: - keyword: RECOMPRESS - keyword: TABLES - table_reference: naked_identifier: t2 - comma: ',' - table_reference: naked_identifier: t3 - keyword: ENFORCE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/rename_statement.sql000066400000000000000000000001321451700765000253330ustar00rootroot00000000000000RENAME SCHEMA s1 TO s2; RENAME TABLE t1 TO t2; RENAME s2.t3 TO t4; RENAME s2.t3 TO s2.t4; sqlfluff-2.3.5/test/fixtures/dialects/exasol/rename_statement.yml000066400000000000000000000025651451700765000253510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ce3aa039eb41a592cc11abb2b60f23315a74ffa3a705616f63f2b3223fa5c590 file: - statement: rename_statement: - keyword: RENAME - keyword: SCHEMA - object_reference: naked_identifier: s1 - keyword: TO - object_reference: naked_identifier: s2 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: TO - object_reference: naked_identifier: t2 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - object_reference: - naked_identifier: s2 - dot: . - naked_identifier: t3 - keyword: TO - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: rename_statement: - keyword: RENAME - object_reference: - naked_identifier: s2 - dot: . - naked_identifier: t3 - keyword: TO - object_reference: - naked_identifier: s2 - dot: . 
- naked_identifier: t4 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/exasol/select_statement.sql000066400000000000000000000075221451700765000253550ustar00rootroot00000000000000SELECT last_name, employee_id id, manager_id mgr_id, CONNECT_BY_ISLEAF leaf, LEVEL, LPAD(' ', 2*LEVEL-1)||SYS_CONNECT_BY_PATH(last_name, '/') "PATH" FROM employees CONNECT BY PRIOR employee_id = manager_id AND dept_no = dno START WITH last_name = 'Clark' ORDER BY employee_id; ---- SELECT store, SUM(price) AS volume FROM sales GROUP BY store ORDER BY store DESC; ---- SELECT name, SUM(price) AS volume FROM customers JOIN sales USING (c_id) GROUP BY name ORDER BY name; ---- WITH tmp_view AS (SELECT name, price, store FROM customers, sales WHERE customers.c_id=sales.c_id) SELECT sum(price) AS volume, name, store FROM tmp_view GROUP BY GROUPING SETS (name,store,()); ---- SELECT * FROM (IMPORT INTO (v VARCHAR(1)) FROM EXA AT my_connection TABLE sys.dual); ---- SELECT aschema.afunction('hello', 123) FROM aschema.mytable WHERE (a,2,substr(c,1,3)) IN (SELECT a,b,c FROM bschema.yourtable); ---- WITH mylist AS ( VALUES ('a','b','c'), ('d','e','f'), (f1('a'),'b','d') AS mylist (a,b,c) ) SELECT * from mylist; ---- SELECT rowid, ROW_NUMBER () OVER ( PARTITION BY ( col1, col2 ) ORDER BY col1 DESC, col2 DESC ); ---- SELECT rowid, ROW_NUMBER () OVER ( PARTITION BY ( col1, col2 )) ORDER BY col1 DESC, col2 DESC; ---- SELECT x WITH INVALID UNIQUE(myid) FROM t; ---- SELECT * FROM values('x', 'y'); ---- SELECT * FROM values('x', 'y') AS x(c1,c2); ---- SELECT * FROM values(('x','2'), ('y','2')) AS x(c1,c2); ---- SELECT * FROM(VALUES 1,2,3); ---- SELECT * FROM(VALUES 1,2,3) AS xs(n1); ---- SELECT * FROM VALUES BETWEEN 1 AND 15 WITH STEP 4; ---- SELECT first_name,name WITH INVALID FOREIGN KEY (nr) from T1 REFERENCING T2 (id); ---- SELECT * WITH INVALID FOREIGN KEY (first_name,name) from T1 REFERENCING T2; ---- SELECT INVALID FOREIGN KEY (nr,first_name,name) from T1 REFERENCING T2 (id, first_name,name); ---- SELECT * INTO TABLE t2 FROM t1 ORDER BY 1; ---- SELECT date'2021-09-21' FROM dual; ---- SELECT INVALID PRIMARY KEY (first_name) from T1; ---- SELECT JSON_EXTRACT(json_str, '$."@id"', '$.error()') EMITS ( id VARCHAR(2000), error_column VARCHAR(2000000) ) FROM t; ---- SELECT 10 / 2; ---- select count(*) as a, local.a*10 from x; ---- SELECT ABS(x) AS x FROM t WHERE local.x>10; ---- SELECT c1 as cx, count(*) as cc FROM x GROUP BY local.cx; ---- SELECT c1 as cx FROM x ORDER BY local.cx; ---- SELECT c1, count(*) as c FROM x GROUP BY 1 HAVING local.c > 1; ---- SELECT S_ID, C_ID, PRICE, ROW_NUMBER() OVER (PARTITION BY C_ID ORDER BY PRICE DESC) NUM FROM SALES QUALIFY local.NUM = 1; SELECT [day] FROM T; ---- SELECT "day" FROM T; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PRIOR TO LOW LOCAL.budget PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PLUS LOW LOCAL.budget PARTITION BY local.c1; ---- SELECT * FROM T PREFERRING HIGH LOCAL.ranking PRIOR TO LOW LOCAL.budget INVERSE col20 PARTITION BY local.c1; ---- SELECT * FROM T WHERE (LOCAL.c1, LOCAL.c2) NOT IN (SELECT c1,c2 FROM b); ---- SELECT 'ABC' as c1 FROM dual WHERE local.c1 = 'ABC'; SELECT a, b, c FROM x union SELECT a, b, c FROM y ORDER BY a; ---- SELECT -1 * row_number() OVER() AS nummer FROM sys.exa_sql_keywords CROSS JOIN sys.exa_sql_keywords UNION ALL SELECT 0; -- SELECT INTERVAL '5' MONTH, INTERVAL '130' MONTH (3), INTERVAL '27' YEAR, INTERVAL '100-1' YEAR(3) TO MONTH, 
INTERVAL '2-1' YEAR TO MONTH, INTERVAL '10:20' HOUR TO MINUTE, INTERVAL '2 23:10:59' DAY TO SECOND, INTERVAL '6' MINUTE, INTERVAL '5' DAY , INTERVAL '100' HOUR(3) , INTERVAL '1.99999' SECOND(2,2) , INTERVAL '23:10:59.123' HOUR(2) TO SECOND(3); -- SELECT v, DATE'2020-10-26' + v * INTERVAL'7'DAY AS late_2020_mondays, 5 * v AS five_times_table FROM VALUES BETWEEN 1 AND 9 AS v(v); sqlfluff-2.3.5/test/fixtures/dialects/exasol/select_statement.yml000066400000000000000000001470751451700765000253670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db5ec62e466fde8b17b5e49d240ef94c936848af9ed48ce964bb3663a0204256 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id alias_expression: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id alias_expression: naked_identifier: mgr_id - comma: ',' - select_clause_element: bare_function: CONNECT_BY_ISLEAF alias_expression: naked_identifier: leaf - comma: ',' - select_clause_element: bare_function: LEVEL - comma: ',' - select_clause_element: expression: - function: function_name: function_name_identifier: LPAD bracketed: - start_bracket: ( - expression: quoted_literal: "' '" - comma: ',' - expression: - numeric_literal: '2' - binary_operator: '*' - bare_function: LEVEL - binary_operator: '-' - numeric_literal: '1' - end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"PATH"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connect_by_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - column_reference: naked_identifier: dept_no - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: dno - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Clark'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: store - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: keyword: AS naked_identifier: volume from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: store 
orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: store - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: keyword: AS naked_identifier: volume from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: sales - keyword: USING - bracketed: start_bracket: ( naked_identifier: c_id end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tmp_view keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sales - dot: . 
- naked_identifier: c_id end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) alias_expression: keyword: AS naked_identifier: volume - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: store from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_view groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: store - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: import_statement: - keyword: IMPORT - keyword: INTO - bracketed: start_bracket: ( import_columns: column_datatype_definition: naked_identifier: v data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) end_bracket: ) - import_from_clause: keyword: FROM import_export_dbsrc: - keyword: EXA - keyword: AT - connection_definition: naked_identifier: my_connection - keyword: TABLE - table_reference: - naked_identifier: sys - dot: . - naked_identifier: dual end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: naked_identifier: aschema dot: . function_name_identifier: afunction bracketed: - start_bracket: ( - expression: quoted_literal: "'hello'" - comma: ',' - expression: numeric_literal: '123' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: aschema - dot: . - naked_identifier: mytable where_clause: keyword: WHERE expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - numeric_literal: '2' - comma: ',' - function: function_name: function_name_identifier: substr bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: bschema - dot: . 
- naked_identifier: yourtable end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: mylist keyword: AS bracketed: start_bracket: ( values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'a'" - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'c'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - quoted_literal: "'d'" - comma: ',' - quoted_literal: "'e'" - comma: ',' - quoted_literal: "'f'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: f1 bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - comma: ',' - quoted_literal: "'b'" - comma: ',' - quoted_literal: "'d'" - end_bracket: ) - alias_expression: keyword: AS naked_identifier: mylist bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mylist - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: rowid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROW_NUMBER bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: column_reference: naked_identifier: col2 - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - keyword: DESC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: rowid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROW_NUMBER bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: column_reference: naked_identifier: col2 - end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - keyword: DESC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: x with_invalid_unique_pk_clause: - keyword: WITH - keyword: INVALID - keyword: UNIQUE - bracketed: start_bracket: ( column_reference: naked_identifier: myid end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'y'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'y'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: bracketed: - start_bracket: ( - quoted_literal: "'x'" - comma: ',' - quoted_literal: "'2'" - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - quoted_literal: "'y'" - comma: ',' - quoted_literal: "'2'" - end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: x bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) alias_expression: keyword: AS naked_identifier: xs bracketed: start_bracket: ( identifier_list: naked_identifier: n1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_range_clause: - keyword: VALUES - keyword: BETWEEN - numeric_literal: '1' - keyword: AND - numeric_literal: '15' - keyword: WITH - keyword: STEP - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: name - with_invalid_foreign_key_clause: - keyword: WITH - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: nr end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: 
naked_identifier: T2 bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' with_invalid_foreign_key_clause: - keyword: WITH - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: naked_identifier: T2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT with_invalid_foreign_key_clause: - keyword: INVALID - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: nr - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 referencing_clause: keyword: REFERENCING table_reference: naked_identifier: T2 bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_table_clause: - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: date date_constructor_literal: "'2021-09-21'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT with_invalid_unique_pk_clause: - keyword: INVALID - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: first_name end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: T1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: JSON_EXTRACT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: json_str - comma: ',' - expression: quoted_literal: "'$.\"@id\"'" - comma: ',' - expression: quoted_literal: "'$.error()'" - end_bracket: ) emits_segment: keyword: EMITS bracketed: - start_bracket: ( - column_datatype_definition: naked_identifier: id data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000' end_bracket: ) - comma: ',' - column_datatype_definition: naked_identifier: error_column data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '2000000' end_bracket: ) - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '10' - binary_operator: / - numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: as naked_identifier: a - comma: ',' - select_clause_element: expression: local_alias_segment: keyword: local dot: . naked_identifier: a binary_operator: '*' numeric_literal: '10' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ABS bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) alias_expression: keyword: AS naked_identifier: x from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: local_alias_segment: keyword: local dot: . naked_identifier: x comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c1 alias_expression: keyword: as naked_identifier: cx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: as naked_identifier: cc from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x groupby_clause: - keyword: GROUP - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: cx - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c1 alias_expression: keyword: as naked_identifier: cx from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x orderby_clause: - keyword: ORDER - keyword: BY - expression: local_alias_segment: keyword: local dot: . naked_identifier: cx - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: as naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' having_clause: keyword: HAVING expression: local_alias_segment: keyword: local dot: . 
            naked_identifier: c
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: S_ID
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: C_ID
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: PRICE
      - comma: ','
      - select_clause_element:
          function:
            function_name:
              function_name_identifier: ROW_NUMBER
            bracketed:
              start_bracket: (
              end_bracket: )
            over_clause:
              keyword: OVER
              bracketed:
                start_bracket: (
                window_specification:
                  partitionby_clause:
                  - keyword: PARTITION
                  - keyword: BY
                  - expression:
                      column_reference:
                        naked_identifier: C_ID
                  orderby_clause:
                  - keyword: ORDER
                  - keyword: BY
                  - column_reference:
                      naked_identifier: PRICE
                  - keyword: DESC
                end_bracket: )
          alias_expression:
            naked_identifier: NUM
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: SALES
      qualify_clause:
        keyword: QUALIFY
        expression:
          local_alias_segment:
            keyword: local
            dot: .
            naked_identifier: NUM
          comparison_operator:
            raw_comparison_operator: '='
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            identifier: '[day]'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
            quoted_identifier: '"day"'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
      preferring_clause:
        keyword: PREFERRING
        preference_term:
          keyword: HIGH
          local_alias_segment:
            keyword: LOCAL
            dot: .
            naked_identifier: ranking
        partitionby_clause:
        - keyword: PARTITION
        - keyword: BY
        - expression:
            local_alias_segment:
              keyword: local
              dot: .
              naked_identifier: c1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
      preferring_clause:
        keyword: PREFERRING
        preference_term:
          keyword: HIGH
          local_alias_segment:
            keyword: LOCAL
            dot: .
            naked_identifier: ranking
          plus_prior_inverse:
          - keyword: PRIOR
          - keyword: TO
          - preference_term:
              keyword: LOW
              local_alias_segment:
                keyword: LOCAL
                dot: .
                naked_identifier: budget
        partitionby_clause:
        - keyword: PARTITION
        - keyword: BY
        - expression:
            local_alias_segment:
              keyword: local
              dot: .
              naked_identifier: c1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
      preferring_clause:
        keyword: PREFERRING
        preference_term:
          keyword: HIGH
          local_alias_segment:
            keyword: LOCAL
            dot: .
            naked_identifier: ranking
          plus_prior_inverse:
            keyword: PLUS
            preference_term:
              keyword: LOW
              local_alias_segment:
                keyword: LOCAL
                dot: .
                naked_identifier: budget
        partitionby_clause:
        - keyword: PARTITION
        - keyword: BY
        - expression:
            local_alias_segment:
              keyword: local
              dot: .
              naked_identifier: c1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
      preferring_clause:
        keyword: PREFERRING
        preference_term:
          keyword: HIGH
          local_alias_segment:
            keyword: LOCAL
            dot: .
            naked_identifier: ranking
          plus_prior_inverse:
          - keyword: PRIOR
          - keyword: TO
          - preference_term:
              keyword: LOW
              local_alias_segment:
                keyword: LOCAL
                dot: .
                naked_identifier: budget
              plus_prior_inverse:
                keyword: INVERSE
                preference_term:
                  column_reference:
                    naked_identifier: col20
        partitionby_clause:
        - keyword: PARTITION
        - keyword: BY
        - expression:
            local_alias_segment:
              keyword: local
              dot: .
              naked_identifier: c1
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: T
      where_clause:
        keyword: WHERE
        expression:
        - bracketed:
          - start_bracket: (
          - local_alias_segment:
              keyword: LOCAL
              dot: .
              naked_identifier: c1
          - comma: ','
          - local_alias_segment:
              keyword: LOCAL
              dot: .
              naked_identifier: c2
          - end_bracket: )
        - keyword: NOT
        - keyword: IN
        - bracketed:
            start_bracket: (
            select_statement:
              select_clause:
              - keyword: SELECT
              - select_clause_element:
                  column_reference:
                    naked_identifier: c1
              - comma: ','
              - select_clause_element:
                  column_reference:
                    naked_identifier: c2
              from_clause:
                keyword: FROM
                from_expression:
                  from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: b
            end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          quoted_literal: "'ABC'"
          alias_expression:
            keyword: as
            naked_identifier: c1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: dual
      where_clause:
        keyword: WHERE
        expression:
          local_alias_segment:
            keyword: local
            dot: .
            naked_identifier: c1
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: "'ABC'"
- statement_terminator: ;
- statement:
    set_expression:
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: a
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: b
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: c
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: x
    - set_operator:
        keyword: union
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: a
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: b
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: c
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: y
    - orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: a
- statement_terminator: ;
- statement:
    set_expression:
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            expression:
              numeric_literal:
                sign_indicator: '-'
                numeric_literal: '1'
              binary_operator: '*'
              function:
                function_name:
                  function_name_identifier: row_number
                bracketed:
                  start_bracket: (
                  end_bracket: )
                over_clause:
                  keyword: OVER
                  bracketed:
                    start_bracket: (
                    end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: nummer
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                - naked_identifier: sys
                - dot: .
                - naked_identifier: exa_sql_keywords
            join_clause:
            - keyword: CROSS
            - keyword: JOIN
            - from_expression_element:
                table_expression:
                  table_reference:
                  - naked_identifier: sys
                  - dot: .
                  - naked_identifier: exa_sql_keywords
    - set_operator:
      - keyword: UNION
      - keyword: ALL
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            numeric_literal: '0'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'5'"
          - keyword: MONTH
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'130'"
          - keyword: MONTH
          - bracketed:
              start_bracket: (
              numeric_literal: '3'
              end_bracket: )
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'27'"
          - keyword: YEAR
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'100-1'"
          - keyword: YEAR
          - bracketed:
              start_bracket: (
              numeric_literal: '3'
              end_bracket: )
          - keyword: TO
          - keyword: MONTH
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'2-1'"
          - keyword: YEAR
          - keyword: TO
          - keyword: MONTH
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'10:20'"
          - keyword: HOUR
          - keyword: TO
          - keyword: MINUTE
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'2 23:10:59'"
          - keyword: DAY
          - keyword: TO
          - keyword: SECOND
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'6'"
          - keyword: MINUTE
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'5'"
          - keyword: DAY
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'100'"
          - keyword: HOUR
          - bracketed:
              start_bracket: (
              numeric_literal: '3'
              end_bracket: )
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'1.99999'"
          - keyword: SECOND
          - bracketed:
            - start_bracket: (
            - numeric_literal: '2'
            - comma: ','
            - numeric_literal: '2'
            - end_bracket: )
      - comma: ','
      - select_clause_element:
          interval_expression:
          - keyword: INTERVAL
          - quoted_literal: "'23:10:59.123'"
          - keyword: HOUR
          - bracketed:
              start_bracket: (
              numeric_literal: '2'
              end_bracket: )
          - keyword: TO
          - keyword: SECOND
          - bracketed:
              start_bracket: (
              numeric_literal: '3'
              end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: v
      - comma: ','
      - select_clause_element:
          expression:
          - keyword: DATE
          - date_constructor_literal: "'2020-10-26'"
          - binary_operator: +
          - column_reference:
              naked_identifier: v
          - binary_operator: '*'
          - interval_expression:
            - keyword: INTERVAL
            - quoted_literal: "'7'"
            - keyword: DAY
          alias_expression:
            keyword: AS
            naked_identifier: late_2020_mondays
      - comma: ','
      - select_clause_element:
          expression:
            numeric_literal: '5'
            binary_operator: '*'
            column_reference:
              naked_identifier: v
          alias_expression:
            keyword: AS
            naked_identifier: five_times_table
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              values_range_clause:
              - keyword: VALUES
              - keyword: BETWEEN
              - numeric_literal: '1'
              - keyword: AND
              - numeric_literal: '9'
            alias_expression:
              keyword: AS
              naked_identifier: v
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: v
                end_bracket: )
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/exasol/truncate_statement.sql
TRUNCATE TABLE test;
sqlfluff-2.3.5/test/fixtures/dialects/exasol/truncate_statement.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a7f00bfe35abda3c5f894dcbed8f1790f94822f36c9e94a05e0ebbd4be83a5a3
file:
  statement:
    truncate_table:
    - keyword: TRUNCATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: test
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/exasol/update_statement.sql
UPDATE staff SET salary=salary*1.1 WHERE name='SMITH';
----
UPDATE staff AS U SET U.salary=U.salary/1.95583, U.currency='EUR' WHERE U.currency='DM';
----
UPDATE staff AS U SET U.salary=V.salary, U.currency=V.currency FROM staff AS U, staff_updates AS V WHERE U.name=V.name;
----
UPDATE order_pos SET stocks=stocks*10 PREFERRING HIGH (order_date) PARTITION BY (shop_id, order_id);
----
UPDATE t1 SET x=t2.c1, w=t4.c2 FROM t2 JOIN t3 g ON t2.c1=t3.c2 LEFT JOIN t4 ON t4.c3=t3.c1 ;

sqlfluff-2.3.5/test/fixtures/dialects/exasol/update_statement.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a155b30a76ea8dc167ff827e171f4c86fd931ab6e3365743f68cef6e793efd02
file:
- statement:
    update_statement:
      keyword: UPDATE
      table_reference:
        naked_identifier: staff
      set_clause_list:
        keyword: SET
        set_clause:
          column_reference:
            naked_identifier: salary
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
              naked_identifier: salary
            binary_operator: '*'
            numeric_literal: '1.1'
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: name
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: "'SMITH'"
- statement_terminator: ;
- statement:
    update_statement:
      keyword: UPDATE
      table_reference:
        naked_identifier: staff
      alias_expression:
        keyword: AS
        naked_identifier: U
      set_clause_list:
      - keyword: SET
      - set_clause:
          column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: salary
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
            - naked_identifier: U
            - dot: .
            - naked_identifier: salary
            binary_operator: /
            numeric_literal: '1.95583'
      - comma: ','
      - set_clause:
          column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: currency
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            quoted_literal: "'EUR'"
      where_clause:
        keyword: WHERE
        expression:
          column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: currency
          comparison_operator:
            raw_comparison_operator: '='
          quoted_literal: "'DM'"
- statement_terminator: ;
- statement:
    update_statement:
      keyword: UPDATE
      table_reference:
        naked_identifier: staff
      alias_expression:
        keyword: AS
        naked_identifier: U
      set_clause_list:
      - keyword: SET
      - set_clause:
          column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: salary
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
            - naked_identifier: V
            - dot: .
            - naked_identifier: salary
      - comma: ','
      - set_clause:
          column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: currency
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
            - naked_identifier: V
            - dot: .
            - naked_identifier: currency
      from_clause:
      - keyword: FROM
      - from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: staff
            alias_expression:
              keyword: AS
              naked_identifier: U
      - comma: ','
      - from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: staff_updates
            alias_expression:
              keyword: AS
              naked_identifier: V
      where_clause:
        keyword: WHERE
        expression:
        - column_reference:
          - naked_identifier: U
          - dot: .
          - naked_identifier: name
        - comparison_operator:
            raw_comparison_operator: '='
        - column_reference:
          - naked_identifier: V
          - dot: .
          - naked_identifier: name
- statement_terminator: ;
- statement:
    update_statement:
      keyword: UPDATE
      table_reference:
        naked_identifier: order_pos
      set_clause_list:
        keyword: SET
        set_clause:
          column_reference:
            naked_identifier: stocks
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
              naked_identifier: stocks
            binary_operator: '*'
            numeric_literal: '10'
      preferring_clause:
        keyword: PREFERRING
        preference_term:
          function:
            function_name:
              function_name_identifier: HIGH
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: order_date
              end_bracket: )
        partitionby_clause:
        - keyword: PARTITION
        - keyword: BY
        - bracketed:
          - start_bracket: (
          - expression:
              column_reference:
                naked_identifier: shop_id
          - comma: ','
          - expression:
              column_reference:
                naked_identifier: order_id
          - end_bracket: )
- statement_terminator: ;
- statement:
    update_statement:
      keyword: UPDATE
      table_reference:
        naked_identifier: t1
      set_clause_list:
      - keyword: SET
      - set_clause:
          column_reference:
            naked_identifier: x
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
            - naked_identifier: t2
            - dot: .
            - naked_identifier: c1
      - comma: ','
      - set_clause:
          column_reference:
            naked_identifier: w
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            column_reference:
            - naked_identifier: t4
            - dot: .
            - naked_identifier: c2
      from_clause:
        keyword: FROM
        from_expression:
        - from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: t2
        - join_clause:
            keyword: JOIN
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t3
              alias_expression:
                naked_identifier: g
            join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: t2
                - dot: .
                - naked_identifier: c1
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: t3
                - dot: .
                - naked_identifier: c2
        - join_clause:
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: t4
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: t4
                - dot: .
                - naked_identifier: c3
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: t3
                - dot: .
                - naked_identifier: c1
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/exasol/values_in_subquery.sql
WITH txt AS (
    VALUES (1) AS t (id)
) SELECT * FROM txt;

WITH txt AS (
    VALUES (1, 2), (3, 4) AS t (c1, c2)
) SELECT * FROM txt;

SELECT * FROM (
    VALUES (1)
) AS t(id);

SELECT * FROM (
    VALUES (1, 2), (3, 4)
) AS t(c1, c2);

sqlfluff-2.3.5/test/fixtures/dialects/exasol/values_in_subquery.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2f876c0643fe22eb9f152e9ffe39497b2d83c88b520e06030b7d4f9ba68700f5
file:
- statement:
    with_compound_statement:
      keyword: WITH
      common_table_expression:
        naked_identifier: txt
        keyword: AS
        bracketed:
          start_bracket: (
          values_clause:
            keyword: VALUES
            bracketed:
              start_bracket: (
              numeric_literal: '1'
              end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: id
                end_bracket: )
          end_bracket: )
      select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: txt
- statement_terminator: ;
- statement:
    with_compound_statement:
      keyword: WITH
      common_table_expression:
        naked_identifier: txt
        keyword: AS
        bracketed:
          start_bracket: (
          values_clause:
          - keyword: VALUES
          - bracketed:
            - start_bracket: (
            - numeric_literal: '1'
            - comma: ','
            - numeric_literal: '2'
            - end_bracket: )
          - comma: ','
          - bracketed:
            - start_bracket: (
            - numeric_literal: '3'
            - comma: ','
            - numeric_literal: '4'
            - end_bracket: )
          - alias_expression:
              keyword: AS
              naked_identifier: t
              bracketed:
                start_bracket: (
                identifier_list:
                - naked_identifier: c1
                - comma: ','
                - naked_identifier: c2
                end_bracket: )
          end_bracket: )
      select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: txt
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            bracketed:
              start_bracket: (
              table_expression:
                values_clause:
                  keyword: VALUES
                  bracketed:
                    start_bracket: (
                    numeric_literal: '1'
                    end_bracket: )
              end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: id
                end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            bracketed:
              start_bracket: (
              table_expression:
                values_clause:
                - keyword: VALUES
                - bracketed:
                  - start_bracket: (
                  - numeric_literal: '1'
                  - comma: ','
                  - numeric_literal: '2'
                  - end_bracket: )
                - comma: ','
                - bracketed:
                  - start_bracket: (
                  - numeric_literal: '3'
                  - comma: ','
                  - numeric_literal: '4'
                  - end_bracket: )
              end_bracket: )
            alias_expression:
              keyword: AS
              naked_identifier: t
              bracketed:
                start_bracket: (
                identifier_list:
                - naked_identifier: c1
                - comma: ','
                - naked_identifier: c2
                end_bracket: )
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/greenplum/
sqlfluff-2.3.5/test/fixtures/dialects/greenplum/.sqlfluff
[sqlfluff]
dialect = greenplum

sqlfluff-2.3.5/test/fixtures/dialects/greenplum/create_table.sql
CREATE TABLE measurement (
    city_id int NOT NULL,
    logdate date NOT NULL,
    peaktemp int,
    unitsales int
)
WITH (appendoptimized=true, compresslevel=5)
DISTRIBUTED BY (txn_id, other_field);

CREATE TABLE measurement (
    city_id int NOT NULL,
    logdate date NOT NULL,
    peaktemp int,
    unitsales int
)
WITH (appendoptimized=true)
DISTRIBUTED BY (txn_id);

CREATE TEMP TABLE test (
    test_id int NOT NULL,
    logdate date NOT NULL,
    test_text int
)
DISTRIBUTED BY (txn_id);

CREATE TABLE test_randomly (
    test_id int NOT NULL,
    logdate date NOT NULL,
    test_text int
)
DISTRIBUTED RANDOMLY;

CREATE TABLE test_replicated (
    test_id int NOT NULL,
    logdate date NOT NULL,
    test_text int
)
DISTRIBUTED REPLICATED;

sqlfluff-2.3.5/test/fixtures/dialects/greenplum/create_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fa079509f47722b87061b8271e67c58da8a3f67bfa948ae63312847c7cc96952
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: city_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: peaktemp
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: unitsales
      - data_type:
          keyword: int
      - end_bracket: )
    - keyword: WITH
    - bracketed:
      - start_bracket: (
      - parameter: appendoptimized
      - comparison_operator:
          raw_comparison_operator: '='
      - boolean_literal: 'true'
      - comma: ','
      - parameter: compresslevel
      - comparison_operator:
          raw_comparison_operator: '='
      - numeric_literal: '5'
      - end_bracket: )
    - distributed_by:
      - keyword: DISTRIBUTED
      - keyword: BY
      - bracketed:
        - start_bracket: (
        - column_reference:
            naked_identifier: txn_id
        - comma: ','
        - column_reference:
            naked_identifier: other_field
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: city_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: peaktemp
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: unitsales
      - data_type:
          keyword: int
      - end_bracket: )
    - keyword: WITH
    - bracketed:
        start_bracket: (
        parameter: appendoptimized
        comparison_operator:
          raw_comparison_operator: '='
        boolean_literal: 'true'
        end_bracket: )
    - distributed_by:
      - keyword: DISTRIBUTED
      - keyword: BY
      - bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: txn_id
          end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TEMP
    - keyword: TABLE
    - table_reference:
        naked_identifier: test
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: test_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: test_text
      - data_type:
          keyword: int
      - end_bracket: )
    - distributed_by:
      - keyword: DISTRIBUTED
      - keyword: BY
      - bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: txn_id
          end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: test_randomly
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: test_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: test_text
      - data_type:
          keyword: int
      - end_bracket: )
    - distributed_by:
      - keyword: DISTRIBUTED
      - keyword: RANDOMLY
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: test_replicated
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: test_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: test_text
      - data_type:
          keyword: int
      - end_bracket: )
    - distributed_by:
      - keyword: DISTRIBUTED
      - keyword: REPLICATED
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/greenplum/create_table_as.sql
create table table1 (
    column1 int
    , column2 varchar
    , column3 boolean
)
with (appendoptimized = true, compresstype = zstd)
distributed by (column1, column2);

create table new_table as
select * from old_table
distributed randomly;

create table new_table as
select * from old_table;

sqlfluff-2.3.5/test/fixtures/dialects/greenplum/create_table_as.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e4e1aa2cd8a3b99b85132351c464959bc680b1c2b1dbfecd808135ffe8fe54a1
file:
- statement:
    create_table_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        naked_identifier: table1
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: column1
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: column2
      - data_type:
          keyword: varchar
      - comma: ','
      - column_reference:
          naked_identifier: column3
      - data_type:
          keyword: boolean
      - end_bracket: )
    - keyword: with
    - bracketed:
      - start_bracket: (
      - parameter: appendoptimized
      - comparison_operator:
          raw_comparison_operator: '='
      - boolean_literal: 'true'
      - comma: ','
      - parameter: compresstype
      - comparison_operator:
          raw_comparison_operator: '='
      - naked_identifier: zstd
      - end_bracket: )
    - distributed_by:
      - keyword: distributed
      - keyword: by
      - bracketed:
        - start_bracket: (
        - column_reference:
            naked_identifier: column1
        - comma: ','
        - column_reference:
            naked_identifier: column2
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_as_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        naked_identifier: new_table
    - keyword: as
    - select_statement:
        select_clause:
          keyword: select
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: old_table
    - distributed_by:
      - keyword: distributed
      - keyword: randomly
- statement_terminator: ;
- statement:
    create_table_as_statement:
    - keyword: create
    - keyword: table
    - table_reference:
        naked_identifier: new_table
    - keyword: as
    - select_statement:
        select_clause:
          keyword: select
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: old_table
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/
sqlfluff-2.3.5/test/fixtures/dialects/hive/.sqlfluff
[sqlfluff]
dialect = hive

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_database.sql
ALTER DATABASE foo SET DBPROPERTIES ("prop1"="val1", "prop2"="val2");
ALTER DATABASE bar SET LOCATION "hdfs://alternate_path";
ALTER DATABASE foo2 SET OWNER USER "user";

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_database.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e471a0e0892ed298475a0ee64729fffe5b1b2144edcf454d92f384e0f8e79a2b
file:
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: foo
    - keyword: SET
    - keyword: DBPROPERTIES
    - bracketed:
      - start_bracket: (
      - quoted_literal: '"prop1"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"val1"'
      - comma: ','
      - quoted_literal: '"prop2"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"val2"'
      - end_bracket: )
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: bar
    - keyword: SET
    - keyword: LOCATION
    - quoted_literal: '"hdfs://alternate_path"'
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: foo2
    - keyword: SET
    - keyword: OWNER
    - keyword: USER
    - quoted_literal: '"user"'
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_schema.sql
ALTER DATABASE foo SET OWNER ROLE "role";
ALTER DATABASE bar SET MANAGEDLOCATION "hdfs://alternate_path";

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_schema.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 01fbe10e33e9d07cea2ff2a3357ec3730607c4d29d3f169c31f6d322c7e59715
file:
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: foo
    - keyword: SET
    - keyword: OWNER
    - keyword: ROLE
    - quoted_literal: '"role"'
- statement_terminator: ;
- statement:
    alter_database_statement:
    - keyword: ALTER
    - keyword: DATABASE
    - database_reference:
        naked_identifier: bar
    - keyword: SET
    - keyword: MANAGEDLOCATION
    - quoted_literal: '"hdfs://alternate_path"'
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_table.sql
ALTER TABLE schema.table1 rename TO schema.table2;
ALTER TABLE schema.table1 rename TO schema.table2;
ALTER TABLE table2 EXCHANGE PARTITION (ds='1') WITH TABLE table1;

sqlfluff-2.3.5/test/fixtures/dialects/hive/alter_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1a33737331ae1fc881a03e0a9fff034dbdaa2e6b9cb12f594763523bb80389e1
file:
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: table1
    - keyword: rename
    - keyword: TO
    - table_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: table2
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: table1
    - keyword: rename
    - keyword: TO
    - table_reference:
      - naked_identifier: schema
      - dot: .
      - naked_identifier: table2
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: table2
    - keyword: EXCHANGE
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: ds
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'1'"
        end_bracket: )
    - keyword: WITH
    - keyword: TABLE
    - table_reference:
        naked_identifier: table1
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/array_types.sql
-- simple
select array[a, b, c] as arr from sch.tbl;
-- bit harder
select t.a from unnest(array[1, 3, 6, 12]) as t(f);
-- complex
select map_from_entries(array[
    row('pending.freebet', pending_fb),
    row('bonus.balance', bonus)
]) from sch.tbl;
-- string consts
select array['a', 'b', 'c'] as arr from sch.tbl;
-- null
select array['a', null] as arr from sch.tbl;
-- empty array
select array[] as arr from sch.tbl;

sqlfluff-2.3.5/test/fixtures/dialects/hive/array_types.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 703561cc4a19243823ae4bb0768e333f8a24d41beb5f3f9cb4406b66ac180ced
file:
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          typed_array_literal:
            array_type:
              keyword: array
            array_literal:
            - start_square_bracket: '['
            - column_reference:
                naked_identifier: a
            - comma: ','
            - column_reference:
                naked_identifier: b
            - comma: ','
            - column_reference:
                naked_identifier: c
            - end_square_bracket: ']'
          alias_expression:
            keyword: as
            naked_identifier: arr
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: sch
              - dot: .
              - naked_identifier: tbl
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          column_reference:
          - naked_identifier: t
          - dot: .
          - naked_identifier: a
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              function:
                function_name:
                  function_name_identifier: unnest
                bracketed:
                  start_bracket: (
                  expression:
                    typed_array_literal:
                      array_type:
                        keyword: array
                      array_literal:
                      - start_square_bracket: '['
                      - numeric_literal: '1'
                      - comma: ','
                      - numeric_literal: '3'
                      - comma: ','
                      - numeric_literal: '6'
                      - comma: ','
                      - numeric_literal: '12'
                      - end_square_bracket: ']'
                  end_bracket: )
            alias_expression:
              keyword: as
              naked_identifier: t
              bracketed:
                start_bracket: (
                identifier_list:
                  naked_identifier: f
                end_bracket: )
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          function:
            function_name:
              function_name_identifier: map_from_entries
            bracketed:
              start_bracket: (
              expression:
                typed_array_literal:
                  array_type:
                    keyword: array
                  array_literal:
                  - start_square_bracket: '['
                  - function:
                      function_name:
                        function_name_identifier: row
                      bracketed:
                      - start_bracket: (
                      - expression:
                          quoted_literal: "'pending.freebet'"
                      - comma: ','
                      - expression:
                          column_reference:
                            naked_identifier: pending_fb
                      - end_bracket: )
                  - comma: ','
                  - function:
                      function_name:
                        function_name_identifier: row
                      bracketed:
                      - start_bracket: (
                      - expression:
                          quoted_literal: "'bonus.balance'"
                      - comma: ','
                      - expression:
                          column_reference:
                            naked_identifier: bonus
                      - end_bracket: )
                  - end_square_bracket: ']'
              end_bracket: )
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: sch
              - dot: .
              - naked_identifier: tbl
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          typed_array_literal:
            array_type:
              keyword: array
            array_literal:
            - start_square_bracket: '['
            - quoted_literal: "'a'"
            - comma: ','
            - quoted_literal: "'b'"
            - comma: ','
            - quoted_literal: "'c'"
            - end_square_bracket: ']'
          alias_expression:
            keyword: as
            naked_identifier: arr
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: sch
              - dot: .
              - naked_identifier: tbl
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          typed_array_literal:
            array_type:
              keyword: array
            array_literal:
              start_square_bracket: '['
              quoted_literal: "'a'"
              comma: ','
              null_literal: 'null'
              end_square_bracket: ']'
          alias_expression:
            keyword: as
            naked_identifier: arr
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: sch
              - dot: .
              - naked_identifier: tbl
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          typed_array_literal:
            array_type:
              keyword: array
            array_literal:
              start_square_bracket: '['
              end_square_bracket: ']'
          alias_expression:
            keyword: as
            naked_identifier: arr
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
              - naked_identifier: sch
              - dot: .
              - naked_identifier: tbl
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_database.sql
CREATE DATABASE foo LOCATION 'hdfs://path' WITH DBPROPERTIES ("a"="1", "b"="2");

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_database.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c38275052007b35100536da793098268e048b72d43a9365b76f33a9bd8fd07c8
file:
  statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: DATABASE
    - database_reference:
        naked_identifier: foo
    - keyword: LOCATION
    - quoted_literal: "'hdfs://path'"
    - keyword: WITH
    - keyword: DBPROPERTIES
    - bracketed:
      - start_bracket: (
      - quoted_literal: '"a"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"1"'
      - comma: ','
      - quoted_literal: '"b"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"2"'
      - end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_schema.sql
CREATE SCHEMA IF NOT EXISTS foo COMMENT 'test schema' MANAGEDLOCATION 'hdfs://path';

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_schema.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d2afc92273acb32b004f31d3072610f06db97ec4f12c26894040452da396c24d
file:
  statement:
    create_database_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - database_reference:
        naked_identifier: foo
    - keyword: COMMENT
    - quoted_literal: "'test schema'"
    - keyword: MANAGEDLOCATION
    - quoted_literal: "'hdfs://path'"
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_as_select.sql
CREATE TABLE new_foo
ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
STORED AS RCFile
AS
SELECT (col1 % 1024) col, concat(col1, col2) col12
FROM foo;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_as_select.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 71ca19bc76efe01ca9e35dc44c5928a1c78c212052692f7afef805ff82836b62
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: new_foo
    - row_format_clause:
      - keyword: ROW
      - keyword: FORMAT
      - keyword: SERDE
      - quoted_literal: '"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"'
    - keyword: STORED
    - keyword: AS
    - file_format: RCFile
    - keyword: AS
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            expression:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: col1
                  binary_operator: '%'
                  numeric_literal: '1024'
                end_bracket: )
            alias_expression:
              naked_identifier: col
        - comma: ','
        - select_clause_element:
            function:
              function_name:
                function_name_identifier: concat
              bracketed:
              - start_bracket: (
              - expression:
                  column_reference:
                    naked_identifier: col1
              - comma: ','
              - expression:
                  column_reference:
                    naked_identifier: col2
              - end_bracket: )
            alias_expression:
              naked_identifier: col12
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: foo
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_clustered_by.sql
CREATE TABLE IF NOT EXISTS foo (
    col1 string,
    col2 float
)
CLUSTERED BY (col2) SORTED BY (col1 DESC) INTO 5 BUCKETS;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_clustered_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a459e89000ef9b718b41f3b3bb840a5ea04a49545ba3b2628f60731d35ad97b5
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: float
      - end_bracket: )
    - keyword: CLUSTERED
    - keyword: BY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col2
        end_bracket: )
    - keyword: SORTED
    - keyword: BY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: col1
        keyword: DESC
        end_bracket: )
    - keyword: INTO
    - numeric_literal: '5'
    - keyword: BUCKETS
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_constraints.sql
CREATE TABLE foo(
    col1 INT PRIMARY KEY,
    col2 INTEGER NOT NULL,
    col3 BIGINT NOT NULL,
    col4 STRING,
    col5 STRING COMMENT 'Column 5'
)
COMMENT 'This is a test table'
STORED AS ORC;

CREATE TABLE product (
    product_id INTEGER,
    product_vendor_id INTEGER,
    PRIMARY KEY (product_id) DISABLE NOVALIDATE,
    CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id) DISABLE NOVALIDATE
);

CREATE TABLE vendor (
    vendor_id INTEGER,
    PRIMARY KEY (vendor_id) DISABLE NOVALIDATE RELY
);

CREATE TABLE product (
    product_id INTEGER,
    product_vendor_id INTEGER,
    PRIMARY KEY (product_id) DISABLE NOVALIDATE,
    CONSTRAINT product_fk_1 FOREIGN KEY (product_vendor_id) REFERENCES vendor(vendor_id) DISABLE NOVALIDATE
);

CREATE TABLE vendor (
    vendor_id INTEGER,
    PRIMARY KEY (vendor_id) DISABLE NOVALIDATE NORELY
);

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_constraints.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 33362b518a68ba15d8346bcee14783b8dc1efcf5370199f667d53e4095821211
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: INT
          column_constraint_segment:
          - keyword: PRIMARY
          - keyword: KEY
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: INTEGER
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: BIGINT
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: STRING
      - comma: ','
      - column_definition:
          naked_identifier: col5
          data_type:
            primitive_type:
              keyword: STRING
          column_constraint_segment:
            comment_clause:
              keyword: COMMENT
              quoted_literal: "'Column 5'"
      - end_bracket: )
    - keyword: COMMENT
    - quoted_literal: "'This is a test table'"
    - keyword: STORED
    - keyword: AS
    - keyword: ORC
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: product
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: product_id
          data_type:
            primitive_type:
              keyword: INTEGER
      - comma: ','
      - column_definition:
          naked_identifier: product_vendor_id
          data_type:
            primitive_type:
              keyword: INTEGER
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: product_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
      - comma: ','
      - table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: product_fk_1
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: product_vendor_id
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: vendor
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: vendor_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: vendor
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: vendor_id
          data_type:
            primitive_type:
              keyword: INTEGER
        comma: ','
        table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: vendor_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
        - keyword: RELY
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: product
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: product_id
          data_type:
            primitive_type:
              keyword: INTEGER
      - comma: ','
      - column_definition:
          naked_identifier: product_vendor_id
          data_type:
            primitive_type:
              keyword: INTEGER
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: product_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
      - comma: ','
      - table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: product_fk_1
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: product_vendor_id
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: vendor
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: vendor_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: vendor
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: vendor_id
          data_type:
            primitive_type:
              keyword: INTEGER
        comma: ','
        table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: vendor_id
            end_bracket: )
        - keyword: DISABLE
        - keyword: NOVALIDATE
        - keyword: NORELY
        end_bracket: )
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_datatypes.sql
CREATE TABLE db.foo (
    col1 string,
    col2 int,
    col3 decimal,
    col4 decimal(10, 2),
    col5 ARRAY<double>,
    col6 MAP<varchar, date>,
    col7 STRUCT<
        field1: boolean,
        field2: ARRAY<double precision>,
        field3: UNIONTYPE<string, decimal(10, 2)>>,
    col8 UNIONTYPE<string, ARRAY<char>>
);

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_datatypes.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 224d06e8d35e55927c093e397084485a36d7cdf2f67f972db87effd738945841
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: db
      - dot: .
      - naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: int
      - comma: ','
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: decimal
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: decimal
              bracketed_arguments:
                bracketed:
                - start_bracket: (
                - numeric_literal: '10'
                - comma: ','
                - numeric_literal: '2'
                - end_bracket: )
      - comma: ','
      - column_definition:
          naked_identifier: col5
          data_type:
            array_type:
              keyword: ARRAY
              start_angle_bracket: <
              data_type:
                primitive_type:
                  keyword: double
              end_angle_bracket: '>'
      - comma: ','
      - column_definition:
          naked_identifier: col6
          data_type:
            keyword: MAP
            start_angle_bracket: <
            primitive_type:
              keyword: varchar
            comma: ','
            data_type:
              primitive_type:
                keyword: date
            end_angle_bracket: '>'
      - comma: ','
      - column_definition:
          naked_identifier: col7
          data_type:
            struct_type:
              keyword: STRUCT
              struct_type_schema:
              - start_angle_bracket: <
              - naked_identifier: field1
              - colon: ':'
              - data_type:
                  primitive_type:
                    keyword: boolean
              - comma: ','
              - naked_identifier: field2
              - colon: ':'
              - data_type:
                  array_type:
                    keyword: ARRAY
                    start_angle_bracket: <
                    data_type:
                      primitive_type:
                      - keyword: double
                      - keyword: precision
                    end_angle_bracket: '>'
              - comma: ','
              - naked_identifier: field3
              - colon: ':'
              - data_type:
                - keyword: UNIONTYPE
                - start_angle_bracket: <
                - data_type:
                    primitive_type:
                      keyword: string
                - comma: ','
                - data_type:
                    primitive_type:
                      keyword: decimal
                      bracketed_arguments:
                        bracketed:
                        - start_bracket: (
                        - numeric_literal: '10'
                        - comma: ','
                        - numeric_literal: '2'
                        - end_bracket: )
                - end_angle_bracket: '>'
              - end_angle_bracket: '>'
      - comma: ','
      - column_definition:
          naked_identifier: col8
          data_type:
          - keyword: UNIONTYPE
          - start_angle_bracket: <
          - data_type:
              primitive_type:
                keyword: string
          - comma: ','
          - data_type:
              array_type:
                keyword: ARRAY
                start_angle_bracket: <
                data_type:
                  primitive_type:
                    keyword: char
                end_angle_bracket: '>'
          - end_angle_bracket: '>'
      - end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_external_partitioned.sql
CREATE EXTERNAL TABLE IF NOT EXISTS foo (
    col1 int,
    col2 string
)
PARTITIONED BY (col3 string, col4 date)
LOCATION 'hdfs://path';

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_external_partitioned.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b9c65c1477ba5ee9674e2c6d78a096c9faa9e75e3c2e86d7c9650f0b99cc55c7
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: EXTERNAL
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: int
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: string
      - end_bracket: )
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: date
      - end_bracket: )
    - keyword: LOCATION
    - quoted_literal: "'hdfs://path'"
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_like.sql
CREATE TABLE db.new_foo LIKE foo TBLPROPERTIES ("property_name"="property_value");

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_like.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 21d89aac8d7e7c77878bd528bcada3dee345999498ff51dbaf3687b618a2edd8
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: db
      - dot: .
      - naked_identifier: new_foo
    - keyword: LIKE
    - table_reference:
        naked_identifier: foo
    - keyword: TBLPROPERTIES
    - bracketed:
      - start_bracket: (
      - quoted_literal: '"property_name"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"property_value"'
      - end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_row_format_delimited.sql
CREATE TABLE foo(
    col1 INT PRIMARY KEY,
    col2 BIGINT NOT NULL,
    col3 STRING,
    col4 STRING COMMENT 'Column 4')
COMMENT 'This is a test table'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\001'
COLLECTION ITEMS TERMINATED BY '\002'
MAP KEYS TERMINATED BY '\003'
LINES TERMINATED BY '\004'
NULL DEFINED AS '\005'
STORED AS SEQUENCEFILE;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_row_format_delimited.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ac9e95c49548942f83bb6d6b5237911e69fd7b18e546df7ae7e7d844c43c2547
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: INT
          column_constraint_segment:
          - keyword: PRIMARY
          - keyword: KEY
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: BIGINT
          column_constraint_segment:
          - keyword: NOT
          - keyword: 'NULL'
      - comma: ','
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: STRING
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: STRING
          column_constraint_segment:
            comment_clause:
              keyword: COMMENT
              quoted_literal: "'Column 4'"
      - end_bracket: )
    - keyword: COMMENT
    - quoted_literal: "'This is a test table'"
    - row_format_clause:
      - keyword: ROW
      - keyword: FORMAT
      - keyword: DELIMITED
      - keyword: FIELDS
      - keyword: TERMINATED
      - keyword: BY
      - quoted_literal: "'\\001'"
      - keyword: COLLECTION
      - keyword: ITEMS
      - keyword: TERMINATED
      - keyword: BY
      - quoted_literal: "'\\002'"
      - keyword: MAP
      - keyword: KEYS
      - keyword: TERMINATED
      - keyword: BY
      - quoted_literal: "'\\003'"
      - keyword: LINES
      - keyword: TERMINATED
      - keyword: BY
      - quoted_literal: "'\\004'"
      - keyword: 'NULL'
      - keyword: DEFINED
      - keyword: AS
      - quoted_literal: "'\\005'"
    - keyword: STORED
    - keyword: AS
    - file_format: SEQUENCEFILE
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_row_format_serde.sql
CREATE TABLE foo (
    col1 int,
    col2 string
)
PARTITIONED BY (col3 string, col4 date)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.RegexSerDe'
WITH SERDEPROPERTIES (
    "input.regex" = "([^]*) ([^]*) ([^]*) (-|\\[^\\]*\\]) ([^ \"]*|\"[^\"]*\") (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\".*\") ([^ \"]*|\".*\"))?"
)
STORED AS TEXTFILE
LOCATION 'hdfs://path';

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_row_format_serde.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a8424ed6b464bbf9d21199a3457a8b45022d4b1812cafd6bece6bb62926d95e8
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: int
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: string
      - end_bracket: )
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: date
      - end_bracket: )
    - row_format_clause:
      - keyword: ROW
      - keyword: FORMAT
      - keyword: SERDE
      - quoted_literal: "'org.apache.hadoop.hive.serde2.RegexSerDe'"
      - keyword: WITH
      - keyword: SERDEPROPERTIES
      - bracketed:
        - start_bracket: (
        - quoted_literal: '"input.regex"'
        - comparison_operator:
            raw_comparison_operator: '='
        - quoted_literal: '"([^]*) ([^]*) ([^]*) (-|\\[^\\]*\\]) ([^ \"]*|\"[^\"]*\") (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\".*\") ([^ \"]*|\".*\"))?"'
        - end_bracket: )
    - keyword: STORED
    - keyword: AS
    - file_format: TEXTFILE
    - keyword: LOCATION
    - quoted_literal: "'hdfs://path'"
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_skewed_by.sql
CREATE TABLE foo (col1 STRING, col2 int, col3 STRING)
SKEWED BY (col1, col2)
ON (('s1',1), ('s3',3), ('s13',13), ('s78',78))
STORED AS DIRECTORIES;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_skewed_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: cfdee566c51cf22377d6fd7927b46e4ffd39df02297addfac9b152c52f138215
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: STRING
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: int
      - comma: ','
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: STRING
      - end_bracket: )
    - skewed_by_clause:
      - keyword: SKEWED
      - keyword: BY
      - bracketed:
        - start_bracket: (
        - column_reference:
            naked_identifier: col1
        - comma: ','
        - column_reference:
            naked_identifier: col2
        - end_bracket: )
      - keyword: 'ON'
      - bracketed:
        - start_bracket: (
        - bracketed:
            start_bracket: (
            quoted_literal: "'s1'"
            comma: ','
            numeric_literal: '1'
            end_bracket: )
        - comma: ','
        - bracketed:
            start_bracket: (
            quoted_literal: "'s3'"
            comma: ','
            numeric_literal: '3'
            end_bracket: )
        - comma: ','
        - bracketed:
            start_bracket: (
            quoted_literal: "'s13'"
            comma: ','
            numeric_literal: '13'
            end_bracket: )
        - comma: ','
        - bracketed:
            start_bracket: (
            quoted_literal: "'s78'"
            comma: ','
            numeric_literal: '78'
            end_bracket: )
        - end_bracket: )
    - keyword: STORED
    - keyword: AS
    - keyword: DIRECTORIES
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_temporary_external.sql
CREATE TEMPORARY EXTERNAL TABLE IF NOT EXISTS foo (
    col1 int,
    col2 string
)
PARTITIONED BY (col3 string, col4 date)
LOCATION 'hdfs://path';

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_temporary_external.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dc767c556f694799b90024fb1ee65403c0e120b9b316c9e0784f5a5eccc96bfd
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TEMPORARY
    - keyword: EXTERNAL
    - keyword: TABLE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: int
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: string
      - end_bracket: )
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col3
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col4
          data_type:
            primitive_type:
              keyword: date
      - end_bracket: )
    - keyword: LOCATION
    - quoted_literal: "'hdfs://path'"
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_temporary_properties.sql
CREATE TEMPORARY TABLE foo (
    col1 string,
    col2 float
)
TBLPROPERTIES ("property1"="value1", "property2"="value2");

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_temporary_properties.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f888d36a12373e5d91926cdff2333e67fe22096024b892a00e1226a415b7e48c
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TEMPORARY
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: col1
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: col2
          data_type:
            primitive_type:
              keyword: float
      - end_bracket: )
    - keyword: TBLPROPERTIES
    - bracketed:
      - start_bracket: (
      - quoted_literal: '"property1"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"value1"'
      - comma: ','
      - quoted_literal: '"property2"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"value2"'
      - end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_with.sql
CREATE TABLE masonboro_sandbox.test AS
WITH us_sales AS (
    SELECT rev
    FROM masonboro_sales.us_2021
)
SELECT rev
FROM us_sales;

sqlfluff-2.3.5/test/fixtures/dialects/hive/create_table_with.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9b6e5c9ad01dd44a93dbd5dac68c1f42b3a7984d1b2c7eae9bc8ac0460e1a03e
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: masonboro_sandbox
      - dot: .
      - naked_identifier: test
    - keyword: AS
    - with_compound_statement:
        keyword: WITH
        common_table_expression:
          naked_identifier: us_sales
          keyword: AS
          bracketed:
            start_bracket: (
            select_statement:
              select_clause:
                keyword: SELECT
                select_clause_element:
                  column_reference:
                    naked_identifier: rev
              from_clause:
                keyword: FROM
                from_expression:
                  from_expression_element:
                    table_expression:
                      table_reference:
                      - naked_identifier: masonboro_sales
                      - dot: .
                      - naked_identifier: us_2021
            end_bracket: )
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              column_reference:
                naked_identifier: rev
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: us_sales
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_database.sql
DROP DATABASE foo;
DROP DATABASE bar RESTRICT;

sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_database.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 627698553969b8096046732155df04afb6d8d25701c26005ce447a34b08aab71 file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_schema.sql000066400000000000000000000000431451700765000237250ustar00rootroot00000000000000DROP SCHEMA IF EXISTS foo CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_schema.yml000066400000000000000000000011531451700765000237320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 05bb4fc104ce95efd334d8ee7bfe45d835542a27ed67b9081412c605d076ff1d file: statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: foo - keyword: CASCADE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_table.sql000066400000000000000000000000601451700765000235530ustar00rootroot00000000000000DROP TABLE foo; DROP TABLE IF exists bar PURGE;sqlfluff-2.3.5/test/fixtures/dialects/hive/drop_table.yml000066400000000000000000000014051451700765000235610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1907fbb08c02a5acfea04c7cee28cc55bb58d4b259c93a1b66f841f33aee718c file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: exists - table_reference: naked_identifier: bar - keyword: PURGE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table.sql000066400000000000000000000001231451700765000251440ustar00rootroot00000000000000INSERT INTO TABLE foo SELECT a, b FROM bar; INSERT INTO foo SELECT a, b FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table.yml000066400000000000000000000033031451700765000251510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2a26381ae66bfbdbfbac6d6ec79085122f726a8b22522b87c638c76e5bc8fc68 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table_partition.sql000066400000000000000000000001311451700765000272340ustar00rootroot00000000000000INSERT INTO TABLE foo PARTITION (a='test_foo', b='test_bar') SELECT a, b, c, d FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table_partition.yml000066400000000000000000000033141451700765000272440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 586b862acdc2d1965587b0ccfdbd6113f67c2b7a8b93b33c7284fd6a195297c5 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_bar'" - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table_values.sql000066400000000000000000000000751451700765000265310ustar00rootroot00000000000000INSERT INTO TABLE foo VALUES ('foo', 'bar'), ('rab', 'oof'); sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_into_table_values.yml000066400000000000000000000016731451700765000265400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3fccc06426db971be25d14f3a419e1e6b06eaf6c617d7104c65d13abbccd51f8 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: foo - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - quoted_literal: "'rab'" - comma: ',' - quoted_literal: "'oof'" - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_directory.sql000066400000000000000000000000771451700765000271460ustar00rootroot00000000000000INSERT OVERWRITE LOCAL DIRECTORY '/path' SELECT a, b FROM foo; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_directory.yml000066400000000000000000000020721451700765000271450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4104e48f686af7243fb0baa28f274a99d313d6a28d408db7c2379e970b52638b file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: LOCAL - keyword: DIRECTORY - quoted_literal: "'/path'" - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table.sql000066400000000000000000000000611451700765000262220ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo SELECT a, b FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table.yml000066400000000000000000000020661451700765000262330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9222db3adb9b7864b9638c8ffe6ec8858d02af5032632400b1f43d3fc785f410 file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table_dynamic_partition.sql000066400000000000000000000003301451700765000320160ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo PARTITION (a = 'test_foo', b) IF NOT EXISTS SELECT a, 'test_bar' AS b FROM bar; INSERT OVERWRITE TABLE foo PARTITION (a, b) IF NOT EXISTS SELECT 'test_foo' AS a, 'test_bar' AS b FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table_dynamic_partition.yml000066400000000000000000000051041451700765000320240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 202beaad04e718b7b9b006acd5376ec0ca72c8d7eaf0677716fd721f5a86e6ef file: - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'test_bar'" alias_expression: keyword: AS naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'test_foo'" alias_expression: keyword: AS naked_identifier: a - comma: ',' - select_clause_element: quoted_literal: "'test_bar'" alias_expression: keyword: AS naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table_partition.sql000066400000000000000000000001541451700765000303160ustar00rootroot00000000000000INSERT OVERWRITE TABLE foo PARTITION (a='test_foo', b='test_bar') IF NOT EXISTS SELECT a, b, c, d FROM bar; sqlfluff-2.3.5/test/fixtures/dialects/hive/insert_overwrite_table_partition.yml000066400000000000000000000034141451700765000303220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be 
edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7991628915770c1bebd1f6cd929c6294bac7ee9787c46926500e4e5a7de11c7e file: statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: naked_identifier: foo - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_foo'" - comma: ',' - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_bar'" - end_bracket: ) - keyword: IF - keyword: NOT - keyword: EXISTS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/msck_repair_table.sql000066400000000000000000000006151451700765000251140ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK REPAIR TABLE table_identifier ADD PARTITIONS; MSCK REPAIR TABLE table_identifier DROP PARTITIONS; MSCK REPAIR TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK REPAIR TABLE table_identifier; -- run MSCK REPAIR TABLE to recover all the partitions MSCK REPAIR TABLE t1; MSCK REPAIR TABLE emp_part DROP PARTITIONS; sqlfluff-2.3.5/test/fixtures/dialects/hive/msck_repair_table.yml000066400000000000000000000033411451700765000251150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 04d14147ab5e6a91ad4684a54a2eb40029b7ff458e480c86896bc441e03b0e01 file: - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: emp_part - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/msck_table.sql000066400000000000000000000005431451700765000235520ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK TABLE table_identifier ADD PARTITIONS; MSCK TABLE table_identifier DROP PARTITIONS; MSCK TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK TABLE table_identifier; -- run MSCK TABLE to recover all the partitions MSCK TABLE t1; MSCK TABLE emp_part DROP PARTITIONS; sqlfluff-2.3.5/test/fixtures/dialects/hive/msck_table.yml000066400000000000000000000030631451700765000235540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 2453ec227853be4f8cd172f70bedf92bdc447d41ed96b716b1aa01e5dc191bb3 file: - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: msck_table_statement: - keyword: MSCK - keyword: TABLE - table_reference: naked_identifier: emp_part - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/quoted_literal.sql000066400000000000000000000001571451700765000244640ustar00rootroot00000000000000SELECT result, `timestamp` as `timestamp` FROM student WHERE name = "John Smith" OR name = 'Jane Doe'; sqlfluff-2.3.5/test/fixtures/dialects/hive/quoted_literal.yml000066400000000000000000000026261451700765000244710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7e00d6e76ba083393dd9dac558bd15b8a5b991e76c8012ff290b745c84fd3db file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: result - comma: ',' - select_clause_element: quoted_literal: '`timestamp`' alias_expression: keyword: as quoted_identifier: '`timestamp`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John Smith"' - binary_operator: OR - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Jane Doe'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_cast.sql000066400000000000000000000001701451700765000237330ustar00rootroot00000000000000select cast(row(col1, col2) as row(a bigint, b decimal(23, 2))) from sch.tbl; select cast(a as json) from sch.tbl; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_cast.yml000066400000000000000000000055741451700765000237520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5ffab9520bca3181f57fc2f9cd06f404e2fcf543b9939c8e11725b69357e14c6 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: function: - function_name: row - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: as - keyword: row - bracketed: - start_bracket: ( - naked_identifier: a - data_type: primitive_type: keyword: bigint - comma: ',' - naked_identifier: b - data_type: primitive_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '23' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: a keyword: as data_type: primitive_type: keyword: json end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch - dot: . - naked_identifier: tbl - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_cluster_distribute_sort_by.sql000066400000000000000000000006331451700765000304650ustar00rootroot00000000000000SELECT col1, col2 FROM t1 ORDER BY col1; SELECT col1, col2 FROM t1 CLUSTER BY col1; SELECT col1, col2 FROM t1 ORDER BY col1 CLUSTER BY col1; SELECT key, value FROM src SORT BY key ASC, value DESC; SELECT col1, col2 FROM t1 DISTRIBUTE BY col1; SELECT col1, col2 FROM t1 DISTRIBUTE BY col1 SORT BY col1 ASC, col2 DESC; SELECT col1, col2 FROM t1 ORDER BY col1 DISTRIBUTE BY col1 SORT BY col1 ASC, col2 DESC; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_cluster_distribute_sort_by.yml000066400000000000000000000131521451700765000304670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 687d8bd77aea2b7099c821762f7a783d5e459ac8c2f220f1d7856a73a89b1124 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 clusterby_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: col1 clusterby_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: key - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: key - keyword: ASC - comma: ',' - column_reference: naked_identifier: value - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 orderby_clause: - keyword: ORDER - keyword: BY - 
column_reference: naked_identifier: col1 distributeby_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: col1 sortby_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_interval.sql000066400000000000000000000013611451700765000246300ustar00rootroot00000000000000SELECT current_date + INTERVAL '2' DAY; SELECT current_date - INTERVAL '1' DAY AS yesterday; SELECT current_date + INTERVAL '12' HOUR; -- These examples are from: -- https://cwiki.apache.org/confluence/display/hive/languagemanual+types#LanguageManualTypes-Intervals SELECT INTERVAL '1' DAY; SELECT INTERVAL '1-2' YEAR TO MONTH; SELECT INTERVAL '1' YEAR + INTERVAL '2' MONTH; SELECT INTERVAL '1 2:3:4.000005' DAY; SELECT INTERVAL '1' DAY+ INTERVAL '2' HOUR + INTERVAL '3' MINUTE + INTERVAL '4' SECOND + INTERVAL '5' NANO; SELECT INTERVAL 1 DAY; SELECT INTERVAL (1+dt) DAY; SELECT 1 DAY; SELECT INTERVAL 1 DAY; SELECT '1-2' YEAR TO MONTH; SELECT INTERVAL '1-2' YEARS TO MONTH; SELECT 2 SECONDS; SELECT 2 SECOND; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_interval.yml000066400000000000000000000137211451700765000246350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c322d8cc3fd616da40547b7098c14333a8fef354726448120c756ca563fddf2b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: + interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: '-' interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY alias_expression: keyword: AS naked_identifier: yesterday - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: current_date binary_operator: + interval_expression: keyword: INTERVAL quoted_literal: "'12'" date_part: HOUR - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'1-2'" - date_part: YEAR - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: YEAR - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL quoted_literal: "'1 2:3:4.000005'" date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - 
interval_expression: keyword: INTERVAL quoted_literal: "'1'" date_part: DAY - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'2'" date_part: HOUR - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'3'" date_part: MINUTE - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'4'" date_part: SECOND - binary_operator: + - interval_expression: keyword: INTERVAL quoted_literal: "'5'" date_part: NANO - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL bracketed: start_bracket: ( expression: numeric_literal: '1' binary_operator: + column_reference: naked_identifier: dt end_bracket: ) date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL numeric_literal: '1' date_part: DAY - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - quoted_literal: "'1-2'" - date_part: YEAR - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - quoted_literal: "'1-2'" - date_part: YEARS - keyword: TO - date_part: MONTH - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '2' date_part: SECONDS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: numeric_literal: '2' date_part: SECOND - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_lateral_view.sql000066400000000000000000000010751451700765000254640ustar00rootroot00000000000000SELECT pageid, adid FROM pageAds LATERAL VIEW explode(adid_list) adTable AS adid; SELECT adid, count(1) FROM pageAds LATERAL VIEW explode(adid_list) adTable AS adid GROUP BY adid; SELECT * FROM exampleTable LATERAL VIEW explode(col1) myTable1 AS myCol1 LATERAL VIEW explode(myCol1) myTable2 AS myCol2; SELECT myCol1, myCol2 FROM baseTable LATERAL VIEW explode(col1) myTable1 AS myCol1 LATERAL VIEW explode(col2) myTable2 AS myCol2; SELECT * FROM src LATERAL VIEW explode(array()) C AS a limit 10; SELECT * FROM src LATERAL VIEW OUTER explode(array()) C AS a limit 10; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_lateral_view.yml000066400000000000000000000176501451700765000254740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 42595fd8445bfd6551382c053b6f52b96f0690bd47979028dff4cfd3d862add3 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: pageid - comma: ',' - select_clause_element: column_reference: naked_identifier: adid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pageAds lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: adid_list end_bracket: ) - naked_identifier: adTable - keyword: AS - naked_identifier: adid - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: adid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pageAds lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: adid_list end_bracket: ) - naked_identifier: adTable - keyword: AS - naked_identifier: adid groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: adid - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: exampleTable - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) - naked_identifier: myTable1 - keyword: AS - naked_identifier: myCol1 - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: myCol1 end_bracket: ) - naked_identifier: myTable2 - keyword: AS - naked_identifier: myCol2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: myCol1 - comma: ',' - select_clause_element: column_reference: naked_identifier: myCol2 from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: baseTable - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 end_bracket: ) - naked_identifier: myTable1 - keyword: AS - naked_identifier: myCol1 - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: column_reference: naked_identifier: col2 end_bracket: ) - naked_identifier: myTable2 - keyword: AS - naked_identifier: myCol2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM 
from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: C - keyword: AS - naked_identifier: a limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: C - keyword: AS - naked_identifier: a limit_clause: keyword: limit numeric_literal: '10' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_left_semi_join.sql000066400000000000000000000000771451700765000257750ustar00rootroot00000000000000SELECT a.key, a.val FROM a LEFT SEMI JOIN b ON (a.key = b.key) sqlfluff-2.3.5/test/fixtures/dialects/hive/select_left_semi_join.yml000066400000000000000000000033561451700765000260020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8d293e9a5291804e22108e9862a1832a4a92dce82c84efadf59d77bdcd5fe03 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: key - comma: ',' - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: val from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: LEFT - keyword: SEMI - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: key end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/hive/select_regexp.sql000066400000000000000000000005741451700765000243030ustar00rootroot00000000000000SELECT * FROM example WHERE url REGEXP '(/click)'; SELECT *, url REGEXP '(/click)' FROM example; SELECT * FROM example WHERE url IREGEXP '(/click)'; SELECT *, url IREGEXP '(/click)' FROM example; SELECT * FROM example WHERE url RLIKE '(/click)'; SELECT *, url RLIKE '(/click)' FROM example; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_regexp.yml000066400000000000000000000100771451700765000243040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 31d8fa7eb7f89b69c0f2ca215a958826d9519672935d6a042d73d401c5463b03 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example where_clause: keyword: WHERE expression: column_reference: naked_identifier: url keyword: REGEXP quoted_literal: "'(/click)'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: url keyword: REGEXP quoted_literal: "'(/click)'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example where_clause: keyword: WHERE expression: column_reference: naked_identifier: url keyword: IREGEXP quoted_literal: "'(/click)'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: url keyword: IREGEXP quoted_literal: "'(/click)'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example where_clause: keyword: WHERE expression: column_reference: naked_identifier: url keyword: RLIKE quoted_literal: "'(/click)'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: url keyword: RLIKE quoted_literal: "'(/click)'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/select_sampling_table.sql000066400000000000000000000004151451700765000257640ustar00rootroot00000000000000SELECT * FROM source TABLESAMPLE(BUCKET 3 OUT OF 32 ON id) s; SELECT * FROM source TABLESAMPLE(BUCKET 3 OUT OF 32 ON rand()) s; SELECT * FROM source TABLESAMPLE(0.1 PERCENT) AS s; SELECT * FROM source TABLESAMPLE(100M) s; SELECT * FROM source TABLESAMPLE(10 ROWS); sqlfluff-2.3.5/test/fixtures/dialects/hive/select_sampling_table.yml000066400000000000000000000105131451700765000257660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a373b0698e2abe00ac058b9cd8c923627b6e3c5fe89147f61a360e36271a4963 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '3' - keyword: OUT - keyword: OF - numeric_literal: '32' - keyword: 'ON' - naked_identifier: id - end_bracket: ) alias_expression: naked_identifier: s - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '3' - keyword: OUT - keyword: OF - numeric_literal: '32' - keyword: 'ON' - function: function_name: function_name_identifier: rand bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: naked_identifier: s - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '0.1' keyword: PERCENT end_bracket: ) alias_expression: keyword: AS naked_identifier: s - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( byte_length_literal: 100M end_bracket: ) alias_expression: naked_identifier: s - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '10' keyword: ROWS end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/set.sql000066400000000000000000000001511451700765000222340ustar00rootroot00000000000000set; set -v; set foo = 2; set foo = 'bar'; set hivevar:cat="Chloe"; set mapreduce.reduce.memory.mb=12000;sqlfluff-2.3.5/test/fixtures/dialects/hive/set.yml000066400000000000000000000026011451700765000222400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 13f747fc5ac1c7705c740c0f2e0df1f6c0d06c41ee6c4cfe2f5d2177415d625d file: - statement: set_statement: keyword: set - statement_terminator: ; - statement: set_statement: keyword: set option_indicator: '-' option: v - statement_terminator: ; - statement: set_statement: keyword: set parameter: foo raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: set_statement: keyword: set parameter: foo raw_comparison_operator: '=' quoted_literal: "'bar'" - statement_terminator: ; - statement: set_statement: - keyword: set - parameter: hivevar - colon_delimiter: ':' - parameter: cat - raw_comparison_operator: '=' - quoted_literal: '"Chloe"' - statement_terminator: ; - statement: set_statement: - keyword: set - parameter: mapreduce - dot: . - parameter: reduce - dot: . - parameter: memory - dot: . - parameter: mb - raw_comparison_operator: '=' - numeric_literal: '12000' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/truncate_table.sql000066400000000000000000000000701451700765000244350ustar00rootroot00000000000000TRUNCATE TABLE foo; TRUNCATE bar PARTITION (col='val');sqlfluff-2.3.5/test/fixtures/dialects/hive/truncate_table.yml000066400000000000000000000016471451700765000244520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bbaa4a03f49727439bf083228d7f2b6949e04930c348c175f8a43f85c5fb078c file: - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: foo - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bar - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '=' quoted_literal: "'val'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/hive/use.sql000066400000000000000000000000071451700765000222350ustar00rootroot00000000000000USE db;sqlfluff-2.3.5/test/fixtures/dialects/hive/use.yml000066400000000000000000000010161451700765000222400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/000077500000000000000000000000001451700765000222765ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/materialize/.sqlfluff000066400000000000000000000000411451700765000241140ustar00rootroot00000000000000[sqlfluff] dialect = materialize sqlfluff-2.3.5/test/fixtures/dialects/materialize/alter_statements.sql000066400000000000000000000015161451700765000264000ustar00rootroot00000000000000 -- Alter connection rotate keys ALTER CONNECTION test rotate keys; -- Alter default privileges ALTER DEFAULT PRIVILEGES FOR ROLE mike GRANT SELECT ON TABLES TO joe; ALTER DEFAULT PRIVILEGES FOR ALL ROLES GRANT SELECT ON TABLES TO managers; -- Alter name ALTER CONNECTION test RENAME TO test2; ALTER INDEX test RENAME TO test2; ALTER MATERIALIZED VIEW test RENAME TO test2; ALTER SOURCE test RENAME TO test2; ALTER SINK test RENAME TO test2; ALTER TABLE test RENAME TO test2; ALTER VIEW test RENAME TO test2; ALTER SECRET test RENAME TO test2; -- Alter index enable ALTER INDEX test_idx SET ENABLED; -- Alter secret value ALTER SECRET IF EXISTS name AS value; ALTER SECRET name AS value; -- Alter Source size ALTER SOURCE IF EXISTS sink_name SET ( SIZE 'xsmall' ); -- Alter Sink size ALTER SINK IF EXISTS source_name SET ( SIZE 'xsmall' ); sqlfluff-2.3.5/test/fixtures/dialects/materialize/alter_statements.yml000066400000000000000000000114031451700765000263760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 0b4b5f77bd55d4361375bd19915c13f6a9f0ee98744911bca5a3fc39be5afebc file: - statement: alter_connection_rotate_keys: - keyword: ALTER - keyword: CONNECTION - object_reference: naked_identifier: test - keyword: rotate - keyword: keys - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: mike - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLES - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ALL - keyword: ROLES - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: TABLES - keyword: TO - object_reference: naked_identifier: managers - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: CONNECTION - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SOURCE - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SINK - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: test - keyword: RENAME - keyword: TO - table_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: VIEW - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_rename_statement: - keyword: ALTER - keyword: SECRET - object_reference: naked_identifier: test - keyword: RENAME - keyword: TO - object_reference: naked_identifier: test2 - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: SET - keyword: ENABLED - statement_terminator: ; - statement: alter_secret_statement: - keyword: ALTER - keyword: SECRET - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: alter_secret_statement: - keyword: ALTER - keyword: SECRET - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: alter_source_sink_size_statement: - keyword: ALTER - keyword: SOURCE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: sink_name - keyword: SET - bracketed: start_bracket: ( keyword: SIZE compression_type: "'xsmall'" end_bracket: ) - statement_terminator: ; - statement: alter_source_sink_size_statement: - keyword: 
ALTER - keyword: SINK - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: source_name - keyword: SET - bracketed: start_bracket: ( keyword: SIZE compression_type: "'xsmall'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/begin_close_statements.sql000066400000000000000000000000701451700765000275340ustar00rootroot00000000000000 BEGIN; CLOSE my_cursor; END; CLOSE CURSOR; COMMIT; sqlfluff-2.3.5/test/fixtures/dialects/materialize/begin_close_statements.yml000066400000000000000000000016451451700765000275470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0de000b9f06ec8d81805df41727cab47fcc79f2671969f20c7365d5d379ee2ae file: - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: close_statement: keyword: CLOSE object_reference: naked_identifier: my_cursor - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: close_statement: keyword: CLOSE object_reference: naked_identifier: CURSOR - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/copy_to_from_statements.sql000066400000000000000000000010071451700765000277630ustar00rootroot00000000000000 COPY (SELECT * FROM t1 WHERE value < 100) TO STDOUT; COPY (SELECT * FROM t1 WHERE value < 100) TO STDOUT WITH (FORMAT binary); COPY (SUBSCRIBE some_view) TO STDOUT; COPY (SUBSCRIBE some_view) TO STDOUT WITH (FORMAT binary); COPY (VALUES (1, '2'), (3, '4'), (5, '\\\t\n\rtest\\N'), (6, NULL) ORDER BY column1) TO STDOUT; COPY t FROM STDIN; COPY t FROM STDIN WITH (FORMAT CSV, DELIMITER '!', QUOTE '!'); COPY t FROM STDIN WITH (DELIMITER '|'); COPY t FROM STDIN (FORMAT CSV); COPY t FROM STDIN (DELIMITER '|'); sqlfluff-2.3.5/test/fixtures/dialects/materialize/copy_to_from_statements.yml000066400000000000000000000125021451700765000277670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0968790cb56470e98c103f4209f96d0374b6d1aa6802cfb77e19bd97512c6dd8 file: - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: value comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: binary - end_bracket: ) - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( keyword: SUBSCRIBE object_reference: naked_identifier: some_view end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: start_bracket: ( keyword: SUBSCRIBE object_reference: naked_identifier: some_view end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: binary - end_bracket: ) - statement_terminator: ; - statement: copy_to_statement: - keyword: COPY - bracketed: - start_bracket: ( - keyword: VALUES - bracketed: start_bracket: ( numeric_literal: '1' comma: ',' single_quote: "'2'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '3' comma: ',' single_quote: "'4'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '5' comma: ',' single_quote: "'\\\\\\t\\n\\rtest\\\\N'" end_bracket: ) - comma: ',' - bracketed: start_bracket: ( numeric_literal: '6' comma: ',' word: 'NULL' end_bracket: ) - word: ORDER - word: BY - word: column1 - end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - word: FORMAT - word: CSV - comma: ',' - word: DELIMITER - single_quote: "'!'" - comma: ',' - word: QUOTE - single_quote: "'!'" - end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: start_bracket: ( word: DELIMITER single_quote: "'|'" end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - bracketed: - start_bracket: ( - word: FORMAT - word: CSV - end_bracket: ) - statement_terminator: ; - statement: copy_from_statement: - keyword: COPY - object_reference: naked_identifier: t - keyword: FROM - keyword: STDIN - 
bracketed: start_bracket: ( word: DELIMITER single_quote: "'|'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_cluster_replica_statements.sql000066400000000000000000000007341451700765000317750ustar00rootroot00000000000000 CREATE CLUSTER mz_joe REPLICAS (r1 (size '1')); CREATE CLUSTER mz_joe REPLICAS (r1 (size '1'), r2 (size '1')); CREATE CLUSTER c1 SIZE = 'medium', REPLICATION FACTOR = 2; CREATE CLUSTER c SIZE = 'xsmall', INTROSPECTION INTERVAL = 0; CREATE CLUSTER c1 SIZE 'xsmall', REPLICATION FACTOR = 0; CREATE CLUSTER REPLICA default.size_1 SIZE 'large'; CREATE CLUSTER REPLICA c1.r1 SIZE = 'medium'; CREATE CLUSTER REPLICA default.replica AVAILABILITY ZONE 'a', AVAILABILITY ZONE 'b'; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_cluster_replica_statements.yml000066400000000000000000000063171451700765000320020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e3e79b049fa84d6e57eb5f7b994e1718aa357e4f550141e6041a4185ad9145f file: - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: mz_joe - keyword: REPLICAS - bracketed: start_bracket: ( word: r1 bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: mz_joe - keyword: REPLICAS - bracketed: - start_bracket: ( - word: r1 - bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) - comma: ',' - word: r2 - bracketed: start_bracket: ( word: size single_quote: "'1'" end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c1 - word: SIZE - equals: '=' - single_quote: "'medium'" - comma: ',' - word: REPLICATION - word: FACTOR - equals: '=' - numeric_literal: '2' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c - word: SIZE - equals: '=' - single_quote: "'xsmall'" - comma: ',' - word: INTROSPECTION - word: INTERVAL - equals: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: c1 - word: SIZE - single_quote: "'xsmall'" - comma: ',' - word: REPLICATION - word: FACTOR - equals: '=' - numeric_literal: '0' - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: default - dot: . - word: size_1 - word: SIZE - single_quote: "'large'" - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: c1 - dot: . - word: r1 - word: SIZE - equals: '=' - single_quote: "'medium'" - statement_terminator: ; - statement: create_cluster_statement: - keyword: CREATE - keyword: CLUSTER - object_reference: naked_identifier: REPLICA - word: default - dot: . 
- word: replica - word: AVAILABILITY - word: ZONE - single_quote: "'a'" - comma: ',' - word: AVAILABILITY - word: ZONE - single_quote: "'b'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_connection_statement.sql000066400000000000000000000031261451700765000305670ustar00rootroot00000000000000 CREATE SECRET IF NOT EXISTS name AS value; CREATE SECRET name AS value; CREATE CONNECTION privatelink_svc TO AWS PRIVATELINK ( SERVICE NAME 'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc', AVAILABILITY ZONES ('use1-az1', 'use1-az4') ); CREATE CONNECTION csr_ssl TO CONFLUENT SCHEMA REGISTRY ( URL 'https://rp-f00000bar.data.vectorized.cloud:30993', SSL KEY = SECRET csr_ssl_key, SSL CERTIFICATE = SECRET csr_ssl_crt, USERNAME = 'foo', PASSWORD = SECRET csr_password ); CREATE CONNECTION privatelink_svc TO AWS PRIVATELINK ( SERVICE NAME 'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc', AVAILABILITY ZONES ('use1-az1', 'use1-az4') ); CREATE CONNECTION csr_privatelink TO CONFLUENT SCHEMA REGISTRY ( URL 'http://my-confluent-schema-registry:8081', AWS PRIVATELINK privatelink_svc ); CREATE CONNECTION kafka_connection TO KAFKA ( BROKER 'rp-f00000bar.data.vectorized.cloud:30365', SSL KEY = SECRET kafka_ssl_key, SSL CERTIFICATE = SECRET kafka_ssl_crt ); CREATE CONNECTION kafka_connection TO KAFKA ( BROKERS ('broker1:9092', 'broker2:9092') ); CREATE CONNECTION pg_connection TO POSTGRES ( HOST 'instance.foo000.us-west-1.rds.amazonaws.com', PORT 5432, USER 'postgres', PASSWORD SECRET pgpass, SSL MODE 'require', DATABASE 'postgres' ); CREATE CONNECTION tunnel TO SSH TUNNEL ( HOST 'bastion-host', PORT 22, USER 'materialize', ); CREATE CONNECTION pg_connection TO POSTGRES ( HOST 'instance.foo000.us-west-1.rds.amazonaws.com', PORT 5432, SSH TUNNEL tunnel, DATABASE 'postgres' ); sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_connection_statement.yml000066400000000000000000000147341451700765000306000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8feb4d3ab52a9feac513436ec5190bdea7f8c3ad906dc692cd4a8f93bd8dcab1 file: - statement: create_secret_statement: - keyword: CREATE - keyword: SECRET - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: create_secret_statement: - keyword: CREATE - keyword: SECRET - object_reference: naked_identifier: name - keyword: AS - word: value - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: privatelink_svc - keyword: TO - keyword: AWS - keyword: PRIVATELINK - bracketed: - start_bracket: ( - word: SERVICE - word: NAME - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'" - comma: ',' - word: AVAILABILITY - word: ZONES - bracketed: - start_bracket: ( - single_quote: "'use1-az1'" - comma: ',' - single_quote: "'use1-az4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: csr_ssl - keyword: TO - keyword: CONFLUENT - keyword: SCHEMA - keyword: REGISTRY - bracketed: - start_bracket: ( - word: URL - single_quote: "'https://rp-f00000bar.data.vectorized.cloud:30993'" - comma: ',' - word: SSL - word: KEY - equals: '=' - word: SECRET - word: csr_ssl_key - comma: ',' - word: SSL - word: CERTIFICATE - equals: '=' - word: SECRET - word: csr_ssl_crt - comma: ',' - word: USERNAME - equals: '=' - single_quote: "'foo'" - comma: ',' - word: PASSWORD - equals: '=' - word: SECRET - word: csr_password - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: privatelink_svc - keyword: TO - keyword: AWS - keyword: PRIVATELINK - bracketed: - start_bracket: ( - word: SERVICE - word: NAME - single_quote: "'com.amazonaws.vpce.us-east-1.vpce-svc-0e123abc123198abc'" - comma: ',' - word: AVAILABILITY - word: ZONES - bracketed: - start_bracket: ( - single_quote: "'use1-az1'" - comma: ',' - single_quote: "'use1-az4'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: csr_privatelink - keyword: TO - keyword: CONFLUENT - keyword: SCHEMA - keyword: REGISTRY - bracketed: - start_bracket: ( - word: URL - single_quote: "'http://my-confluent-schema-registry:8081'" - comma: ',' - word: AWS - word: PRIVATELINK - word: privatelink_svc - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - keyword: TO - keyword: KAFKA - bracketed: - start_bracket: ( - word: BROKER - single_quote: "'rp-f00000bar.data.vectorized.cloud:30365'" - comma: ',' - word: SSL - word: KEY - equals: '=' - word: SECRET - word: kafka_ssl_key - comma: ',' - word: SSL - word: CERTIFICATE - equals: '=' - word: SECRET - word: kafka_ssl_crt - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - keyword: TO - keyword: KAFKA - bracketed: start_bracket: ( word: BROKERS bracketed: - start_bracket: ( - single_quote: "'broker1:9092'" - comma: ',' - single_quote: "'broker2:9092'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: 
create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - keyword: TO - keyword: POSTGRES - bracketed: - start_bracket: ( - word: HOST - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'" - comma: ',' - word: PORT - numeric_literal: '5432' - comma: ',' - word: USER - single_quote: "'postgres'" - comma: ',' - word: PASSWORD - word: SECRET - word: pgpass - comma: ',' - word: SSL - word: MODE - single_quote: "'require'" - comma: ',' - word: DATABASE - single_quote: "'postgres'" - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: tunnel - keyword: TO - keyword: SSH - keyword: TUNNEL - bracketed: - start_bracket: ( - word: HOST - single_quote: "'bastion-host'" - comma: ',' - word: PORT - numeric_literal: '22' - comma: ',' - word: USER - single_quote: "'materialize'" - comma: ',' - end_bracket: ) - statement_terminator: ; - statement: create_connection_statement: - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - keyword: TO - keyword: POSTGRES - bracketed: - start_bracket: ( - word: HOST - single_quote: "'instance.foo000.us-west-1.rds.amazonaws.com'" - comma: ',' - word: PORT - numeric_literal: '5432' - comma: ',' - word: SSH - word: TUNNEL - word: tunnel - comma: ',' - word: DATABASE - single_quote: "'postgres'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_index.sql000066400000000000000000000003001451700765000254420ustar00rootroot00000000000000 CREATE INDEX active_customers_geo_idx ON active_customers (geo_id); CREATE INDEX active_customers_exp_idx ON active_customers (upper(guid)); CREATE INDEX i2 IN CLUSTER cluster2 ON t1 (f1); sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_index.yml000066400000000000000000000031231451700765000254520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0d5e8e3fb676ceada06a3127eb1fc1be40b9896c5b0f0dd82f01043d3ee2f6d3 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: active_customers_geo_idx - keyword: 'ON' - object_reference: naked_identifier: active_customers - bracketed: start_bracket: ( word: geo_id end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: active_customers_exp_idx - keyword: 'ON' - object_reference: naked_identifier: active_customers - bracketed: start_bracket: ( word: upper bracketed: start_bracket: ( word: guid end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: i2 - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: cluster2 - keyword: 'ON' - object_reference: naked_identifier: t1 - bracketed: start_bracket: ( word: f1 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_sink_statements.sql000066400000000000000000000010141451700765000275510ustar00rootroot00000000000000CREATE SINK quotes_sink FROM quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM WITH (SIZE = '3xsmall'); CREATE SINK frank_quotes_sink FROM frank_quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'frank-quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM WITH (SIZE = '3xsmall'); CREATE SINK frank_quotes_cluster IN CLUSTER my_cluster FROM frank_quotes INTO KAFKA CONNECTION kafka_connection (TOPIC 'frank-quotes-sink') FORMAT JSON ENVELOPE DEBEZIUM;sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_sink_statements.yml000066400000000000000000000046551451700765000275710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fffc00dca6123acc63abc6ba2388540b602a3c2210442ca659b4bb8fae7dbda6 file: - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: quotes_sink - keyword: FROM - object_reference: naked_identifier: quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: frank_quotes_sink - keyword: FROM - object_reference: naked_identifier: frank_quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'frank-quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_sink_kafka_statement: - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: frank_quotes_cluster - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - object_reference: naked_identifier: frank_quotes - keyword: INTO - word: KAFKA - word: CONNECTION - word: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'frank-quotes-sink'" end_bracket: ) - word: FORMAT - word: JSON - word: ENVELOPE - word: DEBEZIUM - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_source_statements.sql000066400000000000000000000057411451700765000301200ustar00rootroot00000000000000 CREATE SOURCE avro_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_connection WITH (SIZE = '3xsmall'); CREATE VIEW jsonified_kafka_source AS SELECT data->>'field1' AS field_1, data->>'field2' AS field_2, data->>'field3' AS field_3 FROM (SELECT CONVERT_FROM(data, 'utf8')::jsonb AS data FROM json_source); CREATE SOURCE proto_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT PROTOBUF USING CONFLUENT SCHEMA REGISTRY CONNECTION csr_connection WITH (SIZE = '3xsmall'); CREATE SOURCE text_source FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT TEXT ENVELOPE UPSERT WITH (SIZE = '3xsmall'); CREATE SOURCE csv_source (col_foo, col_bar, col_baz) FROM KAFKA CONNECTION kafka_connection (TOPIC 'test_topic') FORMAT CSV WITH 3 COLUMNS WITH (SIZE = '3xsmall'); CREATE SOURCE auction_house FROM LOAD GENERATOR AUCTION FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE marketing FROM LOAD GENERATOR MARKETING (SCALE FACTOR 1) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE marketing IN CLUSTER my_cluster FROM LOAD GENERATOR MARKETING FOR ALL TABLES; CREATE SOURCE tpch FROM LOAD GENERATOR TPCH (SCALE FACTOR 1) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE counter FROM LOAD GENERATOR COUNTER WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source IN CLUSTER my_cluster FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') FOR ALL TABLES; CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection 
(PUBLICATION 'mz_source') FOR TABLES (table_1, table_2 AS alias_table_2) WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection ( PUBLICATION 'mz_source', TEXT COLUMNS (table.column_of_unsupported_type) ) FOR ALL TABLES WITH (SIZE = '3xsmall'); CREATE SOURCE mz_source FROM POSTGRES CONNECTION pg_connection (PUBLICATION 'mz_source') WITH (SIZE = '3xsmall'); CREATE SOURCE my_webhook_source IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON INCLUDE HEADERS ( NOT 'authorization', NOT 'x-api-key' ); CREATE SOURCE my_webhook_source IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON CHECK ( WITH ( HEADERS, BODY AS request_body, SECRET my_webhook_shared_secret ) decode(headers->'x-signature', 'base64') = hmac(request_body, my_webhook_shared_secret, 'sha256') ); CREATE SOURCE webhook_with_basic_auth IN CLUSTER my_cluster FROM WEBHOOK BODY FORMAT JSON CHECK ( WITH ( HEADERS, BODY AS request_body, SECRET BASIC_HOOK_AUTH ) headers->'authorization' = BASIC_HOOK_AUTH ); CREATE TYPE type_name AS ( field_name field_type , field_name field_type ); CREATE TYPE row_type AS (a int, b text); CREATE TYPE nested_row_type AS (a row_type, b float8); sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_source_statements.yml000066400000000000000000000407721451700765000301250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3422c43e1113bb4644db2e4623d2b31f6eaa9006485912b4f6b147adc11f820b file: - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: avro_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: AVRO - word: USING - word: CONFLUENT - word: SCHEMA - word: REGISTRY - word: CONNECTION - word: csr_connection - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: jsonified_kafka_source - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field1'" alias_expression: keyword: AS naked_identifier: field_1 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field2'" alias_expression: keyword: AS naked_identifier: field_2 - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: data binary_operator: ->> quoted_literal: "'field3'" alias_expression: keyword: AS naked_identifier: field_3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: function: function_name: function_name_identifier: CONVERT_FROM bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: data - comma: ',' - expression: quoted_literal: "'utf8'" - end_bracket: ) casting_operator: '::' 
data_type: keyword: jsonb alias_expression: keyword: AS naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: json_source end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: proto_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: PROTOBUF - word: USING - word: CONFLUENT - word: SCHEMA - word: REGISTRY - word: CONNECTION - word: csr_connection - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: text_source - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: TEXT - word: ENVELOPE - word: UPSERT - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_kafka_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: csv_source - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_foo - comma: ',' - column_reference: naked_identifier: col_bar - comma: ',' - column_reference: naked_identifier: col_baz - end_bracket: ) - keyword: FROM - keyword: KAFKA - keyword: CONNECTION - object_reference: naked_identifier: kafka_connection - bracketed: start_bracket: ( word: TOPIC single_quote: "'test_topic'" end_bracket: ) - keyword: FORMAT - word: CSV - word: WITH - numeric_literal: '3' - word: COLUMNS - word: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: auction_house - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: AUCTION - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: marketing - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: MARKETING - bracketed: - start_bracket: ( - word: SCALE - word: FACTOR - numeric_literal: '1' - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: marketing - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: MARKETING - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: tpch - keyword: FROM - keyword: LOAD 
- keyword: GENERATOR - keyword: TPCH - bracketed: - start_bracket: ( - word: SCALE - word: FACTOR - numeric_literal: '1' - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: counter - keyword: FROM - keyword: LOAD - keyword: GENERATOR - keyword: COUNTER - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: FOR - keyword: TABLES - bracketed: - start_bracket: ( - word: table_1 - comma: ',' - word: table_2 - word: AS - word: alias_table_2 - end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: - start_bracket: ( - word: PUBLICATION - single_quote: "'mz_source'" - comma: ',' - word: TEXT - word: COLUMNS - bracketed: - start_bracket: ( - word: table - dot: . 
- word: column_of_unsupported_type - end_bracket: ) - end_bracket: ) - keyword: FOR - keyword: ALL - keyword: TABLES - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_postgres_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: mz_source - keyword: FROM - keyword: POSTGRES - keyword: CONNECTION - object_reference: naked_identifier: pg_connection - bracketed: start_bracket: ( word: PUBLICATION single_quote: "'mz_source'" end_bracket: ) - keyword: WITH - bracketed: start_bracket: ( word: SIZE equals: '=' single_quote: "'3xsmall'" end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: my_webhook_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - keyword: INCLUDE - keyword: HEADERS - bracketed: - start_bracket: ( - word: NOT - single_quote: "'authorization'" - comma: ',' - word: NOT - single_quote: "'x-api-key'" - end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: my_webhook_source - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - word: CHECK - bracketed: - start_bracket: ( - word: WITH - bracketed: - start_bracket: ( - word: HEADERS - comma: ',' - word: BODY - word: AS - word: request_body - comma: ',' - word: SECRET - word: my_webhook_shared_secret - end_bracket: ) - word: decode - bracketed: - start_bracket: ( - word: headers - json_operator: -> - single_quote: "'x-signature'" - comma: ',' - single_quote: "'base64'" - end_bracket: ) - equals: '=' - word: hmac - bracketed: - start_bracket: ( - word: request_body - comma: ',' - word: my_webhook_shared_secret - comma: ',' - single_quote: "'sha256'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_source_load_generator_statement: - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: webhook_with_basic_auth - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: my_cluster - keyword: FROM - keyword: WEBHOOK - keyword: BODY - keyword: FORMAT - keyword: JSON - word: CHECK - bracketed: - start_bracket: ( - word: WITH - bracketed: - start_bracket: ( - word: HEADERS - comma: ',' - word: BODY - word: AS - word: request_body - comma: ',' - word: SECRET - word: BASIC_HOOK_AUTH - end_bracket: ) - word: headers - json_operator: -> - single_quote: "'authorization'" - equals: '=' - word: BASIC_HOOK_AUTH - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: type_name - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: field_name - data_type: data_type_identifier: field_type - comma: ',' - object_reference: naked_identifier: field_name - data_type: data_type_identifier: field_type - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: row_type - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: a - data_type: keyword: int - comma: ',' 
- object_reference: naked_identifier: b - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: nested_row_type - keyword: AS - bracketed: - start_bracket: ( - object_reference: naked_identifier: a - data_type: data_type_identifier: row_type - comma: ',' - object_reference: naked_identifier: b - data_type: keyword: float8 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_views.sql000066400000000000000000000016021451700765000254760ustar00rootroot00000000000000 CREATE MATERIALIZED VIEW "test"."test" AS SELECT 1 AS "id"; CREATE VIEW "test"."test" AS SELECT 1 AS "id"; CREATE MATERIALIZED VIEW "test"."test" AS SELECT '{"a": 1}'::json AS "id"; CREATE MATERIALIZED VIEW active_customer_per_geo AS SELECT geo.name, count(*) FROM geo_regions AS geo JOIN active_customers ON active_customers.geo_id = geo.id GROUP BY geo.name; CREATE MATERIALIZED VIEW active_customers AS SELECT guid, geo_id, last_active_on FROM customer_source GROUP BY geo_id; CREATE VIEW purchase_sum_by_region AS SELECT sum(purchase.amount) AS region_sum, region.id AS region_id FROM region INNER JOIN user ON region.id = user.region_id INNER JOIN purchase ON purchase.user_id = user.id GROUP BY region.id; CREATE TEMP VIEW "test"."test" AS SELECT 1 AS "id"; CREATE TEMPORARY TABLE t (a int, b text NOT NULL); sqlfluff-2.3.5/test/fixtures/dialects/materialize/create_views.yml000066400000000000000000000152101451700765000255000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f2b3a5496bca43ee44807e85f3e26db32e94392d3c66f9a81351dd0176c6a9e file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - word: SELECT - numeric_literal: '1' - word: AS - double_quote: '"id"' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS quoted_identifier: '"id"' - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . - quoted_identifier: '"test"' - keyword: AS - word: SELECT - single_quote: "'{\"a\": 1}'" - casting_operator: '::' - word: json - word: AS - double_quote: '"id"' - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: active_customer_per_geo - keyword: AS - word: SELECT - word: geo - dot: . - word: name - comma: ',' - word: count - bracketed: start_bracket: ( star: '*' end_bracket: ) - word: FROM - word: geo_regions - word: AS - word: geo - word: JOIN - word: active_customers - word: 'ON' - word: active_customers - dot: . - word: geo_id - equals: '=' - word: geo - dot: . 
- word: id - word: GROUP - word: BY - word: geo - dot: . - word: name - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: active_customers - keyword: AS - word: SELECT - word: guid - comma: ',' - word: geo_id - comma: ',' - word: last_active_on - word: FROM - word: customer_source - word: GROUP - word: BY - word: geo_id - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: purchase_sum_by_region - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: - naked_identifier: purchase - dot: . - naked_identifier: amount end_bracket: ) alias_expression: keyword: AS naked_identifier: region_sum - comma: ',' - select_clause_element: column_reference: - naked_identifier: region - dot: . - naked_identifier: id alias_expression: keyword: AS naked_identifier: region_id from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: region - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: user - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: region - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: user - dot: . - naked_identifier: region_id - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: purchase - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: purchase - dot: . - naked_identifier: user_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: user - dot: . - naked_identifier: id groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: region - dot: . - naked_identifier: id - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - object_reference: - quoted_identifier: '"test"' - dot: . 
- quoted_identifier: '"test"' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS quoted_identifier: '"id"' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: text - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/drop_statements.sql000066400000000000000000000015111451700765000262300ustar00rootroot00000000000000DROP CONNECTION IF EXISTS name CASCADE; DROP CLUSTER IF EXISTS name CASCADE; DROP CLUSTER REPLICA IF EXISTS name CASCADE; DROP DATABASE IF EXISTS name CASCADE; DROP INDEX IF EXISTS name CASCADE; DROP MATERIALIZED VIEW IF EXISTS name CASCADE; DROP ROLE IF EXISTS name CASCADE; DROP SECRET IF EXISTS name CASCADE; DROP SCHEMA IF EXISTS name CASCADE; DROP SINK IF EXISTS name CASCADE; DROP SOURCE IF EXISTS name CASCADE; DROP TABLE IF EXISTS name CASCADE; DROP TYPE IF EXISTS name CASCADE; DROP VIEW IF EXISTS name CASCADE; DROP USER IF EXISTS name CASCADE; DROP CONNECTION name; DROP CLUSTER name; DROP CLUSTER REPLICA name; DROP DATABASE name; DROP INDEX name; DROP MATERIALIZED VIEW name; DROP ROLE name; DROP SECRET name; DROP SCHEMA name; DROP SINK name; DROP SOURCE name; DROP TABLE name; DROP TYPE name; DROP VIEW name; DROP USER name; sqlfluff-2.3.5/test/fixtures/dialects/materialize/drop_statements.yml000066400000000000000000000137721451700765000262460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 60d61fd583710b501d72748aa44e061fe48d833ac28dc786840388d1e463b0e4 file: - statement: drop_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: REPLICA - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SECRET - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SINK - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SOURCE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - data_type: data_type_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: name - keyword: CASCADE - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CONNECTION - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: CLUSTER - keyword: REPLICA - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: INDEX - object_reference: naked_identifier: name - 
statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SECRET - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SINK - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_statement: - keyword: DROP - keyword: SOURCE - object_reference: naked_identifier: name - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: name - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: name - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: name - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/explain_statements.sql000066400000000000000000000003361451700765000267300ustar00rootroot00000000000000 EXPLAIN SELECT * FROM t1; EXPLAIN SELECT * FROM t1 WHERE f1 = 1; EXPLAIN PHYSICAL PLAN FOR VIEW v1; EXPLAIN VIEW v; EXPLAIN WITH(arity, join_impls) VIEW foo; EXPLAIN OPTIMIZED PLAN WITH(arity) AS TEXT FOR VIEW test1; sqlfluff-2.3.5/test/fixtures/dialects/materialize/explain_statements.yml000066400000000000000000000051771451700765000267420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b038606aef92dbf2699f3794a67c439727bbfc8cde01ece39a63d8c8f0d3b2df file: - statement: explain_statement: keyword: EXPLAIN select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: f1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: PHYSICAL - keyword: PLAN - keyword: FOR - keyword: VIEW - object_reference: naked_identifier: v1 - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: VIEW - object_reference: naked_identifier: v - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: WITH - bracketed: - start_bracket: ( - word: arity - comma: ',' - word: join_impls - end_bracket: ) - keyword: VIEW - object_reference: naked_identifier: foo - statement_terminator: ; - statement: explain_statement: - keyword: EXPLAIN - keyword: OPTIMIZED - keyword: PLAN - keyword: WITH - bracketed: start_bracket: ( word: arity end_bracket: ) - keyword: AS - keyword: TEXT - keyword: FOR - keyword: VIEW - object_reference: naked_identifier: test1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/grant_statements.sql000066400000000000000000000002311451700765000263750ustar00rootroot00000000000000GRANT SELECT ON mv TO joe, mike; GRANT USAGE, CREATE ON DATABASE materialize TO joe; GRANT ALL ON CLUSTER dev TO joe; GRANT CREATEDB ON SYSTEM TO joe;sqlfluff-2.3.5/test/fixtures/dialects/materialize/grant_statements.yml000066400000000000000000000030201451700765000263760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 994ca2114abc64576e549d934ff7e6e1ef58be7efffacde90e0f385c5b84b108 file: - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: mv - keyword: TO - role_reference: naked_identifier: joe - comma: ',' - role_reference: naked_identifier: mike - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - comma: ',' - keyword: CREATE - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: materialize - keyword: TO - role_reference: naked_identifier: joe - statement_terminator: ; - statement: grant_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: CLUSTER - object_reference: naked_identifier: dev - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; - statement: grant_statement: - keyword: GRANT - keyword: CREATEDB - keyword: 'ON' - object_reference: naked_identifier: SYSTEM - keyword: TO - object_reference: naked_identifier: joe - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/insert_statements.sql000066400000000000000000000006401451700765000265720ustar00rootroot00000000000000INSERT INTO kv VALUES ('A'); INSERT INTO kv (v) VALUES ('a'); INSERT INTO kv (k) VALUES ('nil1'); INSERT INTO kv (k) VALUES ('nil2'); INSERT INTO kv VALUES ('nil3', NULL); INSERT INTO kv VALUES ('nil4', NULL); INSERT INTO kv (k,v) VALUES ('a', 'b'), ('c', 'd'); -- RETURNING INSERT INTO t (a) VALUES (10) RETURNING b; INSERT INTO t VALUES (7, 8) RETURNING (SELECT 1); INSERT INTO t VALUES (7, 8) RETURNING z; sqlfluff-2.3.5/test/fixtures/dialects/materialize/insert_statements.yml000066400000000000000000000125221451700765000265760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b70bc8e81398cb2679cd091578388c7e7f969820047dfb10a3f4b941fe43b014 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'A'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: v end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'a'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: k end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'nil1'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: start_bracket: ( column_reference: naked_identifier: k end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'nil2'" end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'nil3'" - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'nil4'" - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: kv - bracketed: - start_bracket: ( - column_reference: naked_identifier: k - comma: ',' - column_reference: naked_identifier: v - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'c'" - comma: ',' - expression: quoted_literal: "'d'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - keyword: RETURNING - expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: 
'7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: z - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/show_statements.sql000066400000000000000000000020741451700765000262510ustar00rootroot00000000000000 SHOW COLUMNS; SHOW CONNECTIONS; SHOW CLUSTERS; SHOW CLUSTER REPLICAS; SHOW DATABASES; SHOW INDEXES; SHOW MATERIALIZED VIEWS; SHOW SECRETS; SHOW SCHEMAS; SHOW SINKS; SHOW SOURCES; SHOW TABLES; SHOW TYPES; SHOW VIEWS; SHOW OBJECTS; SHOW CREATE CONNECTION connection_name; SHOW CREATE INDEX index_name; SHOW CREATE MATERIALIZED VIEW view_name; SHOW CREATE SINK sink_name; SHOW CREATE SOURCE source_name; SHOW CREATE TABLE table_name; SHOW CREATE VIEW view_name; SHOW TABLES LIKE 'v%'; SHOW DATABASES WHERE database_name = 'database_name'; SHOW SECRETS WHERE name='secret_name'; SHOW SECRETS LIKE 'secret_name%'; SHOW COLUMNS FROM some_table WHERE name='column_name'; SHOW SECRETS FROM some_schema WHERE name='secret_name'; SHOW SECRETS FROM some_schema WHERE LIKE 'name%'; SHOW SCHEMAS FROM database_name; SHOW SINKS FROM some_schema; SHOW SOURCES FROM some_schema; SHOW TABLES FROM some_schema; SHOW TYPES FROM some_schema; SHOW VIEWS FROM some_schema; SHOW OBJECTS FROM some_schema; SHOW INDEXES IN CLUSTER bar WHERE name NOT LIKE 'mz_%'; SHOW MATERIALIZED VIEWS IN CLUSTER other; sqlfluff-2.3.5/test/fixtures/dialects/materialize/show_statements.yml000066400000000000000000000161541451700765000262570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a6516db64934fcd250390c63be7bb912cd6df80a7c1aff070ec66c432087cbad file: - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CONNECTIONS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CLUSTERS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: CLUSTER - keyword: REPLICAS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: INDEXES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: MATERIALIZED - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SINKS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SOURCES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TYPES - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: OBJECTS - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: CONNECTION - object_reference: naked_identifier: connection_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: INDEX - object_reference: naked_identifier: index_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: SINK - object_reference: naked_identifier: sink_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: SOURCE - object_reference: naked_identifier: source_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: show_create_statement: - keyword: SHOW - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: LIKE - quoted_literal: "'v%'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: DATABASES - keyword: WHERE - expression: column_reference: naked_identifier: database_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'database_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: WHERE - expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'secret_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: LIKE - quoted_literal: "'secret_name%'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: COLUMNS - keyword: FROM - object_reference: naked_identifier: some_table - keyword: WHERE - 
expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'column_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: FROM - object_reference: naked_identifier: some_schema - keyword: WHERE - expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'secret_name'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SECRETS - keyword: FROM - object_reference: naked_identifier: some_schema - keyword: WHERE - expression: data_type: data_type_identifier: LIKE quoted_literal: "'name%'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SCHEMAS - keyword: FROM - object_reference: naked_identifier: database_name - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SINKS - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: SOURCES - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TABLES - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TYPES - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: OBJECTS - keyword: FROM - object_reference: naked_identifier: some_schema - statement_terminator: ; - statement: show_indexes_statement: - keyword: SHOW - keyword: INDEXES - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: bar - keyword: WHERE - expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - quoted_literal: "'mz_%'" - statement_terminator: ; - statement: show_materialized_views_statement: - keyword: SHOW - keyword: MATERIALIZED - keyword: VIEWS - keyword: IN - keyword: CLUSTER - object_reference: naked_identifier: other - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/materialize/subscribe_fetch_statements.sql000066400000000000000000000002311451700765000304140ustar00rootroot00000000000000 FETCH 100 c WITH (timeout='1s'); FETCH ALL c1; DECLARE c CURSOR FOR SUBSCRIBE fetch_during_ingest; DECLARE c CURSOR FOR SUBSCRIBE (SELECT * FROM t1); sqlfluff-2.3.5/test/fixtures/dialects/materialize/subscribe_fetch_statements.yml000066400000000000000000000026351451700765000304300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
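The header repeated on every `.yml` fixture explains that a `_hash` field guards against hand edits. The exact hashing scheme lives in SQLFluff's test helpers and is not reproduced in this archive; the sketch below only illustrates the general idea — hash everything except the `_hash` field itself and compare — with `sha256` over a canonical YAML dump as an assumed stand-in for the real algorithm.

```python
# Illustrative only -- the real hash computation is SQLFluff's, not this.
import hashlib

import yaml  # PyYAML, assumed available


def fixture_hash_matches(path: str) -> bool:
    with open(path, encoding="utf-8") as f:
        record = yaml.safe_load(f)
    recorded = record.pop("_hash", None)
    # Hash a canonical dump of everything except the hash field itself.
    payload = yaml.safe_dump(record, sort_keys=True).encode("utf-8")
    return recorded == hashlib.sha256(payload).hexdigest()
```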
_hash: a53a4cfb918341fb42dc92aae4012c91742007bed90308f7aa3972c94e9583b6 file: - statement: fetch_statement: - keyword: FETCH - numeric_literal: '100' - object_reference: naked_identifier: c - keyword: WITH - bracketed: start_bracket: ( word: timeout equals: '=' single_quote: "'1s'" end_bracket: ) - statement_terminator: ; - statement: fetch_statement: - keyword: FETCH - keyword: ALL - object_reference: naked_identifier: c1 - statement_terminator: ; - statement: declare_statement: - keyword: DECLARE - object_reference: naked_identifier: c - keyword: CURSOR - keyword: FOR - word: SUBSCRIBE - word: fetch_during_ingest - statement_terminator: ; - statement: declare_statement: - keyword: DECLARE - object_reference: naked_identifier: c - keyword: CURSOR - keyword: FOR - word: SUBSCRIBE - bracketed: - start_bracket: ( - word: SELECT - star: '*' - word: FROM - word: t1 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/000077500000000000000000000000001451700765000211355ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/mysql/.sqlfluff000066400000000000000000000000331451700765000227540ustar00rootroot00000000000000[sqlfluff] dialect = mysql sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_database.sql000066400000000000000000000007561451700765000246210ustar00rootroot00000000000000ALTER DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; ALTER DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N'; ALTER SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; ALTER DATABASE my_database READ ONLY DEFAULT; ALTER DATABASE my_database READ ONLY 0; ALTER DATABASE my_database READ ONLY 1; ALTER DATABASE READ ONLY DEFAULT; sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_database.yml000066400000000000000000000063431451700765000246210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
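The `.sqlfluff` file at the top of this mysql fixture directory pins every test in it to `dialect = mysql`. The same pinning can be expressed programmatically; a small sketch using the documented `FluffConfig` overrides mechanism, with the statement text taken from `alter_database.sql` above:

```python
# Programmatic equivalent of the directory-level `.sqlfluff` above.
from sqlfluff.core import FluffConfig, Linter

config = FluffConfig(overrides={"dialect": "mysql"})
linter = Linter(config=config)
parsed = linter.parse_string("ALTER DATABASE my_database READ ONLY 1;")
print(parsed.tree.stringify())
```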
_hash: 5d0dbcc43a689f9226f66b080216e263fb9711d557ce16897b279cff342bf6d7 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - comparison_operator: raw_comparison_operator: '=' - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE comparison_operator: raw_comparison_operator: '=' collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - alter_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - alter_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - numeric_literal: '0' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: my_database - alter_option_segment: - keyword: READ - keyword: ONLY - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - alter_option_segment: - keyword: READ - keyword: ONLY - keyword: DEFAULT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_table.sql000066400000000000000000000034031451700765000241340ustar00rootroot00000000000000ALTER TABLE `users` MODIFY COLUMN `name` varchar(255) NOT NULL, COMMENT "name of user"; ALTER TABLE `users` RENAME TO `user`; ALTER TABLE `user` RENAME AS `users`; ALTER TABLE `users` RENAME `user`; ALTER TABLE `users` RENAME COLUMN `col_1` TO `del_col_1`; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NULL DEFAULT NULL; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) NOT NULL; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) FIRST; ALTER TABLE `users` CHANGE COLUMN `birthday` `date_of_birth` INT(11) AFTER `name`; ALTER TABLE `users` DROP COLUMN `age`; ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD UNIQUE `index_name`(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD CONSTRAINT `index_name` UNIQUE INDEX (`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD UNIQUE INDEX `index_name`(`col_1`, `col_2`, `col_3`); ALTER 
TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`); ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE = 8; ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8; ALTER TABLE `foo`.`bar` ADD INDEX `index_name`(`col_1`, `col_2`, `col_3`) KEY_BLOCK_SIZE 8 COMMENT 'index for col_1, col_2, col_3'; ALTER TABLE `foo`.`bar` DROP INDEX `index_name`; ALTER TABLE `foo`.`bar` RENAME INDEX `index_name` to `new_index_name`; ALTER TABLE `foo`.`bar` RENAME KEY `key_name` to `new_key_name`; ALTER TABLE `x` ADD CONSTRAINT FOREIGN KEY(`xk`) REFERENCES `y`(`yk`); ALTER TABLE `users` ADD COLUMN `active` tinyint(1) DEFAULT '0'; ALTER TABLE `users` ADD COLUMN IF NOT EXISTS `active` tinyint(1) DEFAULT '0'; sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_table.yml000066400000000000000000000331251451700765000241420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d0df57b5c85a69c3d6155fc53a5d0294c392601ad748cdcf5e78191a73ef7d9 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: MODIFY - keyword: COLUMN - column_definition: quoted_identifier: '`name`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - parameter: COMMENT - quoted_literal: '"name of user"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - keyword: TO - table_reference: quoted_identifier: '`user`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`user`' - keyword: RENAME - keyword: AS - table_reference: quoted_identifier: '`users`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - table_reference: quoted_identifier: '`user`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: RENAME - keyword: COLUMN - column_reference: quoted_identifier: '`col_1`' - keyword: TO - column_reference: quoted_identifier: '`del_col_1`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: - quoted_identifier: '`date_of_birth`' - data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT null_literal: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' 
data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: CHANGE - keyword: COLUMN - column_reference: quoted_identifier: '`birthday`' - column_definition: quoted_identifier: '`date_of_birth`' data_type: data_type_identifier: INT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) - keyword: AFTER - column_reference: quoted_identifier: '`name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: DROP - keyword: COLUMN - column_reference: quoted_identifier: '`age`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '`index_name`' - keyword: UNIQUE - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: UNIQUE index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '`index_name`' - keyword: UNIQUE - keyword: INDEX - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: ADD - table_constraint: - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE comparison_operator: raw_comparison_operator: '=' numeric_literal: '8' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE numeric_literal: '8' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: ADD - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`index_name`' bracketed: - start_bracket: ( - column_reference: quoted_identifier: '`col_1`' - comma: ',' - column_reference: quoted_identifier: '`col_2`' - comma: ',' - column_reference: quoted_identifier: '`col_3`' - end_bracket: ) index_option: keyword: KEY_BLOCK_SIZE numeric_literal: '8' comment_clause: keyword: COMMENT quoted_literal: "'index for col_1, col_2, col_3'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: RENAME - keyword: INDEX - index_reference: quoted_identifier: '`index_name`' - keyword: to - index_reference: quoted_identifier: '`new_index_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '`foo`' - dot: . 
- quoted_identifier: '`bar`' - keyword: RENAME - keyword: KEY - index_reference: quoted_identifier: '`key_name`' - keyword: to - index_reference: quoted_identifier: '`new_key_name`' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`x`' - keyword: ADD - table_constraint: - keyword: CONSTRAINT - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`xk`' end_bracket: ) - keyword: REFERENCES - column_reference: quoted_identifier: '`y`' - bracketed: start_bracket: ( column_reference: quoted_identifier: '`yk`' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: ADD - keyword: COLUMN - column_definition: quoted_identifier: '`active`' data_type: data_type_identifier: tinyint bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'0'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: quoted_identifier: '`users`' - keyword: ADD - keyword: COLUMN - keyword: IF - keyword: NOT - keyword: EXISTS - column_definition: quoted_identifier: '`active`' data_type: data_type_identifier: tinyint bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) column_constraint_segment: keyword: DEFAULT quoted_literal: "'0'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_view.sql000066400000000000000000000004141451700765000240160ustar00rootroot00000000000000ALTER VIEW v2 AS SELECT c, d FROM v1; ALTER VIEW v2 AS (SELECT c, d FROM v1); ALTER VIEW v1 (c,d) AS SELECT a,max(b) FROM t1 GROUP BY a; ALTER VIEW v2 AS SELECT * FROM t2 WHERE s1 IN (SELECT s1 FROM t1) WITH CHECK OPTION; ALTER VIEW v2 AS SELECT 1 UNION SELECT 2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/alter_view.yml000066400000000000000000000116511451700765000240250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
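Every fixture header ends with the same instruction: regenerate rather than hand-edit. Scripted, that workflow is roughly the following sketch, run from the repository root. The generator invocation is exactly the one named in the headers; the pytest filter is an illustrative assumption, not a documented project command.

```python
# Sketch of the regeneration workflow the fixture headers describe.
import subprocess

# Rebuild the .yml fixtures (and their _hash fields) from the .sql files.
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)
# Then re-run the tests; the -k filter is just an illustrative narrowing.
subprocess.run(["python", "-m", "pytest", "-k", "dialect"], check=True)
```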
_hash: 592fa1665920db815669f5cbafad024f74137b2b6513a2e77b79e155af09eb38 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v1 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v1 end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: a - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: s1 keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: s1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_check_options: - keyword: WITH - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/analyze_table.sql000066400000000000000000000023311451700765000244670ustar00rootroot00000000000000ANALYZE TABLE some_table; ANALYZE TABLE some_table1, some_table2; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; ANALYZE LOCAL TABLE some_table; ANALYZE LOCAL TABLE some_table1, some_table2; ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col; ANALYZE TABLE some_table 
UPDATE HISTOGRAM ON some_col1, some_col2; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2; ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col; ANALYZE LOCAL TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2; ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col WITH 10 BUCKETS; ANALYZE TABLE some_table UPDATE HISTOGRAM ON some_col1, some_col2 WITH 10 BUCKETS; ANALYZE TABLE some_table DROP HISTOGRAM ON some_col; ANALYZE TABLE some_table DROP HISTOGRAM ON some_col1, some_col2; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col; ANALYZE NO_WRITE_TO_BINLOG TABLE some_table DROP HISTOGRAM ON some_col1, some_col2; ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col; ANALYZE LOCAL TABLE some_table DROP HISTOGRAM ON some_col1, some_col2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/analyze_table.yml000066400000000000000000000152711451700765000245000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: da31a67c607ae72d4f415cdc0446249963c84a488dfa30c0355b264bc81d221c file: - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - 
table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - keyword: WITH - numeric_literal: '10' - keyword: BUCKETS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: UPDATE - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - keyword: WITH - numeric_literal: '10' - keyword: BUCKETS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: DROP - keyword: HISTOGRAM - keyword: 'ON' - column_reference: naked_identifier: some_col1 - comma: ',' - column_reference: naked_identifier: some_col2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/begin.sql000066400000000000000000000000421451700765000227360ustar00rootroot00000000000000blocks:BEGIN select 1; END 
blocks~sqlfluff-2.3.5/test/fixtures/dialects/mysql/begin.yml000066400000000000000000000014551451700765000227510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c17580c773c4cd4a54b495af7c9b0c627eeecc3755e81f3e64bcf2954d9fc267 file: - statement: transaction_statement: naked_identifier: blocks colon: ':' keyword: BEGIN statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: END naked_identifier: blocks - statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/bit_value_literal.sql000066400000000000000000000000511451700765000253400ustar00rootroot00000000000000SELECT b'01'; SELECT B'01'; SELECT 0b01; sqlfluff-2.3.5/test/fixtures/dialects/mysql/bit_value_literal.yml000066400000000000000000000016151451700765000253510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 40e24928b664123e80cbcf5989b8fd4cb1d2301a677f09d21dbe7694667876eb file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "b'01'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: "B'01'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '0b01' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement.sql000066400000000000000000000000271451700765000246540ustar00rootroot00000000000000CALL somefunction('a');sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement.yml000066400000000000000000000011331451700765000246550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 90c6e7a8d3e7df9cc440d3a47ba3420af408d660fad2e96c0d473ac4c284bfec file: statement: call_segment: keyword: CALL naked_identifier: somefunction bracketed: start_bracket: ( quoted_literal: "'a'" end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_function_param.sql000066400000000000000000000000451451700765000277410ustar00rootroot00000000000000CALL somefunction(test('something'));sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_function_param.yml000066400000000000000000000014441451700765000277470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ce9527be1de4dc1ff68dcf5861b117c69aded6989df3417ddae83c6b05070aab file: statement: call_segment: keyword: CALL naked_identifier: somefunction bracketed: start_bracket: ( function: function_name: function_name_identifier: test bracketed: start_bracket: ( expression: quoted_literal: "'something'" end_bracket: ) end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_multiple_param.sql000066400000000000000000000001011451700765000277400ustar00rootroot00000000000000CALL somefunction('test', @test1, test2, test3('test'), "test4");sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_multiple_param.yml000066400000000000000000000017421451700765000277560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5a778027b0f59465226efdfe0c2630023d79b2de2005eb01e3d8a4c274aad8ff file: statement: call_segment: keyword: CALL naked_identifier: somefunction bracketed: - start_bracket: ( - quoted_literal: "'test'" - comma: ',' - variable: '@test1' - comma: ',' - variable: test2 - comma: ',' - function: function_name: function_name_identifier: test3 bracketed: start_bracket: ( expression: quoted_literal: "'test'" end_bracket: ) - comma: ',' - quoted_literal: '"test4"' - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_no_param.sql000066400000000000000000000000241451700765000265250ustar00rootroot00000000000000CALL somefunction();sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_no_param.yml000066400000000000000000000010751451700765000265360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61c00e19d51530dfbb152d6d47c8f86ee79a82aea89a387391abe0ba4ed18b84 file: statement: call_segment: keyword: CALL naked_identifier: somefunction bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_quoted.sql000066400000000000000000000000311451700765000262300ustar00rootroot00000000000000CALL `somefunction`('a');sqlfluff-2.3.5/test/fixtures/dialects/mysql/call_statement_quoted.yml000066400000000000000000000011401451700765000262340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
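The fixtures encode parse trees as plain nested dicts and lists keyed by segment type, so generic traversal code can query them without SQLFluff installed. A small illustrative walker (not project code) that collects every string value under a given key — for example, all keywords of a statement:

```python
# Generic walker over the nested dict/list structure used by the fixtures.
def iter_values(node, key):
    if isinstance(node, dict):
        for k, v in node.items():
            if k == key and isinstance(v, str):
                yield v
            else:
                yield from iter_values(v, key)
    elif isinstance(node, list):
        for item in node:
            yield from iter_values(item, key)


# Shape mirrors call_statement.yml above.
fixture = {"statement": {"call_segment": {"keyword": "CALL"}}}
assert list(iter_values(fixture, "keyword")) == ["CALL"]
```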
_hash: ff044a0e6a64e6b52701b1c0dc3f42436d42357c1ebde6133234212aeb5fdd22 file: statement: call_segment: keyword: CALL quoted_identifier: '`somefunction`' bracketed: start_bracket: ( quoted_literal: "'a'" end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/check_constraint.sql000066400000000000000000000004531451700765000252010ustar00rootroot00000000000000CREATE TABLE t1 ( CHECK (c1 <> c2), c1 INT CHECK (c1 > 10), c2 INT CONSTRAINT c2_positive CHECK (c2 > 0), c3 INT CHECK (c3 < 100), CONSTRAINT c1_nonzero CHECK (c1 <> 0), CHECK (c1 > c3) ); ALTER TABLE t1 ALTER CHECK c2_positive NOT ENFORCED; ALTER TABLE t1 DROP CONSTRAINT c1_nonzero; sqlfluff-2.3.5/test/fixtures/dialects/mysql/check_constraint.yml000066400000000000000000000100161451700765000251770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 819548e097fc750b37e657682d090047d51f01a458161bb97c18b26796a51181 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - table_constraint: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: c1 - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - column_reference: naked_identifier: c2 end_bracket: ) - comma: ',' - column_definition: naked_identifier: c1 data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: INT column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: c2_positive - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: c2 comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - column_definition: naked_identifier: c3 data_type: data_type_identifier: INT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: c3 comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - comma: ',' - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: c1_nonzero - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - comma: ',' - table_constraint: keyword: CHECK bracketed: start_bracket: ( expression: - column_reference: naked_identifier: c1 - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: c3 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: ALTER - keyword: CHECK - object_reference: naked_identifier: c2_positive - keyword: NOT - keyword: ENFORCED - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: t1 - 
keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: c1_nonzero - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/check_table.sql000066400000000000000000000010011451700765000240720ustar00rootroot00000000000000CHECK TABLE some_table FOR UPGRADE; CHECK TABLE some_table1, some_table2 FOR UPGRADE; CHECK TABLE some_table QUICK; CHECK TABLE some_table1, some_table2 QUICK; CHECK TABLE some_table FAST; CHECK TABLE some_table1, some_table2 FAST; CHECK TABLE some_table MEDIUM; CHECK TABLE some_table1, some_table2 MEDIUM; CHECK TABLE some_table EXTENDED; CHECK TABLE some_table1, some_table2 EXTENDED; CHECK TABLE some_table CHANGED; CHECK TABLE some_table1, some_table2 CHANGED; CHECK TABLE some_table FAST QUICK; sqlfluff-2.3.5/test/fixtures/dialects/mysql/check_table.yml000066400000000000000000000065051451700765000241120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5db8cdf9f2d5bbf23fbb9a36401f9236af4d0b54707dcbbfaa0a41d12905720b file: - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: FOR - keyword: UPGRADE - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: FOR - keyword: UPGRADE - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: QUICK - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: FAST - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: FAST - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: MEDIUM - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: MEDIUM - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: EXTENDED - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: EXTENDED - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: CHANGED - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - 
table_reference: naked_identifier: some_table2 - keyword: CHANGED - statement_terminator: ; - statement: check_table_statement: - keyword: CHECK - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: FAST - keyword: QUICK - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/checksum_table.sql000066400000000000000000000002511451700765000246250ustar00rootroot00000000000000CHECKSUM TABLE some_table QUICK; CHECKSUM TABLE some_table1, some_table2 QUICK; CHECKSUM TABLE some_table EXTENDED; CHECKSUM TABLE some_table1, some_table2 EXTENDED; sqlfluff-2.3.5/test/fixtures/dialects/mysql/checksum_table.yml000066400000000000000000000024721451700765000246360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 59a13f3bdf5aff9ad8d165d083d825c9d4897885a57ea598ca91a825143195fe file: - statement: checksum_table_statement: - keyword: CHECKSUM - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - statement_terminator: ; - statement: checksum_table_statement: - keyword: CHECKSUM - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: QUICK - statement_terminator: ; - statement: checksum_table_statement: - keyword: CHECKSUM - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: EXTENDED - statement_terminator: ; - statement: checksum_table_statement: - keyword: CHECKSUM - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - keyword: EXTENDED - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/close.sql000066400000000000000000000000201451700765000227530ustar00rootroot00000000000000CLOSE curcursor;sqlfluff-2.3.5/test/fixtures/dialects/mysql/close.yml000066400000000000000000000010071451700765000227630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be7b55beb5bbee604a34bad94304e061d9d9078a0755be0f543c4104ba563e86 file: statement: cursor_open_close_segment: keyword: CLOSE naked_identifier: curcursor statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/close_qualified.sql000066400000000000000000000000221451700765000250000ustar00rootroot00000000000000CLOSE `curcursor`;sqlfluff-2.3.5/test/fixtures/dialects/mysql/close_qualified.yml000066400000000000000000000010141451700765000250040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
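A property many of these paired files maintain is that the statements in a `.sql` fixture line up one-to-one with the `statement` entries in its `.yml` snapshot. A rough consistency check in that spirit — naive `;` splitting, so quoted semicolons or alternate delimiters (as in `begin.sql`) would defeat it; this is not the project's real test:

```python
import yaml  # PyYAML, assumed available


def statement_counts(sql_path, yml_path):
    # Naive split: good enough for fixtures without ';' inside literals.
    with open(sql_path, encoding="utf-8") as f:
        sql_count = sum(1 for part in f.read().split(";") if part.strip())
    with open(yml_path, encoding="utf-8") as f:
        tree = yaml.safe_load(f)["file"]
    nodes = tree if isinstance(tree, list) else [tree]
    yml_count = sum(1 for node in nodes if "statement" in node)
    return sql_count, yml_count
```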
_hash: 0ff3a6055061615df1ede7802e3b9da4ce61b68f2c3adb5ead2883c43efe7e5d file: statement: cursor_open_close_segment: keyword: CLOSE quoted_identifier: '`curcursor`' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/column_alias.sql000066400000000000000000000000731451700765000243240ustar00rootroot00000000000000SELECT 1 AS `one`; SELECT 2 AS 'two'; SELECT 3 AS "three"; sqlfluff-2.3.5/test/fixtures/dialects/mysql/column_alias.yml000066400000000000000000000022171451700765000243300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c9775a3b31f10dbd2195648c11645df65acb8dd428e41c317fbbd0c2941c615e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS quoted_identifier: '`one`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' alias_expression: keyword: AS quoted_literal: "'two'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '3' alias_expression: keyword: AS quoted_literal: '"three"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_database.sql000066400000000000000000000006271451700765000247520ustar00rootroot00000000000000CREATE DATABASE my_database; CREATE DATABASE IF NOT EXISTS my_database; CREATE DATABASE my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; CREATE DATABASE my_database DEFAULT CHARACTER SET = utf8mb4 COLLATE = utf8mb4_0900_ai_ci DEFAULT ENCRYPTION = 'N'; CREATE SCHEMA my_database DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT ENCRYPTION 'N'; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_database.yml000066400000000000000000000052011451700765000247450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d4ddb2e6e7c37d2270c3b4da1083779a7f7c8217fadb6566c35021d6eb462f2c file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - comparison_operator: raw_comparison_operator: '=' - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE comparison_operator: raw_comparison_operator: '=' collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'N'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - database_reference: naked_identifier: my_database - create_option_segment: - keyword: DEFAULT - keyword: CHARACTER - keyword: SET - naked_identifier: utf8mb4 - create_option_segment: keyword: COLLATE collation_reference: naked_identifier: utf8mb4_0900_ai_ci - create_option_segment: - keyword: DEFAULT - keyword: ENCRYPTION - quoted_literal: "'N'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_index.sql000066400000000000000000000011361451700765000243110ustar00rootroot00000000000000CREATE INDEX idx ON tbl (col); CREATE UNIQUE INDEX idx ON tbl (col); CREATE FULLTEXT INDEX idx ON tbl (col); CREATE SPATIAL INDEX idx ON tbl (col); CREATE INDEX idx USING BTREE ON tbl (col); CREATE INDEX idx USING HASH ON tbl (col); CREATE INDEX idx ON tbl (col ASC); CREATE INDEX idx ON tbl (col DESC); CREATE INDEX part_of_name ON customer (name(10)); CREATE INDEX idx ON tbl (col) ALGORITHM DEFAULT; CREATE INDEX idx ON tbl (col) ALGORITHM NOCOPY; CREATE INDEX idx ON tbl (col) ALGORITHM INSTANT; CREATE INDEX idx ON tbl (col) LOCK DEFAULT; CREATE INDEX idx ON tbl ((col1 + col2), (col1 - col2), col1); sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_index.yml000066400000000000000000000145701451700765000243210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5ba008b762b05319be63ea1b01262c62e054e8ba120801da8589dc396cd1fdd3 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: FULLTEXT - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: SPATIAL - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - index_type: - keyword: USING - keyword: BTREE - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - index_type: - keyword: USING - keyword: HASH - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col keyword: ASC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col keyword: DESC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: part_of_name - keyword: 'ON' - table_reference: naked_identifier: customer - bracketed: start_bracket: ( column_reference: naked_identifier: name bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: DEFAULT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: NOCOPY - statement_terminator: ; - statement: create_index_statement: - keyword: 
CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: ALGORITHM - keyword: INSTANT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: LOCK - keyword: DEFAULT - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: idx - keyword: 'ON' - table_reference: naked_identifier: tbl - bracketed: - start_bracket: ( - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col1 - binary_operator: '-' - column_reference: naked_identifier: col2 end_bracket: ) - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table.sql000066400000000000000000000007101451700765000242660ustar00rootroot00000000000000CREATE TABLE `foo` ( b VARCHAR(255) BINARY, `id` int(11) unsigned NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table `tickets` ( `id` serial primary key, `material_number` varchar(255) default null, `material_name` varchar(255) default null, `date_created` date not null default (current_date), `date_closed` date default null ); create table _ (a int); sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table.yml000066400000000000000000000104061451700765000242730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6f28a08cecf7c9aa1e1307f542e733f7375214f368705d08276c01e474442702 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) keyword: BINARY - comma: ',' - column_definition: - quoted_identifier: '`id`' - data_type: data_type_identifier: int bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '11' end_bracket: ) keyword: unsigned - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: AUTO_INCREMENT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '`id`' end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB - keyword: DEFAULT - parameter: CHARSET - comparison_operator: raw_comparison_operator: '=' - parameter: utf8mb4 - parameter: COLLATE - comparison_operator: raw_comparison_operator: '=' - parameter: utf8mb4_unicode_ci - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: quoted_identifier: '`tickets`' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '`id`' data_type: data_type_identifier: serial column_constraint_segment: - keyword: primary - keyword: key - comma: ',' - column_definition: quoted_identifier: '`material_number`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: keyword: default null_literal: 'null' - comma: ',' - column_definition: quoted_identifier: '`material_name`' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) column_constraint_segment: keyword: default null_literal: 'null' - comma: ',' - column_definition: - quoted_identifier: '`date_created`' - data_type: data_type_identifier: date - column_constraint_segment: - keyword: not - keyword: 'null' - column_constraint_segment: keyword: default bracketed: start_bracket: ( bare_function: current_date end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '`date_closed`' data_type: data_type_identifier: date column_constraint_segment: keyword: default null_literal: 'null' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: _ - bracketed: start_bracket: ( column_definition: naked_identifier: a data_type: data_type_identifier: int end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_column_charset.sql000066400000000000000000000001441451700765000273550ustar00rootroot00000000000000CREATE TABLE t1 ( col1 VARCHAR(5) CHARACTER SET latin1 COLLATE latin1_german1_ci ); sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_column_charset.yml000066400000000000000000000022361451700765000273630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f2c0b6a7781e5ac8b770f06d49f23f0556e6c70e8d26d5485bbeb6bf0f2f041c file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: - naked_identifier: col1 - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: CHARACTER - keyword: SET - naked_identifier: latin1 - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: latin1_german1_ci end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_constraint_unique.sql000066400000000000000000000002501451700765000301170ustar00rootroot00000000000000CREATE TABLE a( a INT NOT NULL, UNIQUE (a), UNIQUE idx_c(a), UNIQUE KEY (a), UNIQUE KEY idx_a(a), UNIQUE INDEX (a), UNIQUE INDEX idx_b(a) ) sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_constraint_unique.yml000066400000000000000000000044651451700765000301350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dc9e42ca7dac29e96e7c93596f533e5ccef4a0db1fe35bb1f46c8aaff86b57f5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE index_reference: naked_identifier: idx_c bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: KEY - index_reference: naked_identifier: idx_a - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: INDEX - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - comma: ',' - table_constraint: - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: idx_b - bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_datetime.sql000066400000000000000000000022771451700765000261540ustar00rootroot00000000000000CREATE TABLE `foo` ( created_date DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL, ts1 TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, dt1 DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, ts2 TIMESTAMP DEFAULT CURRENT_TIMESTAMP, dt2 DATETIME DEFAULT CURRENT_TIMESTAMP, ts3 TIMESTAMP DEFAULT 0, dt3 DATETIME DEFAULT 0, ts4 TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, dt4 DATETIME DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, ts5 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, -- default 0 ts6 TIMESTAMP NULL ON UPDATE CURRENT_TIMESTAMP, -- default NULL dt5 DATETIME ON UPDATE CURRENT_TIMESTAMP, -- default NULL dt6 DATETIME NOT 
NULL ON UPDATE CURRENT_TIMESTAMP, -- default 0 ts7 TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), ts8 TIMESTAMP NULL DEFAULT NULL, ts9 TIMESTAMP NULL DEFAULT 0, ts10 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP, ts11 TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP(), ts12 TIMESTAMP NULL DEFAULT '0000-00-00 00:00:00', ts13 TIMESTAMP NULL DEFAULT NOW ON UPDATE NOW, ts14 TIMESTAMP NULL DEFAULT NOW() ON UPDATE NOW(), ts15 TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP ) sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_datetime.yml000066400000000000000000000136441451700765000261560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 37b2a74b15a9dd3b024b90fe985bd48e80b07477eacd4e2b224f992196ac4ac5 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: quoted_identifier: '`foo`' - bracketed: - start_bracket: ( - column_definition: - naked_identifier: created_date - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ts1 - keyword: TIMESTAMP - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt1 - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts2 - keyword: TIMESTAMP - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt2 - keyword: DATETIME - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts3 - keyword: TIMESTAMP - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: dt3 - keyword: DATETIME - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: ts4 - keyword: TIMESTAMP - keyword: DEFAULT - numeric_literal: '0' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt4 - keyword: DATETIME - keyword: DEFAULT - numeric_literal: '0' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts5 - keyword: TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts6 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt5 - keyword: DATETIME - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: dt6 - keyword: DATETIME - keyword: NOT - keyword: 'NULL' - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts7 - keyword: TIMESTAMP - bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: 
CURRENT_TIMESTAMP - bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts8 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ts9 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - numeric_literal: '0' - comma: ',' - column_definition: - naked_identifier: ts10 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - comma: ',' - column_definition: - naked_identifier: ts11 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts12 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - quoted_literal: "'0000-00-00 00:00:00'" - comma: ',' - column_definition: - naked_identifier: ts13 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: NOW - keyword: 'ON' - keyword: UPDATE - keyword: NOW - comma: ',' - column_definition: - naked_identifier: ts14 - keyword: TIMESTAMP - keyword: 'NULL' - keyword: DEFAULT - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: NOW - bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: - naked_identifier: ts15 - keyword: TIMESTAMP - keyword: NOT - keyword: 'NULL' - keyword: DEFAULT - keyword: CURRENT_TIMESTAMP - keyword: 'ON' - keyword: UPDATE - keyword: CURRENT_TIMESTAMP - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_equals_optional.sql000066400000000000000000000000671451700765000275520ustar00rootroot00000000000000CREATE TABLE a ( id INT ) COLLATE utf8_general_ci; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_equals_optional.yml000066400000000000000000000014331451700765000275520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 58a605d729c2c4dda00b1d884f06b88b929780984ff660f716d2173b480119dc file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT end_bracket: ) - parameter: COLLATE - parameter: utf8_general_ci statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_index.sql000066400000000000000000000005171451700765000254620ustar00rootroot00000000000000CREATE TABLE foo ( id INT UNSIGNED AUTO_INCREMENT NOT NULL, a TEXT(500), b INT, c INT, PRIMARY KEY (id) COMMENT 'primary key (id)', FULLTEXT `idx_a` (a) COMMENT 'index (a)', INDEX `idx_prefix_a` (a(20)), INDEX `idx_b` (b) COMMENT 'index (b)', INDEX `idx_desc_b` (b DESC), INDEX `idx_asc_c` (c ASC) ) ENGINE=InnoDB; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_index.yml000066400000000000000000000073541451700765000254720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 239a8345bf5a129ab923e9daa7100b71b1ce4d9f17d737d949fcde022171e019 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_definition: - naked_identifier: id - data_type: data_type_identifier: INT keyword: UNSIGNED - column_constraint_segment: keyword: AUTO_INCREMENT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: a data_type: data_type_identifier: TEXT bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '500' end_bracket: ) - comma: ',' - column_definition: naked_identifier: b data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: c data_type: data_type_identifier: INT - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - index_option: comment_clause: keyword: COMMENT quoted_literal: "'primary key (id)'" - comma: ',' - table_constraint: keyword: FULLTEXT index_reference: quoted_identifier: '`idx_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (a)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_prefix_a`' bracketed: start_bracket: ( column_reference: naked_identifier: a bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) index_option: comment_clause: keyword: COMMENT quoted_literal: "'index (b)'" - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_desc_b`' bracketed: start_bracket: ( column_reference: naked_identifier: b keyword: DESC end_bracket: ) - comma: ',' - table_constraint: keyword: INDEX index_reference: quoted_identifier: '`idx_asc_c`' bracketed: start_bracket: ( column_reference: naked_identifier: c keyword: ASC end_bracket: ) - end_bracket: ) - parameter: ENGINE - comparison_operator: raw_comparison_operator: '=' - parameter: InnoDB statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_primary_foreign_keys.sql000066400000000000000000000021461451700765000306020ustar00rootroot00000000000000CREATE TABLE parent ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE child ( id INT, parent_id INT, INDEX par_ind (parent_id), FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE CASCADE ); CREATE TABLE product ( category INT NOT NULL, id INT NOT NULL, price DECIMAL, PRIMARY KEY(category, id) ); CREATE TABLE customer ( id INT NOT NULL, PRIMARY KEY (id) ); CREATE TABLE product_order ( product_category INT NOT NULL, product_id INT NOT NULL, customer_id INT NOT NULL, PRIMARY KEY(no), -- INDEX (product_category, product_id), -- INDEX (customer_id), FOREIGN KEY (product_category, product_id) REFERENCES product(category, id) ON UPDATE CASCADE ON DELETE RESTRICT, FOREIGN KEY (customer_id) REFERENCES customer(id) ); CREATE TABLE source_tag_assoc ( source_id INT UNSIGNED NOT NULL, tag_id INT UNSIGNED NOT NULL, PRIMARY KEY (source_id, tag_id), FOREIGN KEY (source_id) REFERENCES source (id) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES source_tag (id) ON 
DELETE CASCADE ); sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_primary_foreign_keys.yml000066400000000000000000000213131451700765000306010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fb91070986ad8b8e82dd3bd9b793200a0ed9398192aa045f0361fdcae06af34b file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: parent - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: child - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: INT - comma: ',' - table_constraint: keyword: INDEX index_reference: naked_identifier: par_ind bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: parent - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_definition: naked_identifier: category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: price data_type: data_type_identifier: DECIMAL - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: customer - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: product_order - bracketed: - start_bracket: ( - column_definition: naked_identifier: product_category data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - 
column_definition: naked_identifier: product_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: customer_id data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: 'no' end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_category - comma: ',' - column_reference: naked_identifier: product_id - end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: product - bracketed: - start_bracket: ( - column_reference: naked_identifier: category - comma: ',' - column_reference: naked_identifier: id - end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: CASCADE - keyword: 'ON' - keyword: DELETE - keyword: RESTRICT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: customer_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: customer - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: source_tag_assoc - bracketed: - start_bracket: ( - column_definition: naked_identifier: source_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: tag_id data_type: data_type_identifier: INT keyword: UNSIGNED column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: source_id - comma: ',' - column_reference: naked_identifier: tag_id - end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: source_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: tag_id end_bracket: ) - keyword: REFERENCES - column_reference: naked_identifier: source_tag - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_unique_key.sql000066400000000000000000000000501451700765000265210ustar00rootroot00000000000000create table a( b int unique key ); sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_table_unique_key.yml000066400000000000000000000014721451700765000265340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: af44e65430301a281d5baa8730323d7ad9c796dcb0140ca1771982c21ee78e69 file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: start_bracket: ( column_definition: naked_identifier: b data_type: data_type_identifier: int column_constraint_segment: - keyword: unique - keyword: key end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_trigger.sql000066400000000000000000000030221451700765000246410ustar00rootroot00000000000000CREATE TRIGGER delete_members_after_transactions AFTER DELETE ON transactions FOR EACH ROW DELETE FROM members WHERE username NOT IN (SELECT UNIQUE(username) FROM transactions); CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW BEGIN DELETE FROM some_table; INSERT INTO some_table; END; CREATE TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE UPDATE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger BEFORE INSERT ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER IF NOT EXISTS some_trigger AFTER DELETE ON some_table FOR EACH ROW DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW FOLLOWS some_other_trigger DELETE FROM other_table; CREATE TRIGGER some_trigger AFTER DELETE ON some_table FOR EACH ROW PRECEDES some_other_trigger DELETE FROM other_table; CREATE DEFINER=`root`@`127.0.0.1` TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; CREATE DEFINER=CURRENT_USER TRIGGER ins_sum BEFORE INSERT ON account FOR EACH ROW SET @sum = @sum + NEW.amount; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_trigger.yml000066400000000000000000000272431451700765000246560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ac600fa9db5bca6f4e64fd5a6141a57ea55630707bd387061e56299e0a5430d file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: delete_members_after_transactions - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: transactions - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: members where_clause: keyword: WHERE expression: - column_reference: naked_identifier: username - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: UNIQUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: username end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: transactions end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: transaction_statement: keyword: BEGIN statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: - column_reference: variable: '@sum' - binary_operator: + - column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - keyword: IF - keyword: NOT - keyword: EXISTS - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - 
keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: FOLLOWS - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: some_trigger - keyword: AFTER - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: some_table - keyword: FOR - keyword: EACH - keyword: ROW - keyword: PRECEDES - naked_identifier: some_other_trigger - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`root`' - at_sign_literal: '@' - quoted_identifier: '`127.0.0.1`' - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: - column_reference: variable: '@sum' - binary_operator: + - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: amount - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: keyword: CURRENT_USER - keyword: TRIGGER - trigger_reference: naked_identifier: ins_sum - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: account - keyword: FOR - keyword: EACH - keyword: ROW - statement: set_statement: keyword: SET variable: '@sum' comparison_operator: raw_comparison_operator: '=' expression: - column_reference: variable: '@sum' - binary_operator: + - column_reference: - naked_identifier: NEW - dot: . 
- naked_identifier: amount - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_user.sql000066400000000000000000000065431451700765000241670ustar00rootroot00000000000000CREATE USER jeffrey; CREATE USER IF NOT EXISTS jeffrey; CREATE USER 'prj_svc' IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS'; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'password'; CREATE USER "jeffrey"@"localhost" IDENTIFIED BY "password"; CREATE USER `jeffrey`@`localhost` IDENTIFIED BY "password"; CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'new_password' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password' PASSWORD EXPIRE INTERVAL 180 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 2; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'new_password1', 'jeanne'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'new_password2' REQUIRE X509 WITH MAX_QUERIES_PER_HOUR 60 PASSWORD HISTORY 5 ACCOUNT LOCK; CREATE USER 'jeffrey'@'localhost' IDENTIFIED WITH mysql_native_password BY 'password'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com'; CREATE USER 'u1'@'localhost' IDENTIFIED WITH caching_sha2_password BY 'sha2_password' AND IDENTIFIED WITH authentication_ldap_sasl AS 'uid=u1_ldap,ou=People,dc=example,dc=com' AND IDENTIFIED WITH authentication_fido; CREATE USER user IDENTIFIED WITH authentication_fido INITIAL AUTHENTICATION IDENTIFIED BY RANDOM PASSWORD; CREATE USER 'joe'@'10.0.0.1' DEFAULT ROLE administrator, developer; CREATE USER 'jeffrey'@'localhost' REQUIRE NONE; CREATE USER 'jeffrey'@'localhost' REQUIRE SSL; CREATE USER 'jeffrey'@'localhost' REQUIRE X509; CREATE USER 'jeffrey'@'localhost' REQUIRE ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com'; CREATE USER 'jeffrey'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' REQUIRE SUBJECT '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL demo client certificate/ CN=client/emailAddress=client@example.com' AND ISSUER '/C=SE/ST=Stockholm/L=Stockholm/ O=MySQL/CN=CA/emailAddress=ca@example.com' AND CIPHER 'EDH-RSA-DES-CBC3-SHA'; CREATE USER 'jeffrey'@'localhost' WITH MAX_QUERIES_PER_HOUR 500 MAX_UPDATES_PER_HOUR 100; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE NEVER; CREATE USER 'jeffrey'@'localhost' PASSWORD EXPIRE INTERVAL 180 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD HISTORY 6; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL DEFAULT; CREATE USER 'jeffrey'@'localhost' PASSWORD REUSE INTERVAL 360 DAY; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT OPTIONAL; CREATE USER 'jeffrey'@'localhost' PASSWORD REQUIRE CURRENT DEFAULT; CREATE USER 'jeffrey'@'localhost' FAILED_LOGIN_ATTEMPTS 4 PASSWORD_LOCK_TIME 2; CREATE USER 'jon'@'localhost' COMMENT 'Some information about Jon'; CREATE USER 'jim'@'localhost' ATTRIBUTE '{"fname": "James", "lname": "Scott", "phone": "123-456-7890"}'; 
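Each .sql fixture above is paired with a .yml file recording the parse tree sqlfluff produces for it; per the header comments in those files, the .yml side (including its _hash field) is regenerated with `python test/generate_parse_fixture_yml.py` rather than edited by hand. As a minimal sketch of inspecting such a tree directly — assuming sqlfluff is installed and using its core Python API (Linter / parse_string), which is broadly what the generation script automates — with the statement taken from create_user.sql above:

    from sqlfluff.core import Linter

    # Parse one fixture statement with the MySQL dialect.
    linter = Linter(dialect="mysql")
    parsed = linter.parse_string(
        "CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'password';"
    )

    # parsed.tree is the root segment; stringify() renders the same nested
    # structure (create_user_statement, role_reference, quoted identifiers,
    # statement_terminator, ...) that create_user.yml records below.
    print(parsed.tree.stringify())

Running this against any of the .sql fixtures in this directory should reproduce the node names serialised in the matching .yml file, which is what the hash check in the test suite enforces.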
sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_user.yml000066400000000000000000000340421451700765000241640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 05f46a3017e1faedf744739775dca1f558debcd801def9827abd8848dca5185e file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: jeffrey - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: "'prj_svc'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: AWSAuthenticationPlugin - keyword: AS - quoted_literal: "'RDS'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_literal: '"jeffrey"' - at_sign_literal: '@' - quoted_literal: '"localhost"' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: '`jeffrey`' - at_sign_literal: '@' - quoted_identifier: '`localhost`' - keyword: IDENTIFIED - keyword: BY - quoted_literal: '"password"' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '3' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'new_password1'" - comma: ',' - role_reference: - quoted_identifier: "'jeanne'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'new_password2'" - keyword: REQUIRE - keyword: X509 - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '60' - keyword: PASSWORD - 
keyword: HISTORY - numeric_literal: '5' - keyword: ACCOUNT - keyword: LOCK - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: mysql_native_password - keyword: BY - quoted_literal: "'password'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'u1'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: caching_sha2_password - keyword: BY - quoted_literal: "'sha2_password'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_ldap_sasl - keyword: AS - quoted_literal: "'uid=u1_ldap,ou=People,dc=example,dc=com'" - keyword: AND - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user - keyword: IDENTIFIED - keyword: WITH - object_reference: naked_identifier: authentication_fido - keyword: INITIAL - keyword: AUTHENTICATION - keyword: IDENTIFIED - keyword: BY - keyword: RANDOM - keyword: PASSWORD - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'joe'" - at_sign_literal: '@' - quoted_identifier: "'10.0.0.1'" - keyword: DEFAULT - keyword: ROLE - role_reference: naked_identifier: administrator - comma: ',' - role_reference: naked_identifier: developer - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: NONE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SSL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: X509 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - 
quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: REQUIRE - keyword: SUBJECT - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL demo client certificate/\n\ \ CN=client/emailAddress=client@example.com'" - keyword: AND - keyword: ISSUER - quoted_literal: "'/C=SE/ST=Stockholm/L=Stockholm/\n O=MySQL/CN=CA/emailAddress=ca@example.com'" - keyword: AND - keyword: CIPHER - quoted_literal: "'EDH-RSA-DES-CBC3-SHA'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: WITH - keyword: MAX_QUERIES_PER_HOUR - numeric_literal: '500' - keyword: MAX_UPDATES_PER_HOUR - numeric_literal: '100' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: NEVER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: EXPIRE - keyword: INTERVAL - numeric_literal: '180' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: HISTORY - numeric_literal: '6' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REUSE - keyword: INTERVAL 
- numeric_literal: '360' - keyword: DAY - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: OPTIONAL - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: PASSWORD - keyword: REQUIRE - keyword: CURRENT - keyword: DEFAULT - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jeffrey'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: FAILED_LOGIN_ATTEMPTS - numeric_literal: '4' - keyword: PASSWORD_LOCK_TIME - numeric_literal: '2' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jon'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: COMMENT - quoted_literal: "'Some information about Jon'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: - quoted_identifier: "'jim'" - at_sign_literal: '@' - quoted_identifier: "'localhost'" - keyword: ATTRIBUTE - quoted_literal: "'{\"fname\": \"James\", \"lname\": \"Scott\", \"phone\": \"\ 123-456-7890\"}'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_view.sql000066400000000000000000000005151451700765000241540ustar00rootroot00000000000000CREATE VIEW v1 (c,d) AS SELECT a,b FROM t1; CREATE OR REPLACE VIEW v1 (c,d,e,f) AS SELECT a,b, a IN (SELECT a+2 FROM t1), a = all (SELECT a FROM t1) FROM t1; CREATE VIEW v2 AS SELECT a FROM t1 WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS (SELECT a FROM t1) WITH CASCADED CHECK OPTION; CREATE VIEW v2 AS SELECT 1 UNION SELECT 2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/create_view.yml000066400000000000000000000141561451700765000241640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3bb1e638499d52434c20d168ce195274b3a557ac2b38cf7244b16d97e841d3aa file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: v1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c - comma: ',' - column_reference: naked_identifier: d - comma: ',' - column_reference: naked_identifier: e - comma: ',' - column_reference: naked_identifier: f - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a binary_operator: + numeric_literal: '2' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: all bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 end_bracket: ) - with_check_options: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v2 - keyword: AS - set_expression: - select_statement: select_clause: keyword: SELECT 
select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/deallocate_prepare.sql000066400000000000000000000000311451700765000254630ustar00rootroot00000000000000DEALLOCATE PREPARE dynam;sqlfluff-2.3.5/test/fixtures/dialects/mysql/deallocate_prepare.yml000066400000000000000000000010301451700765000254650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47c2b5531df3cb85f6eb69f699db36588afa339a5cde82a5058f928718671932 file: statement: deallocate_segment: - keyword: DEALLOCATE - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_condition.sql000066400000000000000000000000611451700765000253200ustar00rootroot00000000000000DECLARE random_condition_name CONDITION FOR 1051;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_condition.yml000066400000000000000000000011271451700765000253260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7bdce909905e143f2896cccb3ca99f0b52c2f36ae44e5cbbbf19ab71e621efc1 file: statement: declare_statement: - keyword: DECLARE - naked_identifier: random_condition_name - keyword: CONDITION - keyword: FOR - numeric_literal: '1051' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_continue_handler_sqlexception.sql000066400000000000000000000000761451700765000314570ustar00rootroot00000000000000DECLARE continue handler for sqlexception begin select 1; end;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_continue_handler_sqlexception.yml000066400000000000000000000016311451700765000314570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0ef9e8623111d364c44fed8e8f3660ed551cf7158ff3dc87e02ab81cd98a02e0 file: - statement: declare_statement: - keyword: DECLARE - keyword: continue - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_cursor.sql000066400000000000000000000000411451700765000246450ustar00rootroot00000000000000DECLARE test CURSOR FOR SELECT 1;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_cursor.yml000066400000000000000000000013131451700765000246520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 55c9581409fbc9d064dd492a2842e4df7d4102077d475bb9f50a7a33906204cd file: statement: declare_statement: - keyword: DECLARE - naked_identifier: test - keyword: CURSOR - keyword: FOR - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_default_numeric.sql000066400000000000000000000000321451700765000264760ustar00rootroot00000000000000DECLARE abc int default 1;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_default_numeric.yml000066400000000000000000000011301451700765000265000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a302920845b75b85f450540a7283e49735d86a7878f913048cc3adf4efc5adfc file: statement: declare_statement: - keyword: DECLARE - variable: abc - data_type: data_type_identifier: int - keyword: default - numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_default_quotedliteral.sql000066400000000000000000000000441451700765000277150ustar00rootroot00000000000000DECLARE abc longtext default 'test';sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_default_quotedliteral.yml000066400000000000000000000011411451700765000277160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 411913c6181d73ee4d79bbfca1a497ba4503fd8a371273f951c7d403cba06a02 file: statement: declare_statement: - keyword: DECLARE - variable: abc - data_type: data_type_identifier: longtext - keyword: default - quoted_literal: "'test'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_exit_handler_sqlexception.sql000066400000000000000000000000721451700765000306000ustar00rootroot00000000000000DECLARE exit handler for sqlexception begin select 1; end;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_exit_handler_sqlexception.yml000066400000000000000000000016251451700765000306070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fa28b2deb8b2ba8bbc00b29368ae51e09b2c142b0654a70ae093cb9e37d507b0 file: - statement: declare_statement: - keyword: DECLARE - keyword: exit - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_local_variable.sql000066400000000000000000000000201451700765000262640ustar00rootroot00000000000000DECLARE abc int;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_local_variable.yml000066400000000000000000000010461451700765000262770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9f8d9b37c576c621698357759133c61224aedf19208cfaa65ef8ac6ca395b35 file: statement: declare_statement: keyword: DECLARE variable: abc data_type: data_type_identifier: int statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_undo_handler_sqlexception.sql000066400000000000000000000000721451700765000305740ustar00rootroot00000000000000DECLARE undo handler for sqlexception begin select 1; end;sqlfluff-2.3.5/test/fixtures/dialects/mysql/declare_undo_handler_sqlexception.yml000066400000000000000000000016251451700765000306030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9ec056f47e16db2062f9cfe150451dd9dc8bb603e0c028f404f7b7cd77a7ee3a file: - statement: declare_statement: - keyword: DECLARE - keyword: undo - keyword: handler - keyword: for - keyword: sqlexception - statement: transaction_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/delete_multitable.sql000066400000000000000000000014521451700765000253440ustar00rootroot00000000000000DELETE a FROM a JOIN b USING (id) WHERE b.name = 'example'; DELETE FROM somelog WHERE user = 'jcole' ORDER BY timestamp_column LIMIT 1; DELETE LOW_PRIORITY QUICK IGNORE a FROM a JOIN b USING (id) WHERE b.name = 'example'; DELETE FROM a PARTITION (p) WHERE b.name = 'example'; -- Multiple-Table Syntax 1 DELETE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE LOW_PRIORITY QUICK IGNORE t1, t2 FROM t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; -- Multiple-Table Syntax 2 DELETE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE LOW_PRIORITY QUICK IGNORE FROM t1, t2 USING t1 INNER JOIN t2 INNER JOIN t3 WHERE t1.id=t2.id AND t2.id=t3.id; DELETE a1, a2 FROM t1 AS a1 INNER JOIN t2 AS a2 WHERE a1.id=a2.id; sqlfluff-2.3.5/test/fixtures/dialects/mysql/delete_multitable.yml000066400000000000000000000260611451700765000253510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8891f999d204f5ecaae8e896c7a6e185578e86c73f67978e1c5881a8b231eda1 file: - statement: delete_statement: keyword: DELETE table_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . 
- naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: somelog where_clause: keyword: WHERE expression: column_reference: naked_identifier: user comparison_operator: raw_comparison_operator: '=' quoted_literal: "'jcole'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: timestamp_column limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - table_reference: naked_identifier: a - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - keyword: USING - bracketed: start_bracket: ( naked_identifier: id end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: p end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: b - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'example'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: LOW_PRIORITY - keyword: QUICK - keyword: IGNORE - keyword: FROM - table_reference: naked_identifier: t1 - comma: ',' - table_reference: naked_identifier: t2 - using_clause: keyword: USING from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: t1 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t3 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t3 - dot: . - naked_identifier: id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - table_reference: naked_identifier: a1 - comma: ',' - table_reference: naked_identifier: a2 - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: keyword: AS naked_identifier: a1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 alias_expression: keyword: AS naked_identifier: a2 - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a2 - dot: . 
- naked_identifier: id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_function.sql000066400000000000000000000002361451700765000255420ustar00rootroot00000000000000DELIMITER ~ CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ;sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_function.yml000066400000000000000000000032611451700765000255450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e86cfba356795fe4599cef806bd5c5c782881b14785deacab9bef35c96bef07b file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: quoted_identifier: '`add`' - function_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: longtext - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_procedure.sql000066400000000000000000000002301451700765000256770ustar00rootroot00000000000000DELIMITER ~ CREATE PROCEDURE `testprocedure`(test int) DETERMINISTIC LANGUAGE SQL CONTAINS SQL SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~ DELIMITER ;sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_procedure.yml000066400000000000000000000031571451700765000257140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f8885557a4ee5d4d90b6fc6f07983158a33d5c9eaadcf2cdcae85be72280973c file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter: test data_type: data_type_identifier: int end_bracket: ) - characteristic_statement: - keyword: DETERMINISTIC - keyword: LANGUAGE - keyword: SQL - keyword: CONTAINS - keyword: SQL - keyword: SQL - keyword: SECURITY - keyword: DEFINER - function_definition: transaction_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: END - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_select.sql000066400000000000000000000000411451700765000251660ustar00rootroot00000000000000DELIMITER ~ SELECT 1~ DELIMITER ;sqlfluff-2.3.5/test/fixtures/dialects/mysql/delimiter_select.yml000066400000000000000000000013501451700765000251740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a7b5e6298ad0f76791911956fe2dcb7762a8483f6dd646d30160c4e0c39a9b2e file: - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: '~' - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: '~' - statement: delimiter_statement: keyword: DELIMITER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_function.sql000066400000000000000000000000471451700765000245300ustar00rootroot00000000000000DROP FUNCTION IF EXISTS `testfunction`;sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_function.yml000066400000000000000000000011431451700765000245300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36f8ff2a93f64a4996cc8a997d1048029f86ffcd5114b0db81ca5b4c984860bf file: statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: quoted_identifier: '`testfunction`' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index.sql000066400000000000000000000000451451700765000240100ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo`; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index.yml000066400000000000000000000012621451700765000240140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 55a1c03846735c33fdff186577506b3f00b63f73d930c2bf8974f73d4d0f7cea file: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index_with_algorithm.sql000066400000000000000000000005121451700765000271100ustar00rootroot00000000000000DROP INDEX `test` ON `table1`.`foo` ALGORITHM = DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM = COPY; DROP INDEX `test` ON `table1`.`foo` ALGORITHM DEFAULT; DROP INDEX `test` ON `table1`.`foo` ALGORITHM INPLACE; DROP INDEX `test` ON `table1`.`foo` ALGORITHM COPY; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index_with_algorithm.yml000066400000000000000000000051241451700765000271160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2f5fbf257ef1ab0490d88f2c0a9840283f4ea5595471e662572591e7fb25dd6b file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - comparison_operator: raw_comparison_operator: '=' - keyword: COPY - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . - quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: INPLACE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: - quoted_identifier: '`table1`' - dot: . 
- quoted_identifier: '`foo`' - keyword: ALGORITHM - keyword: COPY - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index_with_lock.sql000066400000000000000000000005441451700765000260570ustar00rootroot00000000000000DROP INDEX `test` ON `table1` LOCK = DEFAULT; DROP INDEX `test` ON `table1` LOCK = NONE; DROP INDEX `test` ON `table1` LOCK = SHARED; DROP INDEX `test` ON `table1` LOCK = EXCLUSIVE; DROP INDEX `test` ON `table1` LOCK DEFAULT; DROP INDEX `test` ON `table1` LOCK NONE; DROP INDEX `test` ON `table1` LOCK SHARED; DROP INDEX `test` ON `table1` LOCK EXCLUSIVE; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_index_with_lock.yml000066400000000000000000000056161451700765000260660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c91a2276d5d611e727906a9c2b014e808dc5fb97fe6f238268b7fed429adb32 file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - comparison_operator: raw_comparison_operator: '=' - keyword: EXCLUSIVE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: DEFAULT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: NONE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: SHARED - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '`test`' - keyword: 'ON' - table_reference: quoted_identifier: '`table1`' - keyword: LOCK - keyword: EXCLUSIVE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_prepare.sql000066400000000000000000000000231451700765000243330ustar00rootroot00000000000000DROP PREPARE 
dynam;sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_prepare.yml000066400000000000000000000010221451700765000243350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6fb0ffbafdc168dc91f61b1dc45a6ae2bb23726da0391e6a2bb85fb0c72e481d file: statement: deallocate_segment: - keyword: DROP - keyword: PREPARE - naked_identifier: dynam statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_procedure.sql000066400000000000000000000000511451700765000246660ustar00rootroot00000000000000DROP PROCEDURE IF EXISTS `testprocedure`;sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_procedure.yml000066400000000000000000000011511451700765000246720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6226dad29f08fa6bc945f7ffbd3ee824cfb9513fec02f752efccc635d8bfc0e6 file: statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - object_reference: quoted_identifier: '`testprocedure`' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_table.sql000066400000000000000000000002141451700765000237660ustar00rootroot00000000000000DROP TEMPORARY TABLE IF EXISTS t; DROP TEMPORARY TABLE IF EXISTS t, t2; DROP TABLE IF EXISTS t RESTRICT; DROP TABLE IF EXISTS t CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_table.yml000066400000000000000000000025041451700765000237740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 079d162da6241fd53a7106c843d2c5b885c9fa93adf3e4e1fedc73f706bc00c4 file: - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TEMPORARY - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - comma: ',' - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: RESTRICT - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_trigger.sql000066400000000000000000000001511451700765000243420ustar00rootroot00000000000000DROP TRIGGER trigger_name; DROP TRIGGER schema_name.trigger_name; DROP TRIGGER IF EXISTS trigger_name; sqlfluff-2.3.5/test/fixtures/dialects/mysql/drop_trigger.yml000066400000000000000000000017231451700765000243520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3ea2814154a2f377dfa41e981612ca02ec575bbb9384d4356a6f87d69d8367df file: - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: - naked_identifier: schema_name - dot: . - naked_identifier: trigger_name - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: trigger_name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt.sql000066400000000000000000000000151451700765000262450ustar00rootroot00000000000000execute test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt.yml000066400000000000000000000007721451700765000262610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 645d3d2f6b6a99f0b56f9e61558eddbd807982b88dd4b5a85926d08ad6fde973 file: statement: execute_segment: keyword: execute naked_identifier: test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt_using.sql000066400000000000000000000000311451700765000274500ustar00rootroot00000000000000execute test using @test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt_using.yml000066400000000000000000000010471451700765000274620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c84c2b918a06a90ade5b2628eaf69ac7ee1220e235e947be52d9663ea1717e60 file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt_using_multiple_variable.sql000066400000000000000000000000411451700765000332310ustar00rootroot00000000000000execute test using @test, @test1;sqlfluff-2.3.5/test/fixtures/dialects/mysql/execute_prepared_stmt_using_multiple_variable.yml000066400000000000000000000011211451700765000332330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e883899cefef9dcaa1775b3ebae0ba07aa11a5b9fb3c10843c74b452f7e9921c file: statement: execute_segment: - keyword: execute - naked_identifier: test - keyword: using - variable: '@test' - comma: ',' - variable: '@test1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/explain.sql000066400000000000000000000002271451700765000233170ustar00rootroot00000000000000explain select 1; explain update tbl set foo = 1 where bar = 2; explain delete from tbl where foo = 1; explain insert into tbl (col1) values (123); sqlfluff-2.3.5/test/fixtures/dialects/mysql/explain.yml000066400000000000000000000045331451700765000233250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 77b5d16c2cddca62f5fb8d333728260a2f33be4f0e46cecfdc10d68bf5e4d542 file: - statement: explain_statement: keyword: explain select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain update_statement: keyword: update table_reference: naked_identifier: tbl set_clause_list: keyword: set set_clause: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' where_clause: keyword: where expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: explain_statement: keyword: explain delete_statement: keyword: delete from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl where_clause: keyword: where expression: column_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: tbl - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - values_clause: keyword: values bracketed: start_bracket: ( numeric_literal: '123' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch.sql000066400000000000000000000000321451700765000227420ustar00rootroot00000000000000fetch curcursor into test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch.yml000066400000000000000000000010531451700765000227500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ffb648d6082513210891e9d343ab35d75b56ddc97fee1356ec805b821eb52fbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_from.sql000066400000000000000000000000371451700765000237720ustar00rootroot00000000000000fetch from curcursor into test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_from.yml000066400000000000000000000010771451700765000240010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ac2545f40f18314609db52b54bcf5af85eeb4a464c012915e157261c76d0acdb file: statement: cursor_fetch_segment: - keyword: fetch - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_multiple.sql000066400000000000000000000000411451700765000246550ustar00rootroot00000000000000fetch curcursor into test, test2;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_multiple.yml000066400000000000000000000011221451700765000246600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8f2e64f6c4bfeb9e324e9f8fbf9ab2024c805ba70341e9763ca33556c367cbc file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: test - comma: ',' - variable: test2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_next_from.sql000066400000000000000000000000441451700765000250260ustar00rootroot00000000000000fetch next from curcursor into test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_next_from.yml000066400000000000000000000011231451700765000250270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5799de98d4fbb26b52d336da9b39a4a58ecbce7623c1a8aefa4ccf02ba3ec0bc file: statement: cursor_fetch_segment: - keyword: fetch - keyword: next - keyword: from - naked_identifier: curcursor - keyword: into - variable: test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_session.sql000066400000000000000000000000331451700765000245060ustar00rootroot00000000000000fetch curcursor into @test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_session.yml000066400000000000000000000010561451700765000245160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0282fb848f6361a6299ff7b64ab47fb43966975f758c1e30e5ed1f55e66ee7ff file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_session_multiple.sql000066400000000000000000000000431451700765000264220ustar00rootroot00000000000000fetch curcursor into @test, @test2;sqlfluff-2.3.5/test/fixtures/dialects/mysql/fetch_session_multiple.yml000066400000000000000000000011301451700765000264220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b21f217493fdc862ebf564013c0b16d8d4139163bc5a9910cb3f586f45c04114 file: statement: cursor_fetch_segment: - keyword: fetch - naked_identifier: curcursor - keyword: into - variable: '@test' - comma: ',' - variable: '@test2' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/flush.sql000066400000000000000000000006411451700765000230000ustar00rootroot00000000000000FLUSH LOGS; FLUSH NO_WRITE_TO_BINLOG BINARY LOGS, ENGINE LOGS, ERROR LOGS, GENERAL LOGS, HOSTS; FLUSH LOCAL PRIVILEGES, OPTIMIZER_COSTS, RELAY LOGS, SLOW LOGS, STATUS, USER_RESOURCES; FLUSH RELAY LOGS FOR CHANNEL my_channel; FLUSH TABLES; FLUSH TABLES WITH READ LOCK; FLUSH TABLES table1; FLUSH TABLES table1, `foo`.`bar`; FLUSH TABLES table1, `foo`.`bar` WITH READ LOCK; FLUSH TABLES table1, `foo`.`bar` FOR EXPORT; sqlfluff-2.3.5/test/fixtures/dialects/mysql/flush.yml000066400000000000000000000054371451700765000230120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 19d48e39bc87b05358abcf06a2d2edd289d9e249da43532890fd301fe03d83df file: - statement: flush_statement: - keyword: FLUSH - keyword: LOGS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: NO_WRITE_TO_BINLOG - keyword: BINARY - keyword: LOGS - comma: ',' - keyword: ENGINE - keyword: LOGS - comma: ',' - keyword: ERROR - keyword: LOGS - comma: ',' - keyword: GENERAL - keyword: LOGS - comma: ',' - keyword: HOSTS - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: LOCAL - keyword: PRIVILEGES - comma: ',' - keyword: OPTIMIZER_COSTS - comma: ',' - keyword: RELAY - keyword: LOGS - comma: ',' - keyword: SLOW - keyword: LOGS - comma: ',' - keyword: STATUS - comma: ',' - keyword: USER_RESOURCES - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: RELAY - keyword: LOGS - keyword: FOR - keyword: CHANNEL - object_reference: naked_identifier: my_channel - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . - quoted_identifier: '`bar`' - keyword: WITH - keyword: READ - keyword: LOCK - statement_terminator: ; - statement: flush_statement: - keyword: FLUSH - keyword: TABLES - table_reference: naked_identifier: table1 - comma: ',' - table_reference: - quoted_identifier: '`foo`' - dot: . 
sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index.sql
SELECT * FROM onetable FORCE INDEX (idx_index);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 76f81441b5f5b820ccbdc7248648433a05eac5d802017015e52e7d2adbfe9444
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: FORCE
            - keyword: INDEX
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: idx_index
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_group_by.sql
SELECT * FROM onetable FORCE INDEX FOR GROUP BY (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_group_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 87773b30b37f699859714e573179e58c6f5dafb6191d5538e681e5831418af67
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: FORCE
            - keyword: INDEX
            - keyword: FOR
            - keyword: GROUP
            - keyword: BY
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_join.sql
SELECT * FROM onetable FORCE INDEX FOR JOIN (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_join.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b6b94cfaf91b378bf96312ae5b473616021fb82e21522046a4e9543d1191e027
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: FORCE
            - keyword: INDEX
            - keyword: FOR
            - keyword: JOIN
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_order_by.sql
SELECT * FROM onetable FORCE INDEX FOR ORDER BY (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_for_order_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d0c09a9cf14085e00ca694807253593f891828a1f101948491a5ad7083d4546e
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: FORCE
            - keyword: INDEX
            - keyword: FOR
            - keyword: ORDER
            - keyword: BY
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_left_join.sql
SELECT onetable.f1, twotable.f1 FROM onetable left join twotable FORCE INDEX (idx_index) on onetable.f1 = twotable.f1;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_index_left_join.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7156f479a87ebed7b8e34b4cd7016aadd3dab7e0bd6b0ad14bc7d4795fd2de90
file:
  statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: onetable
          - dot: .
          - naked_identifier: f1
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: twotable
          - dot: .
          - naked_identifier: f1
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
          join_clause:
          - keyword: left
          - keyword: join
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: twotable
              index_hint_clause:
              - keyword: FORCE
              - keyword: INDEX
              - bracketed:
                  start_bracket: (
                  object_reference:
                    naked_identifier: idx_index
                  end_bracket: )
          - join_on_condition:
              keyword: 'on'
              expression:
              - column_reference:
                - naked_identifier: onetable
                - dot: .
                - naked_identifier: f1
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: twotable
                - dot: .
                - naked_identifier: f1
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_key.sql
SELECT * FROM onetable FORCE KEY (idx_index);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/force_key.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4141b5f48dff30ccb10e63d5fffe11c7ba21cecf1f39f273af9cd2de9e2ef10f
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: FORCE
            - keyword: KEY
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: idx_index
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_comment.sql
CREATE FUNCTION `testfunction`(var1 int) RETURNS longtext COMMENT 'this is a comment' DETERMINISTIC BEGIN END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_comment.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8f8d250d436ac39910d375ea85845e11190e3b5f622719a189914bf297106cf4
file:
  statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`testfunction`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: var1
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - comment_clause:
        keyword: COMMENT
        quoted_literal: "'this is a comment'"
    - characteristic_statement:
        keyword: DETERMINISTIC
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          transaction_statement:
            keyword: END
  statement_terminator: '~'
sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_definer.sql
CREATE DEFINER=`test`@`%` FUNCTION `testfunction`() RETURNS longtext DETERMINISTIC BEGIN SELECT 1; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_definer.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 63402a8d737600b4ceec102c8982aca0423da4d2e24e7cd7893a8d394c15091c
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - definer_segment:
        keyword: DEFINER
        comparison_operator:
          raw_comparison_operator: '='
        role_reference:
        - quoted_identifier: '`test`'
        - at_sign_literal: '@'
        - quoted_identifier: '`%`'
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`testfunction`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
        keyword: DETERMINISTIC
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_modifies_sql.sql
CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC MODIFIES SQL DATA BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_modifies_sql.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c950f8fdbb6c66257b3ecd2a33e1db18ef6b958973246fbc0e17486c6e30758e
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: DETERMINISTIC
      - keyword: MODIFIES
      - keyword: SQL
      - keyword: DATA
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_no_sql.sql
CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC NO SQL BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_no_sql.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7cf6b2f6629057890ce653e7f421bdab2d9b541afed3344da33927c25e3f2d4e
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: DETERMINISTIC
      - keyword: 'NO'
      - keyword: SQL
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_notdeterministic.sql
CREATE FUNCTION `add`(test int) RETURNS longtext NOT DETERMINISTIC BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_notdeterministic.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8a419b4154858e8c36166a450bc605ade73c8c8bd9cf192c2581831c5ba5213d
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: NOT
      - keyword: DETERMINISTIC
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_reads_sql.sql
CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC READS SQL DATA BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_reads_sql.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4f21236c7ba039e1347af4a274638f535a951ab5775f1bd9ced459f5a19dd5d8
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: DETERMINISTIC
      - keyword: READS
      - keyword: SQL
      - keyword: DATA
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_return.sql
CREATE FUNCTION `testfunction`(var1 int) RETURNS int DETERMINISTIC BEGIN RETURN (var1 + 1); END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_return.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f900c91d9372a026ebd6e47b33e72d76158191583040ff73d52264c797df7317
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`testfunction`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: var1
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: int
    - characteristic_statement:
        keyword: DETERMINISTIC
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          return_statement:
            keyword: RETURN
            expression:
              bracketed:
                start_bracket: (
                expression:
                  column_reference:
                    naked_identifier: var1
                  binary_operator: +
                  numeric_literal: '1'
                end_bracket: )
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_sql_security_definer.sql
CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY DEFINER BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_sql_security_definer.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 74abdb518948c19065ad637d90fbcfff31c9b6cfde82cf79d4586f4605f9a2a6
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: DETERMINISTIC
      - keyword: SQL
      - keyword: SECURITY
      - keyword: DEFINER
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_sql_security_invoker.sql
CREATE FUNCTION `add`(test int) RETURNS longtext DETERMINISTIC SQL SECURITY INVOKER BEGIN SELECT 1 + 2; END~

sqlfluff-2.3.5/test/fixtures/dialects/mysql/function_sql_security_invoker.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c69f593c8af3fc5de0f6d04a417a91f5c128e29bb2d4cde483edd95f1799b39b
file:
- statement:
    create_function_statement:
    - keyword: CREATE
    - keyword: FUNCTION
    - function_name:
        quoted_identifier: '`add`'
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: test
          data_type:
            data_type_identifier: int
          end_bracket: )
    - keyword: RETURNS
    - data_type:
        data_type_identifier: longtext
    - characteristic_statement:
      - keyword: DETERMINISTIC
      - keyword: SQL
      - keyword: SECURITY
      - keyword: INVOKER
    - function_definition:
        transaction_statement:
          keyword: BEGIN
        statement:
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                expression:
                - numeric_literal: '1'
                - binary_operator: +
                - numeric_literal: '2'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: END
- statement_terminator: '~'
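Each dialect fixture is a pair: the `.sql` file holds the statement under test, and the `.yml` file holds the `_hash` guard plus a `file:` node mirroring the expected parse tree as plain nested mappings and lists. A sketch of reading one pair, assuming PyYAML is installed and the script runs from the repository root (the file names are taken from the fixtures above; the access pattern is illustrative, not the project's test harness):

```python
# Sketch: load one .sql/.yml fixture pair and peek at its contents.
from pathlib import Path

import yaml  # assumes PyYAML is installed

base = Path("test/fixtures/dialects/mysql/function_return")

sql_text = base.with_suffix(".sql").read_text()
fixture = yaml.safe_load(base.with_suffix(".yml").read_text())

print(sql_text.strip())       # the raw statement under test
print(fixture["_hash"])       # the guard against hand edits
print(fixture["file"])        # the expected parse tree
```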
sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_local_variable.sql
GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_local_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f1f183ac1cbee5d2382d84afc9c8151c8037a58872cbf2315d25442242ae7a3e
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: '1'
    - variable: _test
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: CLASS_ORIGIN
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_multiple_variable.sql
GET DIAGNOSTICS CONDITION 1 _test = CLASS_ORIGIN, @test = TABLE_NAME;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_multiple_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: a774db06cd2a028f74beaf8e4fbc800bf95454357357782ca3dad888a9580e8b
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: '1'
    - variable: _test
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: CLASS_ORIGIN
    - comma: ','
    - variable: '@test'
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: TABLE_NAME
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_session_variable.sql
GET DIAGNOSTICS CONDITION 1 @test = CLASS_ORIGIN;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_info_session_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 57cdf831e601b7330259c2fb2a9896682094508706204a3dfc889ab5ee7de24d
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: '1'
    - variable: '@test'
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: CLASS_ORIGIN
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_local_variable.sql
GET DIAGNOSTICS CONDITION _test;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_local_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 42bce6b496e8e1802b7fb8dd2b7d3869ae2de167e39670d4b0d24007814e9441
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: _test
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_numeric.sql
GET DIAGNOSTICS CONDITION 1;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_numeric.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b7dd3117703135613c7197d4be59c7a2de1398f0d20291fe13aaa129cb5df786
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: '1'
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_session_variable.sql
GET DIAGNOSTICS CONDITION @test;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_condition_session_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ac7218df685e13ae13347608b37dce89733405707037b02020f696a4b448ed6f
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - keyword: CONDITION
    - variable: '@test'
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_number.sql
GET DIAGNOSTICS @a = NUMBER CONDITION 1;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_number.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 796518d65a6e3a9acb2574ac1162af0a4b26e7801d0169c1912a973abf831243
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - variable: '@a'
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: NUMBER
    - keyword: CONDITION
    - variable: '1'
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_row_count.sql
GET DIAGNOSTICS @a = ROW_COUNT CONDITION 1;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/get_diagnostics_row_count.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c2e7f5ba8bf71309f0b86e1e2945924f9cc6523a1f063b04a44cd734b1213f7c
file:
  statement:
    get_diagnostics_segment:
    - keyword: GET
    - keyword: DIAGNOSTICS
    - variable: '@a'
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: ROW_COUNT
    - keyword: CONDITION
    - variable: '1'
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/grant.sql
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO prj_svc;
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc';
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc";
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`;
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO 'prj_svc'@'%';
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO "prj_svc"@"%";
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc`@`%`;
GRANT INSERT, UPDATE, DELETE, SELECT, REFERENCES ON prj_table TO `prj_svc` @`%`;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/grant.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fd535f609b65d22497d484902183cbfed83028e566ddc56502d9f7cf064c78aa
file:
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
        naked_identifier: prj_svc
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
        quoted_identifier: "'prj_svc'"
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
        quoted_literal: '"prj_svc"'
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
        quoted_identifier: '`prj_svc`'
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
      - quoted_identifier: "'prj_svc'"
      - at_sign_literal: '@'
      - quoted_identifier: "'%'"
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
      - quoted_literal: '"prj_svc"'
      - at_sign_literal: '@'
      - quoted_literal: '"%"'
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
      - quoted_identifier: '`prj_svc`'
      - at_sign_literal: '@'
      - quoted_identifier: '`%`'
- statement_terminator: ;
- statement:
    access_statement:
    - keyword: GRANT
    - keyword: INSERT
    - comma: ','
    - keyword: UPDATE
    - comma: ','
    - keyword: DELETE
    - comma: ','
    - keyword: SELECT
    - comma: ','
    - keyword: REFERENCES
    - keyword: 'ON'
    - object_reference:
        naked_identifier: prj_table
    - keyword: TO
    - role_reference:
      - quoted_identifier: '`prj_svc`'
      - at_sign_literal: '@'
      - quoted_identifier: '`%`'
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_condition_name.sql
DECLARE exit handler for conditionName begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_condition_name.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 414ccaf762a7703b813181a3ad0d367889e5491629f899021b28dca8767b320d
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - naked_identifier: conditionName
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_continue_sqlexception.yml
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: continue
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_error_code.sql
DECLARE exit handler for 1051 begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_error_code.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3d34172436001fa633eaf25d34177ba7a6c903cbd232b5a83f7146fca0964287
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - numeric_literal: '1051'
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_exit_sqlexception.yml
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_not_found.sql
DECLARE exit handler for not found begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_not_found.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c939949a50ffc6352dc498e1144348bcb1ae7813f5e690feaa32fffe9abf39b0
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: not
    - keyword: found
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlstate.sql
DECLARE exit handler for SQLSTATE '1' begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlstate.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7d138be4d0143608c52d2393cf54169f65f3d25fc89471428021fa12adf1f8a4
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: SQLSTATE
    - quoted_literal: "'1'"
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlstate_value.sql
DECLARE exit handler for SQLSTATE VALUE '1' begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlstate_value.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 931dfb6f38009cef3ad9b07ccf1412c97b4106d944cbfffa6b2d694f27526517
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: SQLSTATE
    - keyword: VALUE
    - quoted_literal: "'1'"
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlwarning.sql
DECLARE exit handler for sqlwarning begin select 1; end;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_sqlwarning.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ec2fecbc723f160082a61a9b6f8a2bca85e98f87c99cfc923e06a591905870c7
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: exit
    - keyword: handler
    - keyword: for
    - keyword: sqlwarning
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  numeric_literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/handler_undo_sqlexception.yml
file:
- statement:
    declare_statement:
    - keyword: DECLARE
    - keyword: undo
    - keyword: handler
    - keyword: for
    - keyword: sqlexception
    - statement:
        transaction_statement:
          keyword: begin
          statement:
            select_statement:
              select_clause:
                keyword: select
                select_clause_element:
                  literal: '1'
- statement_terminator: ;
- statement:
    transaction_statement:
      keyword: end
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/help_statement.sql
HELP 'contents';
HELP 'data types';
HELP 'ascii';
HELP 'create table';
HELP 'status';
HELP 'functions';

sqlfluff-2.3.5/test/fixtures/dialects/mysql/help_statement.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2870d78e2fbb06bc6dee4188a68515024d12f2143da14b0aa617149d244033e0
file:
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'contents'"
- statement_terminator: ;
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'data types'"
- statement_terminator: ;
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'ascii'"
- statement_terminator: ;
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'create table'"
- statement_terminator: ;
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'status'"
- statement_terminator: ;
- statement:
    help_statement:
      keyword: HELP
      quoted_literal: "'functions'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/mysql/hexadecimal_literal.sql
SELECT X'01AF';
SELECT X'01af';
SELECT x'01AF';
SELECT x'01af';
SELECT 0x01AF;
SELECT 0x01af;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/hexadecimal_literal.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2f1335b636b0de3737c565c34db24be63660c70c8d54d8acf2b5066b5cee4531
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "X'01AF'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "X'01af'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "x'01AF'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: "x'01af'"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '0x01AF'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '0x01af'
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if.sql
if (x = 0) then set @errmsg = ''; select 1; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 790013e75ace25c9b58cfb5985991696b381b935afcc68eeb61507b9ee9eac5e
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: x
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_else.sql
if (x = 0) then set @errmsg = ''; select 1; else end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_else.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3b5bf11f96922230ba969f1531778aa5b28d916699d1e65df904e48239182821
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: x
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
      keyword: else
      statement:
        if_then_statement:
        - keyword: end
        - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_elseif.sql
if (x = 0) then set @errmsg = ''; select 1; elseif (x = 1) then set _test = 1; else select 2; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_elseif.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 541352fb1913bbea13363d80cee97028508476b4a3bd851fb97b09d48b14d71f
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: x
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: elseif
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: x
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '1'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
    - keyword: set
    - variable: _test
    - comparison_operator:
        raw_comparison_operator: '='
    - variable: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
      keyword: else
      statement:
        select_statement:
          select_clause:
            keyword: select
            select_clause_element:
              numeric_literal: '2'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_multiple_expression.sql
if ((select count(*) from table1) = 0 and x = 1) then set @errmsg = ''; select 1; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_multiple_expression.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c22f7deac8d62c98a993e6ef8956541cbcee947deb8dce6e9a499897d41b46d5
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
          - bracketed:
              start_bracket: (
              expression:
                select_statement:
                  select_clause:
                    keyword: select
                    select_clause_element:
                      function:
                        function_name:
                          function_name_identifier: count
                        bracketed:
                          start_bracket: (
                          star: '*'
                          end_bracket: )
                  from_clause:
                    keyword: from
                    from_expression:
                      from_expression_element:
                        table_expression:
                          table_reference:
                            naked_identifier: table1
              end_bracket: )
          - comparison_operator:
              raw_comparison_operator: '='
          - numeric_literal: '0'
          - binary_operator: and
          - column_reference:
              naked_identifier: x
          - comparison_operator:
              raw_comparison_operator: '='
          - numeric_literal: '1'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_nested.sql
if (x = 0) then select 0; if (y = 1) then set @errmsg = ''; select 1; end if; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_nested.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: f81ec5c70044185b4094ead0ad97096653f9e47a8d693f00b2920173a9fef500
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: x
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '0'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              naked_identifier: y
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '1'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;
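The `if_*` fixtures above show how compound statements flatten out in these trees: the `if`/`then`/`end if` keywords and the body statements all appear as plain nested mappings and lists under `file:`, so the expected tree can be walked generically. A sketch of such a walk, collecting every `keyword` node in source order (illustrative only; the node names are simply those used by the fixtures):

```python
# Generic walk over a fixture's expected parse tree (nested dicts and
# lists, as in the if_* fixtures above), yielding keywords in order.
def iter_keywords(node):
    if isinstance(node, dict):
        for key, value in node.items():
            if key == "keyword":
                yield value
            else:
                yield from iter_keywords(value)
    elif isinstance(node, list):
        for item in node:
            yield from iter_keywords(item)

# Toy tree shaped like the fixtures above (an assumption for the demo).
tree = {
    "file": {
        "statement": {
            "if_then_statement": [
                {"keyword": "if"},
                {"expression": "..."},
                {"keyword": "then"},
            ]
        }
    }
}
print(list(iter_keywords(tree)))  # -> ['if', 'then']
```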
sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_session_variable.sql
if (@x = 0) then set @b = ''; select 1; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_session_variable.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1bdf0414cbba1a35240b81866b84e72279f68cb5939e807fc5b26a1b1f0ff60a
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            column_reference:
              variable: '@x'
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@b'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_subquery_expression.sql
if ((select count(*) from table1) = 0) then set @errmsg = ''; select 1; end if;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/if_subquery_expression.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0a4b721f419adfb0e1e76e0a76f3fee710334c3fb2e1908ea40ed675cc4496bf
file:
- statement:
    if_then_statement:
    - keyword: if
    - expression:
        bracketed:
          start_bracket: (
          expression:
            bracketed:
              start_bracket: (
              expression:
                select_statement:
                  select_clause:
                    keyword: select
                    select_clause_element:
                      function:
                        function_name:
                          function_name_identifier: count
                        bracketed:
                          start_bracket: (
                          star: '*'
                          end_bracket: )
                  from_clause:
                    keyword: from
                    from_expression:
                      from_expression_element:
                        table_expression:
                          table_reference:
                            naked_identifier: table1
              end_bracket: )
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '0'
          end_bracket: )
    - keyword: then
- statement:
    set_statement:
      keyword: set
      variable: '@errmsg'
      comparison_operator:
        raw_comparison_operator: '='
      quoted_literal: "''"
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: select
        select_clause_element:
          numeric_literal: '1'
- statement_terminator: ;
- statement:
    if_then_statement:
    - keyword: end
    - keyword: if
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index.sql
SELECT * FROM onetable IGNORE INDEX (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2e1f60338262df41c57f37eece235e168d74a57176116c55c9c443126c8373a5
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: IGNORE
            - keyword: INDEX
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index_for_group_by.sql
SELECT * FROM onetable IGNORE INDEX FOR GROUP BY (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index_for_group_by.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 26e55b2b5d567db8e63bd7802f436aeec190ab9f4c94603820231c9c8026ae89
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: IGNORE
            - keyword: INDEX
            - keyword: FOR
            - keyword: GROUP
            - keyword: BY
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index_for_join.sql
SELECT * FROM onetable IGNORE INDEX FOR JOIN (i2);

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index_for_join.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 60cc4d293f24f927ac5fc8ed7e486d7604260516c11b3fb241c6d9d7a2362f05
file:
  statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          wildcard_expression:
            wildcard_identifier:
              star: '*'
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: onetable
            index_hint_clause:
            - keyword: IGNORE
            - keyword: INDEX
            - keyword: FOR
            - keyword: JOIN
            - bracketed:
                start_bracket: (
                object_reference:
                  naked_identifier: i2
                end_bracket: )
  statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_index_for_order_by.sql
SELECT * FROM onetable IGNORE INDEX FOR ORDER BY (i2);
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7e4537df9e55c6973d4495dc1a329920562069731c0838927483119d8588b25e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_key.sql000066400000000000000000000000471451700765000240120ustar00rootroot00000000000000SELECT * FROM onetable IGNORE KEY (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/ignore_key.yml000066400000000000000000000020021451700765000240050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3033d4c9bc24004b84b6b337e6a280aa6d576857c8b2df2e6bac02a4392ac050 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable index_hint_clause: - keyword: IGNORE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/insert.sql000066400000000000000000000015021451700765000231600ustar00rootroot00000000000000INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6); INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2 ON DUPLICATE KEY UPDATE a = VALUES(a), b = VALUES(b); INSERT INTO t1 (a,b,c) VALUES (1,2,3),(4,5,6) AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) AS dt ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 SELECT * FROM (SELECT c, c+d AS e FROM t2) ON DUPLICATE KEY UPDATE b = e; INSERT INTO t1 (a,b,c) TABLE t2 as t3(m,n,p) ON DUPLICATE KEY UPDATE b = n+p; INSERT INTO t1 SET a=1,b=2,c=3 AS t2 ON DUPLICATE KEY UPDATE c = t2.a+t2.b; INSERT INTO t1 SET a=1,b=2,c=3 AS t2(m,n,p) ON DUPLICATE KEY UPDATE c = m+n, b = n+p; sqlfluff-2.3.5/test/fixtures/dialects/mysql/insert.yml000066400000000000000000000403761451700765000231760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f26042eddf4339d9869fb9f5ad624003347b83461901ce6ffb122617abf6c2d9 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: keyword: AS naked_identifier: dt - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: a end_bracket: ) - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: keyword: AS naked_identifier: dt - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - 
column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: e - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: t2 - insert_row_alias: keyword: as naked_identifier: t3 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: a - binary_operator: + - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - comma: ',' - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - insert_row_alias: keyword: AS naked_identifier: t2 bracketed: start_bracket: ( identifier_list: - naked_identifier: m - comma: ',' - naked_identifier: n - comma: ',' - naked_identifier: p end_bracket: ) - upsert_clause_list: - keyword: 'ON' - keyword: DUPLICATE - keyword: KEY - keyword: UPDATE - set_clause: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: m - binary_operator: + - column_reference: naked_identifier: n - comma: ',' - set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' expression: - column_reference: naked_identifier: n - binary_operator: + - column_reference: naked_identifier: p - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/interval.sql000066400000000000000000000007471451700765000235120ustar00rootroot00000000000000SELECT DATE_ADD(CURDATE(), INTERVAL -30 DAY); SELECT SUBDATE('2008-01-02', INTERVAL 31 DAY); SELECT ADDDATE(CURDATE(), INTERVAL -30 DAY); SELECT DATE_SUB('1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND); SELECT DATE_ADD('2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND); SELECT DATE_ADD(CURDATE(), INTERVAL 7 * 4 DAY); SELECT ADDDATE(CURDATE(), INTERVAL col1 DAY) FROM tbl1 ; SELECT SUBDATE(CURDATE(), INTERVAL col1 + col2 DAY) FROM tbl1 ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/interval.yml000066400000000000000000000156451451700765000235170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 985f29f0b5b895f3e2b840714cec791c128deb41151edccecb3ee1d61c9fda4e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE bracketed: - start_bracket: ( - expression: quoted_literal: "'2008-01-02'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: '31' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: numeric_literal: sign_indicator: '-' numeric_literal: '30' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_SUB bracketed: - start_bracket: ( - expression: quoted_literal: "'1992-12-31 23:59:59.000002'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1.999999'" date_part: SECOND_MICROSECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD bracketed: - start_bracket: ( - expression: quoted_literal: "'2100-12-31 23:59:59'" - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: quoted_literal: "'1:1'" date_part: MINUTE_SECOND - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE_ADD bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - numeric_literal: '7' - binary_operator: '*' - numeric_literal: '4' date_part: DAY - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ADDDATE bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: CURDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: column_reference: naked_identifier: col1 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUBDATE bracketed: - start_bracket: ( - expression: function: function_name: 
function_name_identifier: CURDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: interval_expression: keyword: INTERVAL expression: - column_reference: naked_identifier: col1 - binary_operator: + - column_reference: naked_identifier: col2 date_part: DAY - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/json.sql000066400000000000000000000006661451700765000226370ustar00rootroot00000000000000CREATE TABLE facts (sentence JSON); INSERT INTO facts VALUES (JSON_OBJECT("mascot", "Our mascot is a dolphin named \"Sakila\".")); SELECT sentence->"$.mascot" FROM facts; SELECT sentence->'$.mascot' FROM facts; SELECT sentence->>"$.mascot" FROM facts; SELECT sentence->>'$.mascot' FROM facts; SELECT sentence FROM facts WHERE JSON_TYPE(sentence->"$.mascot") = "NULL"; SELECT sentence FROM facts WHERE sentence->"$.mascot" IS NULL; sqlfluff-2.3.5/test/fixtures/dialects/mysql/json.yml000066400000000000000000000115541451700765000226370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aaeefa5bec93b6c2a4cbd0bf9e96bdae0c96464e96a5fbd0071299cffc6cc1d3 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: facts - bracketed: start_bracket: ( column_definition: naked_identifier: sentence data_type: data_type_identifier: JSON end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: facts - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: JSON_OBJECT bracketed: - start_bracket: ( - expression: quoted_literal: '"mascot"' - comma: ',' - expression: quoted_literal: '"Our mascot is a dolphin named \"Sakila\"."' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence column_path_operator: -> json_path: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence column_path_operator: -> json_path: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence column_path_operator: ->> json_path: '"$.mascot"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence column_path_operator: ->> json_path: "'$.mascot'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: facts - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: JSON_TYPE bracketed: start_bracket: ( expression: column_reference: naked_identifier: sentence column_path_operator: -> json_path: '"$.mascot"' end_bracket: ) comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NULL"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sentence from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: facts where_clause: keyword: WHERE expression: column_reference: naked_identifier: sentence column_path_operator: -> json_path: '"$.mascot"' keyword: IS null_literal: 'NULL' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/load_data.sql000066400000000000000000000024401451700765000235660ustar00rootroot00000000000000LOAD DATA INFILE '/var/lib/mysql-files/libaccess.csv' INTO TABLE libaccess FIELDS TERMINATED BY '\t' OPTIONALLY ENCLOSED BY '"' IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table; LOAD DATA INFILE 'data.txt' INTO TABLE db2.my_table PARTITION (partition_name); LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test FIELDS TERMINATED BY ',' LINES STARTING BY 'xxx'; LOAD DATA INFILE '/tmp/test.txt' INTO TABLE test IGNORE 1 LINES; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY ','; LOAD DATA INFILE 'data.txt' INTO TABLE table2 FIELDS TERMINATED BY '\t'; LOAD DATA INFILE 'data.txt' INTO TABLE tbl_name FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\r\n' IGNORE 1 LINES; LOAD DATA INFILE '/tmp/jokes.txt' INTO TABLE jokes FIELDS TERMINATED BY '' LINES TERMINATED BY '\n%%\n' (joke); LOAD DATA INFILE 'persondata.txt' INTO TABLE persondata; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @var1) SET column2 = @var1/100; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, column2) SET column3 = CURRENT_TIMESTAMP; LOAD DATA INFILE 'file.txt' INTO TABLE t1 (column1, @dummy, column2, @dummy, column3); LOAD DATA INFILE '/local/access_log' INTO TABLE tbl_name FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\' sqlfluff-2.3.5/test/fixtures/dialects/mysql/load_data.yml000066400000000000000000000160551451700765000235770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2d0a01865822c5dbbf9fc9d91f606ad76c05ede0b97e42f286267cb7c58df743 file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/var/lib/mysql-files/libaccess.csv'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: libaccess - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: - naked_identifier: db2 - dot: . - naked_identifier: my_table - partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: STARTING - keyword: BY - quoted_literal: "'xxx'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/test.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: table2 - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\t'" - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'data.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\r\\n'" - keyword: IGNORE - numeric_literal: '1' - keyword: LINES - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/tmp/jokes.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: jokes - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "''" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n%%\\n'" - bracketed: start_bracket: ( column_reference: naked_identifier: joke end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'persondata.txt'" - keyword: INTO - keyword: 
TABLE - table_reference: naked_identifier: persondata - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@var1' - end_bracket: ) - keyword: SET - column_reference: naked_identifier: column2 - comparison_operator: raw_comparison_operator: '=' - column_reference: variable: '@var1' - binary_operator: / - numeric_literal: '100' - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: SET - column_reference: naked_identifier: column3 - comparison_operator: raw_comparison_operator: '=' - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'file.txt'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column2 - comma: ',' - column_reference: variable: '@dummy' - comma: ',' - column_reference: naked_identifier: column3 - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INFILE - quoted_literal: "'/local/access_log'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: tbl_name - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: OPTIONALLY - keyword: ENCLOSED - keyword: BY - quoted_literal: "'\"'" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_label.sql000066400000000000000000000001201451700765000237570ustar00rootroot00000000000000iteration:loop select 1; iterate iteration; leave iteration; end loop iteration;sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_label.yml000066400000000000000000000020461451700765000237720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2ee6c612bc68b46b0637885e6c2d45996d0657a723d4c8aaf116166d7483357b file: - statement: loop_statement: naked_identifier: iteration colon: ':' keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: iterate_statement: keyword: iterate naked_identifier: iteration - statement_terminator: ; - statement: transaction_statement: keyword: leave naked_identifier: iteration - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - naked_identifier: iteration - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_multiple_statements.sql000066400000000000000000000000601451700765000270050ustar00rootroot00000000000000loop select 1; select * from onetable; end loop;sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_multiple_statements.yml000066400000000000000000000022171451700765000270150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1f4385380b2be8c5e17f459866e0ad060d6cdb7b1a3aabcac090d59502b9d0a8 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: onetable - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_no_label.sql000066400000000000000000000000301451700765000244530ustar00rootroot00000000000000loop select 1; end loop;sqlfluff-2.3.5/test/fixtures/dialects/mysql/loop_no_label.yml000066400000000000000000000013411451700765000244630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5257dc626087e74ee817942fa42136a8f5b1fa385f436cb278a903375ababa10 file: - statement: loop_statement: keyword: loop statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: loop_statement: - keyword: end - keyword: loop - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/nested_begin.sql000066400000000000000000000000731451700765000243040ustar00rootroot00000000000000blocks:BEGIN nest:begin set @abc = 1; end nest; END blocks~sqlfluff-2.3.5/test/fixtures/dialects/mysql/nested_begin.yml000066400000000000000000000021161451700765000243060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1f75b44362b1cb42381aac501a808f9c004f950e97b70a2a2876ff95138ab64d file: - statement: transaction_statement: naked_identifier: blocks colon: ':' keyword: BEGIN statement: transaction_statement: naked_identifier: nest colon: ':' keyword: begin statement: set_statement: - keyword: set - variable: '@abc' - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; - statement: transaction_statement: keyword: end naked_identifier: nest - statement_terminator: ; - statement: transaction_statement: keyword: END naked_identifier: blocks - statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/open.sql000066400000000000000000000000171451700765000226150ustar00rootroot00000000000000open curcursor;sqlfluff-2.3.5/test/fixtures/dialects/mysql/open.yml000066400000000000000000000010061451700765000226160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e1aebd2df56d3ec79fea9159ce9c4448a2f2f46696461f7886d3b27f321ed502 file: statement: cursor_open_close_segment: keyword: open naked_identifier: curcursor statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/open_qualified.sql000066400000000000000000000000211451700765000246330ustar00rootroot00000000000000open `curcursor`;sqlfluff-2.3.5/test/fixtures/dialects/mysql/open_qualified.yml000066400000000000000000000010131451700765000246370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3adadcefab1078bb37b5f960ec6344ab8391a808cb512db12cedeeaf6bc8612d file: statement: cursor_open_close_segment: keyword: open quoted_identifier: '`curcursor`' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/optimize_table.sql000066400000000000000000000004031451700765000246620ustar00rootroot00000000000000OPTIMIZE TABLE some_table; OPTIMIZE TABLE some_table1, some_table2; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table; OPTIMIZE NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; OPTIMIZE LOCAL TABLE some_table; OPTIMIZE LOCAL TABLE some_table1, some_table2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/optimize_table.yml000066400000000000000000000033711451700765000246730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: feffb21d789f331a3e487e1b8d5d71deb4fbfc668e677fd34995e77e3cd87c4d file: - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: optimize_table_statement: - keyword: OPTIMIZE - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_local_variable.sql000066400000000000000000000000301451700765000263240ustar00rootroot00000000000000PREPARE test FROM _test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_local_variable.yml000066400000000000000000000010441451700765000263340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 25b6e93e2262f01eaf66c26d2f5644717653ed3e01b5bd341134513ded5c1b38 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: _test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_session_variable.sql000066400000000000000000000000301451700765000267150ustar00rootroot00000000000000PREPARE test FROM @test;sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_session_variable.yml000066400000000000000000000010461451700765000267270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 465db8acb36a650a88c7dd35993d5321557e4b9b2241b2ee99a97b8ef7f39058 file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - variable: '@test' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_statement.sql000066400000000000000000000000361451700765000253770ustar00rootroot00000000000000PREPARE test FROM 'select 1;';sqlfluff-2.3.5/test/fixtures/dialects/mysql/prepare_statement.yml000066400000000000000000000010621451700765000254010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f984d1fe46e0896dd4e356ab50270fee9ad6dd7d59a16b16c34906db26d7576f file: statement: prepare_segment: - keyword: PREPARE - naked_identifier: test - keyword: FROM - quoted_literal: "'select 1;'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_definer.sql000066400000000000000000000001001451700765000253310ustar00rootroot00000000000000CREATE DEFINER=`test`@`%` PROCEDURE `testprocedure`() BEGIN END~sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_definer.yml000066400000000000000000000021051451700765000253420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4a9679dce024353f3845d4f1b920ea507add980899b88e70c45c5864f274f9ce file: statement: create_procedure_statement: - keyword: CREATE - definer_segment: keyword: DEFINER comparison_operator: raw_comparison_operator: '=' role_reference: - quoted_identifier: '`test`' - at_sign_literal: '@' - quoted_identifier: '`%`' - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_in_param.sql000066400000000000000000000000701451700765000255110ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(in test int) BEGIN END~sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_in_param.yml000066400000000000000000000017101451700765000255150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c2d6aefd94f35d5599b705fa4c4dfe7a3a8a0da4c29696e30d4d648c15bc5042 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: in parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_inout_param.sql000066400000000000000000000000731451700765000262440ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(inout test int) BEGIN END~sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_inout_param.yml000066400000000000000000000017131451700765000262500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fbe00aeccc4e8664fcef735462688e8f15ee4edc02e12cd9e6e372c85a2514d3 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: inout parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_out_param.sql000066400000000000000000000000711451700765000257130ustar00rootroot00000000000000CREATE PROCEDURE `testprocedure`(out test int) BEGIN END~sqlfluff-2.3.5/test/fixtures/dialects/mysql/procedure_out_param.yml000066400000000000000000000017111451700765000257170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b743b1d3c50497e605139814614c4b3b16413ada9a280ad66415562fa6b89121 file: statement: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - function_name: quoted_identifier: '`testprocedure`' - procedure_parameter_list: bracketed: start_bracket: ( parameter_direction: out parameter: test data_type: data_type_identifier: int end_bracket: ) - function_definition: transaction_statement: keyword: BEGIN statement: transaction_statement: keyword: END statement_terminator: '~' sqlfluff-2.3.5/test/fixtures/dialects/mysql/purge_binary_logs.sql000066400000000000000000000003561451700765000253740ustar00rootroot00000000000000PURGE BINARY LOGS TO 'mysql-bin.010'; PURGE BINARY LOGS BEFORE '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE TIMESTAMP '2019-04-02 22:46:26'; PURGE BINARY LOGS BEFORE 19830905132800; PURGE BINARY LOGS BEFORE TIMESTAMP 19830905132800; sqlfluff-2.3.5/test/fixtures/dialects/mysql/purge_binary_logs.yml000066400000000000000000000027731451700765000254030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7bb1441e7e4eeb3327daa0ca6330a42e1887e1440bbbc94266437831ca63ec77 file: - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: TO - quoted_literal: "'mysql-bin.010'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: quoted_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP date_constructor_literal: "'2019-04-02 22:46:26'" - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: numeric_literal: '19830905132800' - statement_terminator: ; - statement: purge_binary_logs_statement: - keyword: PURGE - keyword: BINARY - keyword: LOGS - keyword: BEFORE - expression: keyword: TIMESTAMP numeric_literal: '19830905132800' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/quoted_literal.sql000066400000000000000000000014361451700765000246770ustar00rootroot00000000000000SELECT ''; SELECT ""; SELECT ''''; SELECT """"; SELECT ' '; SELECT " "; SELECT '''aaa'''; SELECT """aaa"""; SELECT ' '' '; SELECT " "" "; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' "bar"; SELECT 'foo' 'bar'; SELECT "foo" "bar"; SELECT 'foo' -- some comment 'bar'; SELECT "foo" -- some comment "bar"; SELECT 'foo' /* some comment */ 'bar'; SELECT "foo" /* some comment */ "bar"; UPDATE table1 SET column1 = 'baz\'s'; UPDATE table1 SET column1 = "baz\"s"; SELECT 'terminating MySQL-y escaped single-quote bazs\''; SELECT "terminating MySQL-y escaped double-quote bazs\""; SELECT 'terminating ANSI-ish escaped single-quote '''; SELECT "terminating ANSI-ish escaped double-quote """; SELECT '\\'; SELECT "\\"; sqlfluff-2.3.5/test/fixtures/dialects/mysql/quoted_literal.yml000066400000000000000000000142671451700765000247070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ad88d0ef3cc10c070f32053dbaea0a7d788e8f01ef4ea30ac820784eb76cc4f6 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '""""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"""aaa"""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "\"\n\"\"\n\"" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: '"foo"' - quoted_literal: '"bar"' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'baz\\'s'" - statement_terminator: ; - statement: update_statement: keyword: 
UPDATE table_reference: naked_identifier: table1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: column1 comparison_operator: raw_comparison_operator: '=' quoted_literal: '"baz\"s"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating MySQL-y escaped single-quote bazs\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating MySQL-y escaped double-quote bazs\""' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'terminating ANSI-ish escaped single-quote '''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"terminating ANSI-ish escaped double-quote """' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: '"\\"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/rename_table.sql000066400000000000000000000001461451700765000242750ustar00rootroot00000000000000RENAME TABLE old_table TO new_table; RENAME TABLE old_table1 TO new_table1, old_table2 TO new_table2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/rename_table.yml000066400000000000000000000020231451700765000242730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c90b566ed72ba3be4bcb5183c0ff3089ab6ea7ce8b37783931b52e0aa1d855c6 file: - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table - keyword: TO - table_reference: naked_identifier: new_table - statement_terminator: ; - statement: rename_table_statement: - keyword: RENAME - keyword: TABLE - table_reference: naked_identifier: old_table1 - keyword: TO - table_reference: naked_identifier: new_table1 - comma: ',' - table_reference: naked_identifier: old_table2 - keyword: TO - table_reference: naked_identifier: new_table2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/repair_table.sql000066400000000000000000000006151451700765000243110ustar00rootroot00000000000000REPAIR TABLE some_table; REPAIR TABLE some_table1, some_table2; REPAIR NO_WRITE_TO_BINLOG TABLE some_table; REPAIR NO_WRITE_TO_BINLOG TABLE some_table1, some_table2; REPAIR LOCAL TABLE some_table; REPAIR LOCAL TABLE some_table1, some_table2; REPAIR TABLE some_table QUICK; REPAIR TABLE some_table EXTENDED; REPAIR TABLE some_table USE_FRM; REPAIR TABLE some_table QUICK EXTENDED USE_FRM; sqlfluff-2.3.5/test/fixtures/dialects/mysql/repair_table.yml000066400000000000000000000050211451700765000243070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1faf15f84e873b3e2be5b00bf51587fe7e641b27ecc10fdfd77ea0c3bc5e56df file: - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: NO_WRITE_TO_BINLOG - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: LOCAL - keyword: TABLE - table_reference: naked_identifier: some_table1 - comma: ',' - table_reference: naked_identifier: some_table2 - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: EXTENDED - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: USE_FRM - statement_terminator: ; - statement: repair_table_statement: - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: some_table - keyword: QUICK - keyword: EXTENDED - keyword: USE_FRM - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_label.sql000066400000000000000000000001041451700765000242700ustar00rootroot00000000000000iteration:repeat set @a = @a + 1; until @a > 5 end repeat iteration;sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_label.yml000066400000000000000000000022041451700765000242750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a2053c708738b674f2b1964f16358678e7df43a8662d2d38373a7ad02d693267 file: - statement: repeat_statement: naked_identifier: iteration colon: ':' keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: column_reference: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: column_reference: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - naked_identifier: iteration - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_multiple_statements.sql000066400000000000000000000001041451700765000273130ustar00rootroot00000000000000repeat set @a = @a + 1; select 1; until @a > 5 and x = 1 end repeat;sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_multiple_statements.yml000066400000000000000000000026151451700765000273260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be84d375c2033cff7370f20ceaa9409da3f9a700c02a210850a2634b48bbe207 file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: column_reference: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: - column_reference: variable: '@a' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: and - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_no_label.sql000066400000000000000000000000601451700765000247650ustar00rootroot00000000000000repeat set @a = @a + 1; until @a > 5 end repeat;sqlfluff-2.3.5/test/fixtures/dialects/mysql/repeat_no_label.yml000066400000000000000000000020571451700765000247770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1626c026e42da9255c25bbd14de7039094f647001ae594a00274a1377c3c240b file: - statement: repeat_statement: keyword: repeat statement: set_statement: keyword: set variable: '@a' comparison_operator: raw_comparison_operator: '=' expression: column_reference: variable: '@a' binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: repeat_statement: - keyword: until - expression: column_reference: variable: '@a' comparison_operator: raw_comparison_operator: '>' numeric_literal: '5' - keyword: end - keyword: repeat - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/replace.sql000066400000000000000000000020731451700765000232730ustar00rootroot00000000000000REPLACE tbl_name VALUES (1, 2); REPLACE tbl_name VALUES (DEFAULT, DEFAULT); REPLACE tbl_name VALUES (1, 2), (11, 22); REPLACE tbl_name VALUE (1, 2), (11, 22); REPLACE tbl_name (col1, col2) VALUES (1, 2); REPLACE tbl_name (col1, col2) VALUES ROW(1, 2), ROW(11, 22); REPLACE LOW_PRIORITY tbl_name VALUES (1, 2); REPLACE DELAYED tbl_name VALUES (1, 2); REPLACE LOW_PRIORITY INTO tbl_name VALUES (1, 2); REPLACE tbl_name PARTITION (partition_name) VALUES (1, 2); REPLACE tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY tbl_name SET col1 = 1, col2 = 2; REPLACE DELAYED tbl_name SET col1 = 1, col2 = 2; REPLACE LOW_PRIORITY INTO tbl_name SET col1 = 1, col2 = 2; REPLACE tbl_name PARTITION (partition_name) SET col1 = 1, col2 = 2; REPLACE tbl_name SELECT * FROM table_name; REPLACE tbl_name TABLE table_name; REPLACE LOW_PRIORITY tbl_name TABLE table_name; REPLACE DELAYED tbl_name TABLE table_name; REPLACE LOW_PRIORITY INTO tbl_name TABLE table_name; REPLACE tbl_name (col1, col2) TABLE table_name; REPLACE tbl_name PARTITION (partition_name) TABLE table_name; sqlfluff-2.3.5/test/fixtures/dialects/mysql/replace.yml000066400000000000000000000246421451700765000233030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5e56839dde36d79ac2153e62446f262002bffb704a7185d5e155ef88f1e1b528 file: - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: keyword: VALUES bracketed: - start_bracket: ( - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name values_clause: - keyword: VALUE - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - numeric_literal: '11' - comma: ',' - numeric_literal: '22' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - 
statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: replace_statement: keyword: REPLACE table_reference: naked_identifier: tbl_name select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: DELAYED - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - keyword: LOW_PRIORITY - keyword: INTO - table_reference: naked_identifier: tbl_name - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - 
statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: replace_statement: - keyword: REPLACE - table_reference: naked_identifier: tbl_name - partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: partition_name end_bracket: ) - keyword: TABLE - table_reference: naked_identifier: table_name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/reset_master.sql000066400000000000000000000000441451700765000243510ustar00rootroot00000000000000RESET MASTER; RESET MASTER TO 1234; sqlfluff-2.3.5/test/fixtures/dialects/mysql/reset_master.yml000066400000000000000000000012261451700765000243560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c1e5dac0655f6e9471d3cab9d0b711be754e37c946b8099192ccd30e6ed4608 file: - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - statement_terminator: ; - statement: reset_master_statement: - keyword: RESET - keyword: MASTER - keyword: TO - numeric_literal: '1234' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal.sql000066400000000000000000000000111451700765000234520ustar00rootroot00000000000000resignal;sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal.yml000066400000000000000000000007371451700765000234730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_name.sql000066400000000000000000000000271451700765000265270ustar00rootroot00000000000000resignal testcondition;sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_name.yml000066400000000000000000000010051451700765000265260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_sqlstate.sql000066400000000000000000000000321451700765000274430ustar00rootroot00000000000000resignal sqlstate '42S02';sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_sqlstate.yml000066400000000000000000000010271451700765000274520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6df9f8a5460f8e1b81a403bcae8e1764ad2573f179abbc7b23d952fa1a65d06b file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_sqlstate_value.sql000066400000000000000000000000401451700765000306360ustar00rootroot00000000000000resignal sqlstate value '42S02';sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_condition_sqlstate_value.yml000066400000000000000000000010541451700765000306460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f14945a5c55ed136eafcb7262de333b7a62483cd48eb3b4f94d2227f16e5e04b file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_set_signal_info.sql000066400000000000000000000000531451700765000267030ustar00rootroot00000000000000resignal set message_text = 'test message';sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_set_signal_info.yml000066400000000000000000000011651451700765000267120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5bdac0c8fdc569afd6cee10fc16146ba608204274f6e22e9ccce971887e2b6f9 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_set_signal_info_multiple.sql000066400000000000000000000001031451700765000306120ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500';sqlfluff-2.3.5/test/fixtures/dialects/mysql/resignal_set_signal_info_multiple.yml000066400000000000000000000014021451700765000306170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00965e828a098c4f8db6e840822b45ed6cefee6c32689b6e753a6e7ac61259b7 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_boolean_operators.sql000066400000000000000000000001741451700765000267340ustar00rootroot00000000000000SELECT !1; SELECT 1 && 1; SELECT 1 && 0; SELECT 1 XOR 1; SELECT 1 || 1; SELECT col_1 && 1; SELECT (col_1 = col_2) || col_3; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_boolean_operators.yml000066400000000000000000000047671451700765000267520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d21bd7c1927fa694dafe5d0812e99cabeeffb6fd21b122189c8c7dbb79328f70 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: not_operator: '!' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '&&' - numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: XOR - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '||' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: col_1 binary_operator: '&&' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: col_1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col_2 end_bracket: ) binary_operator: '||' column_reference: naked_identifier: col_3 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_distinctrow.sql000066400000000000000000000000421451700765000255620ustar00rootroot00000000000000select distinctrow * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_distinctrow.yml000066400000000000000000000015651451700765000255770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9871a45f50a15605c76b50132724c5f346aaa8192ccb8d8808c5a46f5fd4d8b8 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinctrow select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_share.sql000066400000000000000000000000401451700765000251570ustar00rootroot00000000000000SELECT 1 FROM table1 FOR SHARE; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_share.yml000066400000000000000000000014731451700765000251740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 02225e92a0b90b53bdd96e50dc68defd5f0a350525db6500100b390bb2403fa3 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: SHARE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update.sql000066400000000000000000000000411451700765000253400ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update.yml000066400000000000000000000014741451700765000253550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c93bb773f206085006bb91a60b1174791819d70a67b1d49f89d67f985871b092 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_lock_in_share_mode.sql000066400000000000000000000000511451700765000312250ustar00rootroot00000000000000SELECT 1 FROM table1 LOCK IN SHARE MODE; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_lock_in_share_mode.yml000066400000000000000000000015461451700765000312410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d370a7c336d97129568cc88f70c4b8d79cce6e3d13c34fcfba66e315f4bfd27f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: LOCK - keyword: IN - keyword: SHARE - keyword: MODE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_nowait.sql000066400000000000000000000000501451700765000267210ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE NOWAIT; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_nowait.yml000066400000000000000000000015241451700765000267320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d507810a93ebf607747effb7b223e1813effc969a3deba66c9bb62ea9c82be5e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: NOWAIT statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_of.sql000066400000000000000000000000511451700765000260250ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_of.yml000066400000000000000000000015571451700765000260430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efebbedd435ef3a626ed5e4556668b386152d98ece84affadbd1dab651a77223 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_of_multiple.sql000066400000000000000000000000611451700765000277410ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE OF test1, test2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_of_multiple.yml000066400000000000000000000016431451700765000277520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 495a190960fe7a8c4ec218cd448be5b7af1c4f54cb09c25f058bc29a18356f48 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: OF - naked_identifier: test1 - comma: ',' - naked_identifier: test2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_skip_locked.sql000066400000000000000000000000551451700765000277140ustar00rootroot00000000000000SELECT 1 FROM table1 FOR UPDATE SKIP LOCKED; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_for_update_skip_locked.yml000066400000000000000000000015521451700765000277210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 54602cc7d83eb9cbaa73bb881f60b100d97193a89f31f284943f9b8b9da2dc5a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: FOR - keyword: UPDATE - keyword: SKIP - keyword: LOCKED statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_high_priority.sql000066400000000000000000000000441451700765000260730ustar00rootroot00000000000000select HIGH_PRIORITY * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_high_priority.yml000066400000000000000000000015671451700765000261100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3006519272c9151e4e617e0fbaf1af3e6f16b3e2880d360c38b8800a6cb0ca24 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: HIGH_PRIORITY select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_dumpfile.sql000066400000000000000000000000471451700765000260540ustar00rootroot00000000000000select * into dumpfile '' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_dumpfile.yml000066400000000000000000000016261451700765000260620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a61aaa5347e7be900293a778b15cd3dc8752f32d832fdc7a081cd90c4708c3bf file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: dumpfile - quoted_literal: "''" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_multiple_variable.sql000066400000000000000000000001501451700765000277420ustar00rootroot00000000000000select 1, @test2, _test3, 'test4', func(test5) into @test1, @test2, _test3, @test4, @test5 from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_multiple_variable.yml000066400000000000000000000031641451700765000277540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c8649adcf3150b1fc65b45fb13088beac78e43fc4da3a300b9adb22368dbed86 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: column_reference: variable: '@test2' - comma: ',' - select_clause_element: column_reference: naked_identifier: _test3 - comma: ',' - select_clause_element: quoted_literal: "'test4'" - comma: ',' - select_clause_element: function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: column_reference: naked_identifier: test5 end_bracket: ) into_clause: - keyword: into - variable: '@test1' - comma: ',' - variable: '@test2' - comma: ',' - variable: _test3 - comma: ',' - variable: '@test4' - comma: ',' - variable: '@test5' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile.sql000066400000000000000000000000471451700765000257160ustar00rootroot00000000000000select * into outfile 'a' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile.yml000066400000000000000000000016261451700765000257240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5f52451a2e6be22be53d77d57103acf7c11495f1064382ddf5e9e15ba40c4546 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_enclosed.sql000066400000000000000000000000761451700765000311220ustar00rootroot00000000000000select * into outfile 'a' fields enclosed by '"' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_enclosed.yml000066400000000000000000000017731451700765000311310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 148cc7fc5601d1bfc8c802f704f9fe203a9066a63fe8c71a4cc7279fc81de668 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_escaped.sql000066400000000000000000000000751451700765000307310ustar00rootroot00000000000000select * into outfile 'a' fields escaped by '-' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_escaped.yml000066400000000000000000000017711451700765000307370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 44d837537b304fdfee81a7789f09327652eead42ec3ffc31356d0e16de2b8d0f file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: escaped - keyword: by - quoted_literal: "'-'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_optionally_enclosed.sql000066400000000000000000000001111451700765000333620ustar00rootroot00000000000000select * into outfile 'a' fields optionally enclosed by '"' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_optionally_enclosed.yml000066400000000000000000000020271451700765000333740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae4149d89d866b6a72bec15d62123ad46ef10dbfcb192726936c192c93c31b8e file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: optionally - keyword: enclosed - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_terminated.sql000066400000000000000000000001001451700765000314460ustar00rootroot00000000000000select * into outfile 'a' fields terminated by '"' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_fields_terminated.yml000066400000000000000000000017751451700765000314730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fd500c01b05c9c1f319b631c96eb958dab28b6ecd736800d3112519d898c2d7f file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: fields - keyword: terminated - keyword: by - quoted_literal: "'\"'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_lines_starting.sql000066400000000000000000000000761451700765000310250ustar00rootroot00000000000000select * into outfile 'a' lines starting by '\n' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_lines_starting.yml000066400000000000000000000017731451700765000310340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 18230abeacc4e58e49f83f845d8a4c5e927b78671b4f85d9e38d5d16039f45e2 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: starting - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_lines_terminated.sql000066400000000000000000000001001451700765000313120ustar00rootroot00000000000000select * into outfile 'a' lines terminated by '\n' from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_outfile_lines_terminated.yml000066400000000000000000000017751451700765000313370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e7bfa635f6ef5cce44d7372ddabeaa8fadf29dfd470beda83ab3bc5f58eb6ed6 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: into - keyword: outfile - quoted_literal: "'a'" - keyword: lines - keyword: terminated - keyword: by - quoted_literal: "'\\n'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_session_variable.sql000066400000000000000000000001771451700765000276030ustar00rootroot00000000000000select 1 into @dumpfile from table1; SELECT name INTO @name FROM t WHERE id = 1; SELECT name FROM t WHERE id = 1 INTO @name; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_into_session_variable.yml000066400000000000000000000042541451700765000276050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3d90396fce3fe718549e0cb44f0a32d13f527711a1a3b6edddfb6f9f76d674dc file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' into_clause: keyword: into variable: '@dumpfile' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name into_clause: keyword: INTO variable: '@name' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' into_clause: keyword: INTO variable: '@name' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_local_variable.sql000066400000000000000000000000151451700765000261500ustar00rootroot00000000000000select test2;sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_local_variable.yml000066400000000000000000000011231451700765000261530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9af97739ef6ad17f592f5387bb4522bd1bf97e6b52ae98b460f76bf6ed3ffcdd file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: test2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_lock_in_share_mode.sql000066400000000000000000000000511451700765000270150ustar00rootroot00000000000000select 1 from table1 lock in share mode; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_lock_in_share_mode.yml000066400000000000000000000015461451700765000270310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9fbe45c2085ef72cbde8c14405f345140dc661d053add571a454c18ae3e79500 file: statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 for_clause: - keyword: lock - keyword: in - keyword: share - keyword: mode statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_multiple_partition.sql000066400000000000000000000000561451700765000271420ustar00rootroot00000000000000select * from table1 PARTITION(part1, part2); sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_multiple_partition.yml000066400000000000000000000021051451700765000271410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1fc300a18d46f88268099f98290a1499b9c66fa35604ee9cd2cdf6d278b6af14 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: - start_bracket: ( - object_reference: naked_identifier: part1 - comma: ',' - object_reference: naked_identifier: part2 - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_partition.sql000066400000000000000000000000471451700765000252270ustar00rootroot00000000000000select * from table1 PARTITION(part1); sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_partition.yml000066400000000000000000000017601451700765000252340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c508e3cccfffae3b003ffd16e156f90f1ebed5bf0524be2a458da7a218c202c1 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 partition_clause: keyword: PARTITION bracketed: start_bracket: ( object_reference: naked_identifier: part1 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_session_variable.sql000066400000000000000000000000161451700765000265420ustar00rootroot00000000000000select @test2;sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_session_variable.yml000066400000000000000000000011161451700765000265460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f9c4401b3123d25feb2c718e2b77f7f7af27bef874d71b33357b354614b80daa file: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: variable: '@test2' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_big_result.sql000066400000000000000000000000451451700765000262320ustar00rootroot00000000000000select SQL_BIG_RESULT * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_big_result.yml000066400000000000000000000015701451700765000262400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3f4d35b2a7b796d378d6888b3b66a816977025435ad2330584bc4e0d0192c9bf file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BIG_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_buffer_result.sql000066400000000000000000000000501451700765000267360ustar00rootroot00000000000000select SQL_BUFFER_RESULT * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_buffer_result.yml000066400000000000000000000015731451700765000267530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bebb36790002ef31a1e293fe5c53ae91060da9a684608734fd9ae851ff032493 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_BUFFER_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_cache.sql000066400000000000000000000000401451700765000251310ustar00rootroot00000000000000select SQL_CACHE * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_cache.yml000066400000000000000000000015631451700765000251460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1ad574866e07fcfc72d05757ea620ff73184427caa3756357a5d8b13259ea4c7 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_calc_found_rows.sql000066400000000000000000000000521451700765000272400ustar00rootroot00000000000000select SQL_CALC_FOUND_ROWS * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_calc_found_rows.yml000066400000000000000000000015751451700765000272550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f50fd122c842bf90fce3cd99e66ed64121af15acd587b423c38251d0dfcaad51 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_CALC_FOUND_ROWS select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_no_cache.sql000066400000000000000000000000431451700765000256300ustar00rootroot00000000000000select SQL_NO_CACHE * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_no_cache.yml000066400000000000000000000015661451700765000256450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6de0523b89aa7d305d9217f843e0fdd35e1e092140f7bb1d5b4d692c6247e4c5 file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_NO_CACHE select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_small_result.sql000066400000000000000000000000471451700765000266030ustar00rootroot00000000000000select SQL_SMALL_RESULT * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_sql_small_result.yml000066400000000000000000000015721451700765000266110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e5aec50a0e01748aaba96d940e14c71dd2f03e7d27d777d58c5392e2ed3dac7e file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: SQL_SMALL_RESULT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_straight_join.sql000066400000000000000000000000441451700765000260570ustar00rootroot00000000000000select STRAIGHT_JOIN * from table1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/select_straight_join.yml000066400000000000000000000015671451700765000260740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6b9d5f425cf93327107ccb3168505d5544de5d89cdf9b82ce91cc9eaa06831fc file: statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: STRAIGHT_JOIN select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_booleans.sql000066400000000000000000000002401451700765000243270ustar00rootroot00000000000000SET some_bool_param = ON; SET some_bool_param = OFF; SET some_bool_param = TRUE; SET some_bool_param = FALSE; SET some_bool_param = 0; SET some_bool_param = 1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_booleans.yml000066400000000000000000000030261451700765000243360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f5a859bc75f6b63c6c5c7fbf8ebe8f7d0a248c0674ca8469ad493e7cacd0ca14 file: - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'TRUE' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - keyword: 'FALSE' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - variable: '0' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: some_bool_param - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_multiple_variables.sql000066400000000000000000000000221451700765000264060ustar00rootroot00000000000000SET a = 1, b = 2; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_multiple_variables.yml000066400000000000000000000012641451700765000264210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0ec087dc18382ef0fa53f47fccf4b20ea966a60e1bd51cf5ed1aaa125cea0edd file: statement: set_statement: - keyword: SET - variable: a - comparison_operator: raw_comparison_operator: '=' - variable: '1' - comma: ',' - variable: b - comparison_operator: raw_comparison_operator: '=' - variable: '2' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_session_variable.sql000066400000000000000000000001151451700765000260560ustar00rootroot00000000000000set @abc = 1; set @my_var = 1; set @my$currency = 1; set @sha256enabled = 1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_session_variable.yml000066400000000000000000000021771451700765000260720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9e4ffffc861f4647dfc51be1591f5df5931d712fe5083c39fde3ce9942d8ad60 file: - statement: set_statement: - keyword: set - variable: '@abc' - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; - statement: set_statement: - keyword: set - variable: '@my_var' - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; - statement: set_statement: - keyword: set - variable: '@my$currency' - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; - statement: set_statement: - keyword: set - variable: '@sha256enabled' - comparison_operator: raw_comparison_operator: '=' - variable: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_session_variable_expression.sql000066400000000000000000000001471451700765000303420ustar00rootroot00000000000000set @abc = 1 + 2; set @abc = (select 1); SET @id = (SELECT id FROM table1 WHERE field = TRUE LIMIT 1); sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_session_variable_expression.yml000066400000000000000000000042761451700765000303530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6dbb54d45f1b9ec0f0802f990ae79589b3b07f58353743dc22554fb032e4ed8e file: - statement: set_statement: keyword: set variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - statement_terminator: ; - statement: set_statement: keyword: set variable: '@abc' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@id' comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 where_clause: keyword: WHERE expression: column_reference: naked_identifier: field comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' limit_clause: keyword: LIMIT numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_sql_log_bin.sql000066400000000000000000000000551451700765000250210ustar00rootroot00000000000000SET sql_log_bin = ON; SET sql_log_bin = OFF; sqlfluff-2.3.5/test/fixtures/dialects/mysql/set_sql_log_bin.yml000066400000000000000000000014051451700765000250230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 71f17883bf0985f7065339557331cd8a031250b88137613ef698f51b1487b8f9 file: - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: sql_log_bin - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal.sql000066400000000000000000000000111451700765000231230ustar00rootroot00000000000000resignal;sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal.yml000066400000000000000000000007371451700765000231440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5486a51152d09bd7aa929d6aad8035b68a7af6d3b97581f55893f1b5b23015ff file: statement: resignal_segment: keyword: resignal statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_name.sql000066400000000000000000000000271451700765000262000ustar00rootroot00000000000000resignal testcondition;sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_name.yml000066400000000000000000000010051451700765000261770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddb0f0a16d3621ad997255a98f4989de04e3689165072077b221c139f0cf28bd file: statement: resignal_segment: keyword: resignal naked_identifier: testcondition statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_sqlstate.sql000066400000000000000000000000321451700765000271140ustar00rootroot00000000000000resignal sqlstate '42S02';sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_sqlstate.yml000066400000000000000000000010271451700765000271230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6df9f8a5460f8e1b81a403bcae8e1764ad2573f179abbc7b23d952fa1a65d06b file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_sqlstate_value.sql000066400000000000000000000000401451700765000303070ustar00rootroot00000000000000resignal sqlstate value '42S02';sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_condition_sqlstate_value.yml000066400000000000000000000010541451700765000303170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f14945a5c55ed136eafcb7262de333b7a62483cd48eb3b4f94d2227f16e5e04b file: statement: resignal_segment: - keyword: resignal - keyword: sqlstate - keyword: value - quoted_literal: "'42S02'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_set_signal_info.sql000066400000000000000000000000531451700765000263540ustar00rootroot00000000000000resignal set message_text = 'test message';sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_set_signal_info.yml000066400000000000000000000011651451700765000263630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5bdac0c8fdc569afd6cee10fc16146ba608204274f6e22e9ccce971887e2b6f9 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_set_signal_info_multiple.sql000066400000000000000000000001031451700765000302630ustar00rootroot00000000000000resignal set message_text = 'test message', mysql_errno = '42S500';sqlfluff-2.3.5/test/fixtures/dialects/mysql/signal_set_signal_info_multiple.yml000066400000000000000000000014021451700765000302700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00965e828a098c4f8db6e840822b45ed6cefee6c32689b6e753a6e7ac61259b7 file: statement: resignal_segment: - keyword: resignal - keyword: set - keyword: message_text - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test message'" - comma: ',' - keyword: mysql_errno - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'42S500'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/system_variables.sql000066400000000000000000000002441451700765000252320ustar00rootroot00000000000000SELECT @@global.time_zone; SELECT @@session.time_zone; SELECT @@global.version; SELECT @@session.rand_seed1; SELECT CONVERT_TZ(NOW(), @@global.time_zone, '+00:00') sqlfluff-2.3.5/test/fixtures/dialects/mysql/system_variables.yml000066400000000000000000000035061451700765000252400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4ba62a416da7b4bd69bceb3282c5e6cb823d108748592d5c0f806c221dd64b36 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.time_zone' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@global.version' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@session.rand_seed1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: CONVERT_TZ bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: NOW bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: system_variable: '@@global.time_zone' - comma: ',' - expression: quoted_literal: "'+00:00'" - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/mysql/update.sql000066400000000000000000000017471451700765000231510ustar00rootroot00000000000000UPDATE t1 SET col1 = col1 + 1; UPDATE t1 SET col1 = col1 + 1, col2 = col1; UPDATE items,month SET items.price=month.price WHERE items.id=month.id; UPDATE t SET id = id + 1 ORDER BY id DESC; UPDATE items SET retail = retail * 0.9 WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity > 100); UPDATE items, (SELECT id FROM items WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity < 100)) AS discounted SET items.retail = items.retail * 0.9 WHERE items.id = discounted.id; UPDATE items, (SELECT id, retail / wholesale AS markup, quantity FROM items) AS discounted SET items.retail = items.retail * 0.9 WHERE discounted.markup >= 1.3 AND discounted.quantity < 100 AND items.id = discounted.id; UPDATE LOW_PRIORITY foo SET bar = 7 LIMIT 4; UPDATE a, b SET a.name = b.name WHERE a.id = b.id; UPDATE a join b on a.id = b.id set a.type = b.type where a.type is null; sqlfluff-2.3.5/test/fixtures/dialects/mysql/update.yml000066400000000000000000000340331451700765000231450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b7d7ef20a92e8be5f1d1d78915f1a19312436821e2173a1450e3838d0c73dcf9 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col1 binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: - column_reference: naked_identifier: col2 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col1 - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: items - comma: ',' - table_reference: naked_identifier: month - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: items - dot: . - naked_identifier: price - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . - naked_identifier: price - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: month - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: id binary_operator: + numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - keyword: DESC - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '100' end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: column_reference: naked_identifier: id keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items where_clause: keyword: WHERE expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: items comma: ',' from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: retail - binary_operator: / - column_reference: naked_identifier: wholesale alias_expression: keyword: AS naked_identifier: markup - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: items end_bracket: ) alias_expression: keyword: AS naked_identifier: discounted set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail comparison_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: items - dot: . - naked_identifier: retail binary_operator: '*' numeric_literal: '0.9' where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: markup - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1.3' - binary_operator: AND - column_reference: - naked_identifier: discounted - dot: . - naked_identifier: quantity - comparison_operator: raw_comparison_operator: < - numeric_literal: '100' - binary_operator: AND - column_reference: - naked_identifier: items - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: discounted - dot: . 
- naked_identifier: id - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - keyword: LOW_PRIORITY - table_reference: naked_identifier: foo - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '7' - limit_clause: keyword: LIMIT numeric_literal: '4' - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: a - comma: ',' - table_reference: naked_identifier: b - set_clause_list: keyword: SET set_clause: - column_reference: - naked_identifier: a - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: name - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id - statement_terminator: ; - statement: update_statement: keyword: UPDATE from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: b join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: id set_clause_list: keyword: set set_clause: - column_reference: - naked_identifier: a - dot: . - naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: type where_clause: keyword: where expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: type keyword: is null_literal: 'null' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_database.sql000066400000000000000000000000131451700765000242700ustar00rootroot00000000000000use my_db; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_database.yml000066400000000000000000000010211451700765000242720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3aaf2dd630ae2dbc3dde9cead50589cf7b516507c6ed959abf06d7a0ce7ba46b file: statement: use_statement: keyword: use database_reference: naked_identifier: my_db statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index.sql000066400000000000000000000000451451700765000236400ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index.yml000066400000000000000000000020761451700765000236500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1cac348257a408e8fd1127849665a5ee7ed563c0a2aa93770b2fc29857958514 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_group_by.sql000066400000000000000000000000621451700765000264130ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR GROUP BY (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_group_by.yml000066400000000000000000000021761451700765000264250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d4a1185294cbe267fc87d4cd099f2f22f3c83e590e106f237d37715f4f284107 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: GROUP - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_join.sql000066400000000000000000000000561451700765000255270ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR JOIN (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_join.yml000066400000000000000000000021511451700765000255270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9a6441958ff22d6693ae703886de81600e7bb086f08a61a24ea841a6792b7d8c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: JOIN - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_order_by.sql000066400000000000000000000000621451700765000263720ustar00rootroot00000000000000SELECT * FROM t1 test USE INDEX FOR ORDER BY (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_index_for_order_by.yml000066400000000000000000000021761451700765000264040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a62cf5389e97666d68013628c8769ee03126c1ef12d7f64c506d8f5ef89a464a file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: INDEX - keyword: FOR - keyword: ORDER - keyword: BY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_key.sql000066400000000000000000000000431451700765000233170ustar00rootroot00000000000000SELECT * FROM t1 test USE KEY (i2);sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_key.yml000066400000000000000000000020741451700765000233270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0506aa4eb10450054ebb9990ae570d8cdb46ae6b2e7292aa2dd0e96f0ec7cd29 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 alias_expression: naked_identifier: test index_hint_clause: - keyword: USE - keyword: KEY - bracketed: start_bracket: ( object_reference: naked_identifier: i2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_statement.sql000066400000000000000000000000101451700765000245250ustar00rootroot00000000000000USE db; sqlfluff-2.3.5/test/fixtures/dialects/mysql/use_statement.yml000066400000000000000000000010161451700765000245360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1247f4ff8e352edcb3de7913a509af396d6c2bdcdfad9481ebf698f8b319e7a4 file: statement: use_statement: keyword: USE database_reference: naked_identifier: db statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/values_statement.sql000066400000000000000000000002141451700765000252360ustar00rootroot00000000000000VALUES ROW ('a', 1), ROW ('b', 2); VALUES ROW ('a', 1), ROW (upper('b'), 2+1); VALUES ROW (CURRENT_DATE, '2020-06-04' + interval -5 day); sqlfluff-2.3.5/test/fixtures/dialects/mysql/values_statement.yml000066400000000000000000000040571451700765000252510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ec3f560739d5cbb79a45481da9f5a78d3d3073fbc8830284de6763c9fc987458 file: - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'b'" comma: ',' numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: start_bracket: ( quoted_literal: "'a'" comma: ',' numeric_literal: '1' end_bracket: ) - comma: ',' - keyword: ROW - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: upper bracketed: start_bracket: ( expression: quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: - numeric_literal: '2' - binary_operator: + - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: VALUES - keyword: ROW - bracketed: - start_bracket: ( - expression: bare_function: CURRENT_DATE - comma: ',' - expression: quoted_literal: "'2020-06-04'" binary_operator: + interval_expression: keyword: interval expression: numeric_literal: sign_indicator: '-' numeric_literal: '5' date_part: day - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/variable_assignment.sql000066400000000000000000000001611451700765000256710ustar00rootroot00000000000000SELECT @var1:=COUNT(*) FROM t1; SET @var1:=0; SET @var1:=@var2:=0; UPDATE t1 SET c1 = 2 WHERE c1 = @var1:= 1; sqlfluff-2.3.5/test/fixtures/dialects/mysql/variable_assignment.yml000066400000000000000000000040271451700765000257000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bb42313b72d36da00df4839494e6e7357c200742df78c658ddbbbe051746a937 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: variable: '@var1' assignment_operator: := function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: set_statement: - keyword: SET - variable: '@var1' - assignment_operator: := - variable: '0' - statement_terminator: ; - statement: set_statement: keyword: SET variable: '@var1' assignment_operator: := expression: variable: '@var2' assignment_operator: := numeric_literal: '0' - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' where_clause: keyword: WHERE expression: column_reference: naked_identifier: c1 comparison_operator: raw_comparison_operator: '=' variable: '@var1' assignment_operator: := numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/while_label.sql000066400000000000000000000001151451700765000241220ustar00rootroot00000000000000iteration:while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while iteration;sqlfluff-2.3.5/test/fixtures/dialects/mysql/while_label.yml000066400000000000000000000023241451700765000241300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eb5d12b359790327f36cb23a2ea83afc299ff4e2662c3277729a7cfd4bc91795 file: - statement: while_statement: - naked_identifier: iteration - colon: ':' - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - naked_identifier: iteration - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/mysql/while_no_label.sql000066400000000000000000000000711451700765000246170ustar00rootroot00000000000000while _cnt <= _max_cnt do set _cnt = _cnt + 1; end while;sqlfluff-2.3.5/test/fixtures/dialects/mysql/while_no_label.yml000066400000000000000000000021771451700765000246320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8a5b1995fc531a7204a5cefdb23104e3ac19d6cc33e85f9b7036d80e950a8887 file: - statement: while_statement: - keyword: while - expression: - column_reference: naked_identifier: _cnt - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: _max_cnt - keyword: do - statement: set_statement: keyword: set variable: _cnt comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: _cnt binary_operator: + numeric_literal: '1' - statement_terminator: ; - statement: while_statement: - keyword: end - keyword: while - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/000077500000000000000000000000001451700765000212355ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/oracle/.sqlfluff000066400000000000000000000000341451700765000230550ustar00rootroot00000000000000[sqlfluff] dialect = oracle sqlfluff-2.3.5/test/fixtures/dialects/oracle/alter_table.sql000066400000000000000000000015021451700765000242320ustar00rootroot00000000000000-- AlterTableColumnClausesSegment ALTER TABLE table_name RENAME COLUMN old_column_name TO new_column_name; -- add_column_clause ALTER TABLE table_name ADD (column_name NUMBER(18)); -- modify_column_clauses ALTER TABLE table_name MODIFY column_name NUMBER(18); -- drop_column_clause ALTER TABLE table_name DROP COLUMN column_name; ALTER TABLE table_name DROP (column_name_one, column_name_two); -- AlterTableConstraintClauses ALTER TABLE table_name ADD CONSTRAINT constraint_name FOREIGN KEY (column_name) REFERENCES other_table_name (other_column_name); -- drop_constraint_clause ALTER TABLE table_name DROP CONSTRAINT constraint_name; ALTER TABLE table_name MODIFY (column_name NOT NULL ENABLE); ALTER TABLE table_name MODIFY (column_name DEFAULT 10); ALTER TABLE table_name MODIFY (column_name DEFAULT 10 NOT NULL ENABLE); sqlfluff-2.3.5/test/fixtures/dialects/oracle/alter_table.yml000066400000000000000000000124371451700765000242450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 12d7e9e7e03ef1a35e91fa4258be722ef43fb9dd4d7c1e251e1909a0fb24b499 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: ADD bracketed: start_bracket: ( column_definition: naked_identifier: column_name data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '18' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY column_definition: naked_identifier: column_name data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: DROP bracketed: - start_bracket: ( - column_reference: naked_identifier: column_name_one - comma: ',' - column_reference: naked_identifier: column_name_two - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_constraint_clauses: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_name - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: column_name end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: other_table_name - bracketed: start_bracket: ( column_reference: naked_identifier: other_column_name end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_constraint_clauses: - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: constraint_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( column_definition: naked_identifier: column_name column_constraint_segment: - keyword: NOT - keyword: 'NULL' keyword: ENABLE end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( column_definition: naked_identifier: column_name column_constraint_segment: keyword: DEFAULT numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - alter_table_column_clauses: keyword: MODIFY bracketed: start_bracket: ( 
column_definition: - naked_identifier: column_name - column_constraint_segment: keyword: DEFAULT numeric_literal: '10' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: ENABLE end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/at_signs.sql000066400000000000000000000002541451700765000235660ustar00rootroot00000000000000@@some_other_sql_file.sql @@some_other_sql_file_with_args.sql foo bar baz @some_other_sql_file.sql @some_other_sql_file_with_args.sql foo bar baz SELECT * from some_table; sqlfluff-2.3.5/test/fixtures/dialects/oracle/at_signs.yml000066400000000000000000000027641451700765000236000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d687c62efbf650d4457868fae33d95c534fc475817ebf2075348fd2b89f86bcf file: - execute_file_statement: - at_sign: '@' - at_sign: '@' - naked_identifier: some_other_sql_file - dot: . - naked_identifier: sql - execute_file_statement: - at_sign: '@' - at_sign: '@' - naked_identifier: some_other_sql_file_with_args - dot: . - naked_identifier: sql - naked_identifier: foo - naked_identifier: bar - naked_identifier: baz - execute_file_statement: - at_sign: '@' - naked_identifier: some_other_sql_file - dot: . - naked_identifier: sql - execute_file_statement: - at_sign: '@' - naked_identifier: some_other_sql_file_with_args - dot: . - naked_identifier: sql - naked_identifier: foo - naked_identifier: bar - naked_identifier: baz - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/bare_functions.sql000066400000000000000000000003141451700765000247550ustar00rootroot00000000000000SELECT a.foo, b.bar, current_date, current_timestamp, dbtimezone, localtimestamp, sessiontimestamp, sysdate, systimestamp FROM first_table a INNER JOIN second_table b ON a.baz = b.baz ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/bare_functions.yml000066400000000000000000000046051451700765000247660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e263a00681db97d124e7a639374ee40f65642e13597c704ac5a150b1e308206 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: foo - comma: ',' - select_clause_element: column_reference: - naked_identifier: b - dot: . 
- naked_identifier: bar - comma: ',' - select_clause_element: bare_function: current_date - comma: ',' - select_clause_element: bare_function: current_timestamp - comma: ',' - select_clause_element: bare_function: dbtimezone - comma: ',' - select_clause_element: bare_function: localtimestamp - comma: ',' - select_clause_element: bare_function: sessiontimestamp - comma: ',' - select_clause_element: bare_function: sysdate - comma: ',' - select_clause_element: bare_function: systimestamp from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: first_table alias_expression: naked_identifier: a join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: second_table alias_expression: naked_identifier: b - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: baz statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/bind_variables.sql000066400000000000000000000002171451700765000247220ustar00rootroot00000000000000select :abc from dual; insert into mytab values(:abc,:xyz); select column_name from table_name where column_name = :column_name_filter; sqlfluff-2.3.5/test/fixtures/dialects/oracle/bind_variables.yml000066400000000000000000000036651451700765000247360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dd712e00e905964fff249bd96878cc3008d776d7a6192bbec01a12b353109633 file: - statement: select_statement: select_clause: keyword: select select_clause_element: sqlplus_variable: colon: ':' parameter: abc from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: naked_identifier: mytab - values_clause: keyword: values bracketed: - start_bracket: ( - sqlplus_variable: colon: ':' parameter: abc - comma: ',' - sqlplus_variable: colon: ':' parameter: xyz - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: column_name comparison_operator: raw_comparison_operator: '=' sqlplus_variable: colon: ':' parameter: column_name_filter - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/comment.sql000066400000000000000000000005161451700765000234220ustar00rootroot00000000000000COMMENT ON COLUMN employees.job_id IS 'abbreviated job title'; COMMENT ON TABLE employees IS 'employees table'; COMMENT ON INDEXTYPE employees_indextype IS 'employees indextype'; COMMENT ON OPERATOR employees_operator IS 'employees operator'; COMMENT ON MATERIALIZED VIEW employees_mv IS 'employees materialized view'; sqlfluff-2.3.5/test/fixtures/dialects/oracle/comment.yml000066400000000000000000000033331451700765000234240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c51b17e9c78cfe03b337afca82520331ca798a2bd2153384ef58c800cb742ca6 file: - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: employees - dot: . 
- naked_identifier: job_id - keyword: IS - quoted_literal: "'abbreviated job title'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: employees - keyword: IS - quoted_literal: "'employees table'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: INDEXTYPE - indextype_reference: naked_identifier: employees_indextype - keyword: IS - quoted_literal: "'employees indextype'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: OPERATOR - object_reference: naked_identifier: employees_operator - keyword: IS - quoted_literal: "'employees operator'" - statement_terminator: ; - statement: comment_statement: - keyword: COMMENT - keyword: 'ON' - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: employees_mv - keyword: IS - quoted_literal: "'employees materialized view'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/comparison_operators_with_space.sql000066400000000000000000000001651451700765000304360ustar00rootroot00000000000000select 1 from dual where 3 < = 5; select 1 from dual where 4 > = 2; select 1 from dual where 1 ! = 3; sqlfluff-2.3.5/test/fixtures/dialects/oracle/comparison_operators_with_space.yml000066400000000000000000000042451451700765000304430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 70bbe6d19831c69c75359a6098c6a2cff155fcbddf2ebfbb444909ff2e756898 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '3' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '4' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' - numeric_literal: '3' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/create_table.sql000066400000000000000000000005341451700765000243720ustar00rootroot00000000000000-- create table with # in table name create table tabl#e1 (c1 SMALLINT, c2 DATE); -- create table with $ in table name create table table1$ (c1 SMALLINT, c2 DATE); -- create table with both $ & # in table name create table tab#le1$ (c1 SMALLINT, c2 DATE); -- create table with $ & # in column name create table tab#le1$ (c#1 SMALLINT, c$2 DATE);sqlfluff-2.3.5/test/fixtures/dialects/oracle/create_table.yml000066400000000000000000000044721451700765000244010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9c5e9bc766712d02ab700df17b3bc93e54f7b06c6fad0462b9a87a9713eb30a8 file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tabl#e1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: table1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tab#le1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: tab#le1$ - bracketed: - start_bracket: ( - column_definition: naked_identifier: c#1 data_type: data_type_identifier: SMALLINT - comma: ',' - column_definition: naked_identifier: c$2 data_type: data_type_identifier: DATE - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/create_view.sql000066400000000000000000000017611451700765000242600ustar00rootroot00000000000000-- BASIC CREATE VIEW CREATE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE OR REPLACE CREATE OR REPLACE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH FORCE CREATE FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH NO FORCE CREATE NO FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); CREATE OR REPLACE NO FORCE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONING CREATE EDITIONING VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONABLE CREATE EDITIONABLE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH NONEDITIONABLE CREATE NONEDITIONABLE VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE VIEW WITH EDITIONABLE EDITIONING CREATE EDITIONABLE EDITIONING VIEW 
NEW_VIEW AS (select PERSON_ID from PERSONS p); -- CREATE OR REPLACE VIEW WITH EDITIONING CREATE OR REPLACE EDITIONING VIEW NEW_VIEW AS (select PERSON_ID from PERSONS p); sqlfluff-2.3.5/test/fixtures/dialects/oracle/create_view.yml000066400000000000000000000200241451700765000242530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 69b5ba640daef659deac176b55e74ae38bb6322a61da319ed2bc8a11e65f24d8 file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: 'NO' - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: 'NO' - keyword: FORCE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: 
column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONABLE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: NONEDITIONABLE - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: EDITIONABLE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EDITIONING - keyword: VIEW - table_reference: naked_identifier: NEW_VIEW - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: PERSON_ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: PERSONS alias_expression: naked_identifier: p end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/database_link.sql000066400000000000000000000002521451700765000245360ustar00rootroot00000000000000select * from foo@bar where 1 = 1; select baz.name from foo@bar baz where 1 = 1; select function_a@orcl() from dual; select pkg_test.function_a@orcl(1) from dual; sqlfluff-2.3.5/test/fixtures/dialects/oracle/database_link.yml000066400000000000000000000061451451700765000245470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b124903ad8acbe8db908220d7601a6334e7f1c56458e3b20a694152f85e291ba file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - at_sign: '@' - naked_identifier: bar where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: baz - dot: . - naked_identifier: name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: foo - at_sign: '@' - naked_identifier: bar alias_expression: naked_identifier: baz where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: - function_name_identifier: function_a - at_sign: '@' - function_name_identifier: orcl bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: - naked_identifier: pkg_test - dot: . - function_name_identifier: function_a - at_sign: '@' - function_name_identifier: orcl bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/drop_table.sql000066400000000000000000000001611451700765000240670ustar00rootroot00000000000000DROP TABLE foo.bar CASCADE CONSTRAINTS PURGE; DROP TABLE foo.bar CASCADE CONSTRAINTS; DROP TABLE foo.bar PURGE; sqlfluff-2.3.5/test/fixtures/dialects/oracle/drop_table.yml000066400000000000000000000021741451700765000240770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db58b5ebc6efad7ee53b2673aefe4004241b0b3f282079aa6e02b48745243b16 file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: CASCADE - keyword: CONSTRAINTS - keyword: PURGE - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: CASCADE - keyword: CONSTRAINTS - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: foo - dot: . 
- naked_identifier: bar - keyword: PURGE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/fetch_first_row_only.sql000066400000000000000000000003711451700765000262070ustar00rootroot00000000000000select column_name from table_name fetch first row only; select column_name from table_name fetch first rows only; select column_name from table_name fetch first 2 row only; select column_name from table_name fetch first 2 rows only; sqlfluff-2.3.5/test/fixtures/dialects/oracle/fetch_first_row_only.yml000066400000000000000000000047621451700765000262210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 536341383952d140b9f33fde33d846482376e3b154d8e8fd49cbd82d2ddfa34e file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - keyword: rows - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: row - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name fetch_clause: - keyword: fetch - keyword: first - numeric_literal: '2' - keyword: rows - keyword: only - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/hierarchical_queries.sql000066400000000000000000000031461451700765000261350ustar00rootroot00000000000000SELECT employee_id, last_name, manager_id FROM employees CONNECT BY PRIOR employee_id = manager_id; SELECT employee_id, last_name, manager_id, LEVEL FROM employees CONNECT BY PRIOR employee_id = manager_id; SELECT last_name, employee_id, manager_id, LEVEL FROM employees START WITH employee_id = 100 CONNECT BY PRIOR employee_id = manager_id ORDER SIBLINGS BY last_name; SELECT last_name "Employee", LEVEL, SYS_CONNECT_BY_PATH(last_name, '/') "Path" FROM employees WHERE level <= 3 AND department_id = 80 START WITH last_name = 'King' CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 4; SELECT last_name "Employee", CONNECT_BY_ISCYCLE "Cycle", LEVEL, SYS_CONNECT_BY_PATH(last_name, '/') "Path" FROM employees WHERE level <= 3 AND department_id = 80 START WITH last_name = 'King' CONNECT BY NOCYCLE PRIOR employee_id = manager_id AND LEVEL <= 4 ORDER BY "Employee", "Cycle", 
LEVEL, "Path"; SELECT LTRIM(SYS_CONNECT_BY_PATH (warehouse_id,','),',') FROM (SELECT ROWNUM r, warehouse_id FROM warehouses) WHERE CONNECT_BY_ISLEAF = 1 START WITH r = 1 CONNECT BY r = PRIOR r + 1 ORDER BY warehouse_id; SELECT last_name "Employee", CONNECT_BY_ROOT last_name "Manager", LEVEL-1 "Pathlen", SYS_CONNECT_BY_PATH(last_name, '/') "Path" FROM employees WHERE LEVEL > 1 and department_id = 110 CONNECT BY PRIOR employee_id = manager_id ORDER BY "Employee", "Manager", "Pathlen", "Path"; SELECT name, SUM(salary) "Total_Salary" FROM ( SELECT CONNECT_BY_ROOT last_name as name, Salary FROM employees WHERE department_id = 110 CONNECT BY PRIOR employee_id = manager_id) GROUP BY name ORDER BY name, "Total_Salary"; sqlfluff-2.3.5/test/fixtures/dialects/oracle/hierarchical_queries.yml000066400000000000000000000434151451700765000261420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 76765fe49e51f73ff98d549cc6ed5ad70d7c41669520e6f40ebfc18a254c0028 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: LEVEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: LEVEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: employee_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '100' connectby_clause: - keyword: 
CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id orderby_clause: - keyword: ORDER - keyword: SIBLINGS - keyword: BY - column_reference: naked_identifier: last_name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: column_reference: naked_identifier: LEVEL - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: naked_identifier: level - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '3' - binary_operator: AND - column_reference: naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'King'" connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - column_reference: naked_identifier: LEVEL - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: column_reference: naked_identifier: CONNECT_BY_ISCYCLE alias_expression: quoted_identifier: '"Cycle"' - comma: ',' - select_clause_element: column_reference: naked_identifier: LEVEL - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: naked_identifier: level - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '3' - binary_operator: AND - column_reference: naked_identifier: department_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: last_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'King'" connectby_clause: - keyword: CONNECT - keyword: BY - keyword: NOCYCLE - expression: - keyword: PRIOR - 
column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id - binary_operator: AND - column_reference: naked_identifier: LEVEL - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '4' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Employee"' - comma: ',' - column_reference: quoted_identifier: '"Cycle"' - comma: ',' - column_reference: naked_identifier: LEVEL - comma: ',' - column_reference: quoted_identifier: '"Path"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: LTRIM bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: warehouse_id - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) - comma: ',' - expression: quoted_literal: "','" - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ROWNUM alias_expression: naked_identifier: r - comma: ',' - select_clause_element: column_reference: naked_identifier: warehouse_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: warehouses end_bracket: ) where_clause: keyword: WHERE expression: column_reference: naked_identifier: CONNECT_BY_ISLEAF comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' hierarchical_query_clause: startwith_clause: - keyword: START - keyword: WITH - expression: column_reference: naked_identifier: r comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' connectby_clause: - keyword: CONNECT - keyword: BY - expression: - column_reference: naked_identifier: r - comparison_operator: raw_comparison_operator: '=' - keyword: PRIOR - column_reference: naked_identifier: r - binary_operator: + - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: warehouse_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Employee"' - comma: ',' - select_clause_element: keyword: CONNECT_BY_ROOT naked_identifier: last_name alias_expression: quoted_identifier: '"Manager"' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: LEVEL binary_operator: '-' numeric_literal: '1' alias_expression: quoted_identifier: '"Pathlen"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SYS_CONNECT_BY_PATH bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'/'" - end_bracket: ) alias_expression: quoted_identifier: '"Path"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: - column_reference: naked_identifier: LEVEL - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - binary_operator: and - column_reference: naked_identifier: department_id - 
comparison_operator: raw_comparison_operator: '=' - numeric_literal: '110' hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Employee"' - comma: ',' - column_reference: quoted_identifier: '"Manager"' - comma: ',' - column_reference: quoted_identifier: '"Pathlen"' - comma: ',' - column_reference: quoted_identifier: '"Path"' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) alias_expression: quoted_identifier: '"Total_Salary"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: keyword: CONNECT_BY_ROOT naked_identifier: last_name alias_expression: keyword: as naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: Salary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '110' hierarchical_query_clause: connectby_clause: - keyword: CONNECT - keyword: BY - expression: - keyword: PRIOR - column_reference: naked_identifier: employee_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: manager_id end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: name orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: quoted_identifier: '"Total_Salary"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/interval_operations.sql000066400000000000000000000005071451700765000260470ustar00rootroot00000000000000select 1 from dual where sysdate > sysdate - interval '2' hour; select sysdate - interval '3' year from dual; select interval '2 3:04:11.333' day to second from dual; select 1 from dual where sysdate > to_date('01/01/1970', 'dd/mm/yyyy') + interval '600' month; select sysdate + interval '10' minute from dual; sqlfluff-2.3.5/test/fixtures/dialects/oracle/interval_operations.yml000066400000000000000000000071011451700765000260460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 75c92164df465af4705b75019cbed005252cff6f553ba67ca21c682c1568788b file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - bare_function: sysdate - comparison_operator: raw_comparison_operator: '>' - bare_function: sysdate - binary_operator: '-' - keyword: interval - date_constructor_literal: "'2'" - keyword: hour - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - bare_function: sysdate - binary_operator: '-' - keyword: interval - date_constructor_literal: "'3'" - keyword: year from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: - keyword: interval - date_constructor_literal: "'2 3:04:11.333'" - keyword: day - keyword: to - keyword: second from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual where_clause: keyword: where expression: - bare_function: sysdate - comparison_operator: raw_comparison_operator: '>' - function: function_name: function_name_identifier: to_date bracketed: - start_bracket: ( - expression: quoted_literal: "'01/01/1970'" - comma: ',' - expression: quoted_literal: "'dd/mm/yyyy'" - end_bracket: ) - binary_operator: + - keyword: interval - date_constructor_literal: "'600'" - keyword: month - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - bare_function: sysdate - binary_operator: + - keyword: interval - date_constructor_literal: "'10'" - keyword: minute from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/named_argument.sql000066400000000000000000000004311451700765000247420ustar00rootroot00000000000000--https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Function-Expressions.html#GUID-C47F0B7D-9058-481F-815E-A31FB21F3BD5 select my_function(arg1 => 3, arg2 => 4) from dual; select my_function(3, arg2 => 4) from dual; select my_function(arg1 => 3, 4) from dual; sqlfluff-2.3.5/test/fixtures/dialects/oracle/named_argument.yml000066400000000000000000000054131451700765000247510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 96a8a90d5cb19753570b96e0b15c0390b1676bb54dd1a8c226a75f998e31ea54 file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function bracketed: - start_bracket: ( - named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' - comma: ',' - named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function bracketed: start_bracket: ( expression: numeric_literal: '3' comma: ',' named_argument: naked_identifier: arg2 right_arrow: => expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: my_function bracketed: start_bracket: ( named_argument: naked_identifier: arg1 right_arrow: => expression: numeric_literal: '3' comma: ',' expression: numeric_literal: '4' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/non_ansi_joins.sql000066400000000000000000000013571451700765000247720ustar00rootroot00000000000000SELECT suppliers.supplier_id, suppliers.supplier_name, orders.order_date FROM suppliers, orders WHERE suppliers.supplier_id (+) = orders.supplier_id; SELECT suppliers.supplier_id, suppliers.supplier_name, orders.order_date FROM suppliers, orders WHERE suppliers.supplier_id = orders.supplier_id(+); SELECT suppliers.supplier_id, suppliers.supplier_name, orders.order_date FROM suppliers, orders, customers WHERE suppliers.supplier_id = orders.supplier_id AND orders.customer_id = customers.customer_id (+); SELECT * FROM table_a, table_b WHERE column_a(+) = nvl(column_b, 1); SELECT * FROM table_a, table_b WHERE nvl(column_b, 1) = column_a(+); sqlfluff-2.3.5/test/fixtures/dialects/oracle/non_ansi_joins.yml000066400000000000000000000175461451700765000250030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1909e95302d1729dbe1df44e81b4b87d0648659a5f860c0296e795c06f0bb33b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . 
- naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: order_date from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: suppliers - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: suppliers - dot: . - naked_identifier: supplier_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: orders - dot: . - naked_identifier: supplier_id - binary_operator: AND - column_reference: - naked_identifier: orders - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . 
- naked_identifier: customer_id - bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_b where_clause: keyword: WHERE expression: column_reference: naked_identifier: column_a bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: nvl bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: column_b - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_a - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_b where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: nvl bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: column_b - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: column_a bracketed: start_bracket: ( plus_join_symbol: + end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/pivot_unpivot.sql000066400000000000000000000017471451700765000247140ustar00rootroot00000000000000select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in ('NY' as new_york,'CT','NJ','FL','MO') ); select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in (select distinct state_code from state) ); select * from ( select times_purchased, state_code from customers t ) pivot ( count(state_code) for state_code in (any) ); select * from sale_stats unpivot ( quantity for product_code in ( product_a AS 'A', product_b AS 'B', product_c AS 'C' ) ); select * from sale_stats unpivot include nulls ( quantity for product_code in ( product_a AS 'A', product_b AS 'B', product_c AS 'C' ) ); select * from sale_stats unpivot ( (quantity, amount) for product_code in ( (a_qty, a_value) as 'A', (b_qty, b_value) as 'B' ) ); sqlfluff-2.3.5/test/fixtures/dialects/oracle/pivot_unpivot.yml000066400000000000000000000260511451700765000247110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6b2997c7d1a052842356e1af47491c1bcd3de963f28a2466855d5c6ba6d10ead file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: - start_bracket: ( - quoted_literal: "'NY'" - alias_expression: keyword: as naked_identifier: new_york - comma: ',' - quoted_literal: "'CT'" - comma: ',' - quoted_literal: "'NJ'" - comma: ',' - quoted_literal: "'FL'" - comma: ',' - quoted_literal: "'MO'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: state end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: times_purchased - comma: ',' - select_clause_element: column_reference: naked_identifier: state_code from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customers alias_expression: naked_identifier: t end_bracket: ) pivot_clause: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: column_reference: naked_identifier: state_code end_bracket: ) - 
keyword: for - column_reference: naked_identifier: state_code - keyword: in - bracketed: start_bracket: ( column_reference: naked_identifier: any end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: keyword: unpivot bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_a - alias_expression: keyword: AS quoted_identifier: "'A'" - comma: ',' - column_reference: naked_identifier: product_b - alias_expression: keyword: AS quoted_identifier: "'B'" - comma: ',' - column_reference: naked_identifier: product_c - alias_expression: keyword: AS quoted_identifier: "'C'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: - keyword: unpivot - keyword: include - keyword: nulls - bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - column_reference: naked_identifier: product_a - alias_expression: keyword: AS quoted_identifier: "'A'" - comma: ',' - column_reference: naked_identifier: product_b - alias_expression: keyword: AS quoted_identifier: "'B'" - comma: ',' - column_reference: naked_identifier: product_c - alias_expression: keyword: AS quoted_identifier: "'C'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sale_stats unpivot_clause: keyword: unpivot bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: quantity - comma: ',' - column_reference: naked_identifier: amount - end_bracket: ) - keyword: for - column_reference: naked_identifier: product_code - keyword: in - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - column_reference: naked_identifier: a_qty - comma: ',' - column_reference: naked_identifier: a_value - end_bracket: ) - alias_expression: keyword: as quoted_identifier: "'A'" - comma: ',' - bracketed: - start_bracket: ( - column_reference: naked_identifier: b_qty - comma: ',' - column_reference: naked_identifier: b_value - end_bracket: ) - alias_expression: keyword: as quoted_identifier: "'B'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/prompt.sql000066400000000000000000000001731451700765000233000ustar00rootroot00000000000000PROMPT this is an Oracle SQL newline delimited prompt statement PROMPT PROMPT another prompt SELECT job_id from employees; sqlfluff-2.3.5/test/fixtures/dialects/oracle/prompt.yml000066400000000000000000000014421451700765000233020ustar00rootroot00000000000000# YML test files are auto-generated from 
SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 644b16a31198e9167e689a0a8420ae5b78ab8aa0a39e43b68c8328503d12790f file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: job_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/quoted_slash.sql000066400000000000000000000003151451700765000244500ustar00rootroot00000000000000select a.column_a || '\' || a.column_b test from test_table a; select * from test_table a where a.column_a || '\' || a.column_b = '10\10'; select 'Test\ ' from dual; select '\Test\' from dual; sqlfluff-2.3.5/test/fixtures/dialects/oracle/quoted_slash.yml000066400000000000000000000061301451700765000244530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 830296ff81b1d612284c9e9c29eaaa29c5af1c83047c5bd133de3a5f6b4798bc file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'\\'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_b alias_expression: naked_identifier: test from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'\\'" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: column_b - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'10\\10'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'Test\\ '" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: "'\\Test\\'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/space_between_alias_and_column.sql000066400000000000000000000003001451700765000301230ustar00rootroot00000000000000select a.column_a from test_table a where a. column_b = 1; select a.column_a from test_table a where 1 = a. column_a; select a. column_a from test_table a where 1 = a.column_a; sqlfluff-2.3.5/test/fixtures/dialects/oracle/space_between_alias_and_column.yml000066400000000000000000000054101451700765000301340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e06e67cb2a2be631371438386fa46d37d2c019b329733210725cbeb6aa392b85 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_b comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: numeric_literal: '1' comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: column_a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table alias_expression: naked_identifier: a where_clause: keyword: where expression: numeric_literal: '1' comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: a - dot: . 
- naked_identifier: column_a - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/temporary_table.sql000066400000000000000000000014401451700765000251460ustar00rootroot00000000000000CREATE GLOBAL TEMPORARY TABLE today_sales ON COMMIT PRESERVE ROWS AS SELECT * FROM orders WHERE order_date = SYSDATE; CREATE GLOBAL TEMPORARY TABLE HT_AFFAIRES (ID CHAR (36 CHAR)) ON COMMIT DELETE ROWS; CREATE GLOBAL TEMPORARY TABLE my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT DELETE ROWS; CREATE GLOBAL TEMPORARY TABLE my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT PRESERVE ROWS; CREATE PRIVATE TEMPORARY TABLE ora$ptt_my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT DROP DEFINITION; CREATE PRIVATE TEMPORARY TABLE ora$ptt_my_temp_table ( id NUMBER, description VARCHAR2(20) ) ON COMMIT PRESERVE DEFINITION; CREATE PRIVATE TEMPORARY TABLE ora$ptt_emp AS SELECT * FROM emp; sqlfluff-2.3.5/test/fixtures/dialects/oracle/temporary_table.yml000066400000000000000000000133741451700765000251610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8cbfbb0ebb67e9974ae411e9fcace308dfa8cb3db044bcba8e0a3c684853699e file: - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: today_sales - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: column_reference: naked_identifier: order_date comparison_operator: raw_comparison_operator: '=' bare_function: SYSDATE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: HT_AFFAIRES - bracketed: start_bracket: ( column_definition: naked_identifier: ID data_type: data_type_identifier: CHAR bracketed: start_bracket: ( numeric_literal: '36' word: CHAR end_bracket: ) end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: 
VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: DROP - keyword: DEFINITION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_my_temp_table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: NUMBER - comma: ',' - column_definition: naked_identifier: description data_type: data_type_identifier: VARCHAR2 bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: DEFINITION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: PRIVATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: ora$ptt_emp - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/oracle/within_group.sql000066400000000000000000000017301451700765000244750ustar00rootroot00000000000000--https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/LISTAGG.html#GUID-B6E50D8E-F467-425B-9436-F7F8BF38D466 SELECT LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date, last_name) "Emp_list", MIN(hire_date) "Earliest" FROM employees WHERE department_id = 30; SELECT department_id "Dept.", LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date) "Employees" FROM employees GROUP BY department_id ORDER BY department_id; SELECT department_id "Dept.", LISTAGG(last_name, '; ' ON OVERFLOW TRUNCATE '...') WITHIN GROUP (ORDER BY hire_date) "Employees" FROM employees GROUP BY department_id ORDER BY department_id; SELECT department_id "Dept", hire_date "Date", last_name "Name", LISTAGG(last_name, '; ') WITHIN GROUP (ORDER BY hire_date, last_name) OVER (PARTITION BY department_id) as "Emp_list" FROM employees WHERE hire_date < '01-SEP-2003' ORDER BY "Dept", "Date", "Name"; sqlfluff-2.3.5/test/fixtures/dialects/oracle/within_group.yml000066400000000000000000000204521451700765000245010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4ada825227950ee63592765397f58f0d03f157219fd5fa98ffd93b963c93fe48 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: LISTAGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date - comma: ',' - column_reference: naked_identifier: last_name end_bracket: ) alias_expression: quoted_identifier: '"Emp_list"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MIN bracketed: start_bracket: ( expression: column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Earliest"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: department_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '30' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept."' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Employees"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept."' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - listagg_overflow_clause: - keyword: 'ON' - keyword: OVERFLOW - keyword: TRUNCATE - quoted_identifier: "'...'" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date end_bracket: ) alias_expression: quoted_identifier: '"Employees"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: department_id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: department_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: department_id alias_expression: quoted_identifier: '"Dept"' - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date alias_expression: quoted_identifier: '"Date"' - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name alias_expression: quoted_identifier: '"Name"' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LISTAGG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: last_name - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: hire_date - comma: ',' - column_reference: naked_identifier: last_name end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: department_id end_bracket: ) alias_expression: keyword: as quoted_identifier: '"Emp_list"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: WHERE expression: column_reference: naked_identifier: hire_date comparison_operator: raw_comparison_operator: < quoted_literal: "'01-SEP-2003'" orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '"Dept"' - comma: ',' - column_reference: quoted_identifier: '"Date"' - comma: ',' - column_reference: quoted_identifier: '"Name"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/000077500000000000000000000000001451700765000216365ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/postgres/.sqlfluff000066400000000000000000000000361451700765000234600ustar00rootroot00000000000000[sqlfluff] dialect = postgres sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_database.sql000066400000000000000000000024551451700765000253200ustar00rootroot00000000000000ALTER DATABASE db; ALTER DATABASE db ALLOW_CONNECTIONS true; ALTER DATABASE db WITH ALLOW_CONNECTIONS true; ALTER DATABASE db CONNECTION LIMIT 10; ALTER DATABASE db WITH CONNECTION LIMIT 10; ALTER DATABASE db IS_TEMPLATE true; ALTER DATABASE db WITH IS_TEMPLATE true; ALTER DATABASE db IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db WITH IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db CONNECTION LIMIT 10 IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db WITH CONNECTION LIMIT 10 IS_TEMPLATE true ALLOW_CONNECTIONS true; ALTER DATABASE db RENAME TO new_db; ALTER DATABASE db OWNER TO other_role; ALTER DATABASE db OWNER TO CURRENT_ROLE; ALTER DATABASE db OWNER TO CURRENT_USER; ALTER DATABASE db OWNER TO SESSION_USER; -- Issue:2017 ALTER DATABASE postgres SET password_encryption TO 'scram-sha-256'; ALTER DATABASE db SET TABLESPACE new_tablespace; ALTER DATABASE db SET parameter1 TO 1; ALTER DATABASE db SET parameter1 TO 'some_value'; ALTER DATABASE db SET parameter1 TO DEFAULT; ALTER DATABASE db SET parameter1 = 1; ALTER DATABASE db SET parameter1 = 'some_value'; ALTER DATABASE db SET parameter1 = DEFAULT; ALTER DATABASE db SET parameter1 FROM CURRENT; ALTER USER some_user SET default_transaction_read_only = ON; ALTER DATABASE db RESET parameter1; ALTER DATABASE db RESET ALL; 
sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_database.yml000066400000000000000000000174161451700765000253250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b5ca029c5ff1a0e82e534f264dfe874a5dadb9863c930dc8118f51b94e818f3b file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: IS_TEMPLATE - boolean_literal: 'true' - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RENAME - keyword: TO - database_reference: naked_identifier: new_db - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - object_reference: naked_identifier: other_role - statement_terminator: ; - statement: 
alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - object_reference: naked_identifier: CURRENT_USER - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: postgres - keyword: SET - parameter: password_encryption - keyword: TO - quoted_literal: "'scram-sha-256'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - quoted_literal: "'some_value'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: SET - parameter: parameter1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: some_user - keyword: SET - parameter: default_transaction_read_only - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'ON' - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RESET - parameter: parameter1 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: db - keyword: RESET - keyword: ALL - statement_terminator: ; 
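As the header comment in each .yml fixture states, these files are auto-generated and hash-locked: the _hash field must match a hash SQLFluff computes from the fixture, so the YAML is not edited by hand. After adding or altering a .sql fixture such as the ones in this directory, the documented workflow is to regenerate the YAML with the script quoted in those headers — sketched here as a subprocess call, run from the repository root:

import subprocess

# Regenerate the parse-fixture YAML files (command quoted verbatim from the
# fixture headers); check=True makes a failed regeneration raise.
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)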
sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_default_privileges.sql000066400000000000000000000045541451700765000274330ustar00rootroot00000000000000ALTER DEFAULT PRIVILEGES FOR USER my_user GRANT SELECT ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user IN SCHEMA my_schema GRANT INSERT ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT UPDATE ON TABLES TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema, your_schema GRANT DELETE ON TABLES TO my_group WITH GRANT OPTION; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user GRANT TRUNCATE ON TABLES TO GROUP my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT REFERENCES, TRIGGER ON TABLES TO PUBLIC; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT ALL ON SEQUENCES TO GROUP my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT EXECUTE ON ROUTINES TO my_group; ALTER DEFAULT PRIVILEGES GRANT ALL PRIVILEGES ON FUNCTIONS TO my_group; ALTER DEFAULT PRIVILEGES IN SCHEMA my_schema, your_schema GRANT USAGE ON TYPES TO my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT USAGE ON SCHEMAS TO my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user GRANT CREATE ON SCHEMAS TO my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user, your_user IN SCHEMA my_schema, your_schema GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES TO GROUP my_group, PUBLIC WITH GRANT OPTION; ALTER DEFAULT PRIVILEGES FOR USER my_user REVOKE ALL ON TABLES FROM my_group; ALTER DEFAULT PRIVILEGES FOR USER my_user IN SCHEMA my_schema REVOKE SELECT ON TABLES FROM GROUP my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema, your_schema REVOKE INSERT ON TABLES FROM PUBLIC CASCADE; ALTER DEFAULT PRIVILEGES FOR ROLE my_user IN SCHEMA my_schema REVOKE UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES FROM PUBLIC RESTRICT; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user REVOKE ALL PRIVILEGES ON SEQUENCES FROM my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user REVOKE EXECUTE ON FUNCTIONS FROM my_group; ALTER DEFAULT PRIVILEGES REVOKE EXECUTE ON ROUTINES FROM my_group; ALTER DEFAULT PRIVILEGES IN SCHEMA my_schema, your_schema REVOKE USAGE ON TYPES FROM my_group, GROUP your_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user REVOKE USAGE, CREATE ON TYPES FROM my_group; ALTER DEFAULT PRIVILEGES FOR ROLE my_user, your_user IN SCHEMA my_schema, your_schema REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER ON TABLES FROM GROUP my_group, PUBLIC CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_default_privileges.yml000066400000000000000000000426511451700765000274350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6cecbb7ede0ff8bce72a86691b5b2d4883afc280921f2f35fb7c28392b1ef09e file: - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: SELECT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: INSERT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: UPDATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: DELETE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: TRUNCATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: 
FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: ALL - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SEQUENCES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - alter_default_privileges_schema_object: keyword: FUNCTIONS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SCHEMAS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: CREATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SCHEMAS - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_grant: - keyword: GRANT - 
alter_default_privileges_object_privilege: - keyword: SELECT - comma: ',' - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: TO - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: ALL - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: SELECT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: INSERT - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: CASCADE - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: RESTRICT - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: ALL - keyword: PRIVILEGES - keyword: 'ON' - alter_default_privileges_schema_object: keyword: SEQUENCES - 
keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: FUNCTIONS - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: keyword: USAGE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: your_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: USAGE - comma: ',' - keyword: CREATE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TYPES - keyword: FROM - alter_default_privileges_to_from_roles: role_reference: naked_identifier: my_group - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: ROLE - object_reference: naked_identifier: my_user - comma: ',' - object_reference: naked_identifier: your_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - comma: ',' - schema_reference: naked_identifier: your_schema - alter_default_privileges_revoke: - keyword: REVOKE - alter_default_privileges_object_privilege: - keyword: SELECT - comma: ',' - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - comma: ',' - keyword: TRUNCATE - comma: ',' - keyword: REFERENCES - comma: ',' - keyword: TRIGGER - keyword: 'ON' - alter_default_privileges_schema_object: keyword: TABLES - keyword: FROM - alter_default_privileges_to_from_roles: keyword: GROUP role_reference: naked_identifier: my_group - comma: ',' - alter_default_privileges_to_from_roles: role_reference: naked_identifier: PUBLIC - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_domain.sql000066400000000000000000000005631451700765000250210ustar00rootroot00000000000000ALTER DOMAIN zipcode SET NOT NULL; ALTER DOMAIN zipcode DROP NOT NULL; ALTER DOMAIN zipcode ADD CONSTRAINT zipchk CHECK 
(char_length(VALUE) = 5); ALTER DOMAIN zipcode DROP CONSTRAINT zipchk; ALTER DOMAIN zipcode RENAME CONSTRAINT zipchk TO zip_check; ALTER DOMAIN zipcode SET SCHEMA customers; alter domain oname add constraint "test" check (length(value) < 512); sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_domain.yml000066400000000000000000000064441451700765000250270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0842cce75d5ff763da81ba1a1620dd6a7387bffae5ba7feb612778a0d6bd75d6 file: - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: SET - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: ADD - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: char_length bracketed: start_bracket: ( expression: column_reference: naked_identifier: VALUE end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: RENAME - keyword: CONSTRAINT - object_reference: naked_identifier: zipchk - keyword: TO - object_reference: naked_identifier: zip_check - statement_terminator: ; - statement: alter_domain_statement: - keyword: ALTER - keyword: DOMAIN - object_reference: naked_identifier: zipcode - keyword: SET - keyword: SCHEMA - object_reference: naked_identifier: customers - statement_terminator: ; - statement: alter_domain_statement: - keyword: alter - keyword: domain - object_reference: naked_identifier: oname - keyword: add - keyword: constraint - object_reference: quoted_identifier: '"test"' - keyword: check - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: length bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: raw_comparison_operator: < numeric_literal: '512' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_function.sql000066400000000000000000000113331451700765000253740ustar00rootroot00000000000000-- Issue:2089 ALTER FUNCTION fn OWNER TO auser; ALTER FUNCTION fn(int, arg2 text) OWNER TO auser; ALTER FUNCTION fn OWNER TO auser; ALTER FUNCTION fn OWNER TO CURRENT_ROLE; ALTER FUNCTION fn OWNER TO CURRENT_USER; ALTER FUNCTION fn OWNER TO SESSION_USER; ALTER FUNCTION public.fn OWNER TO auser; ALTER FUNCTION public.fn OWNER TO CURRENT_USER; ALTER FUNCTION public.fn OWNER TO CURRENT_ROLE; ALTER FUNCTION public.fn 
OWNER TO SESSION_USER; ALTER FUNCTION fn CALLED ON NULL INPUT; ALTER FUNCTION public.fn CALLED ON NULL INPUT; ALTER FUNCTION fn CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION fn(arg1 int) CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION public.fn(arg1 int) CALLED ON NULL INPUT RESTRICT; ALTER FUNCTION fn RETURNS NULL ON NULL INPUT; ALTER FUNCTION fn(int, text) RETURNS NULL ON NULL INPUT; ALTER FUNCTION fn RETURNS NULL ON NULL INPUT RESTRICT; ALTER FUNCTION public.fn RETURNS NULL ON NULL INPUT RESTRICT; ALTER FUNCTION fn(int, text) RETURNS NULL ON NULL INPUT RESTRICT; ALTER FUNCTION fn STRICT; ALTER FUNCTION fn(int) STRICT; ALTER FUNCTION fn STRICT RESTRICT; ALTER FUNCTION public.fn STRICT RESTRICT; ALTER FUNCTION fn(arg1 int) STRICT RESTRICT; ALTER FUNCTION fn IMMUTABLE; ALTER FUNCTION fn IMMUTABLE RESTRICT; ALTER FUNCTION public.fn IMMUTABLE RESTRICT; ALTER FUNCTION fn STABLE; ALTER FUNCTION public.fn STABLE; ALTER FUNCTION fn STABLE RESTRICT; ALTER FUNCTION fn VOLATILE; ALTER FUNCTION fn VOLATILE RESTRICT; ALTER FUNCTION fn(int, arg2 text) IMMUTABLE; ALTER FUNCTION fn(int, arg2 text) IMMUTABLE RESTRICT; ALTER FUNCTION public.fn(int, arg2 text) IMMUTABLE RESTRICT; ALTER FUNCTION fn(int) STABLE; ALTER FUNCTION fn(int) STABLE RESTRICT; ALTER FUNCTION fn(arg int) VOLATILE; ALTER FUNCTION fn(arg int) VOLATILE RESTRICT; ALTER FUNCTION fn LEAKPROOF; ALTER FUNCTION fn LEAKPROOF RESTRICT; ALTER FUNCTION fn(int, arg2 text) LEAKPROOF; ALTER FUNCTION fn(int, arg2 text) LEAKPROOF RESTRICT; ALTER FUNCTION fn NOT LEAKPROOF; ALTER FUNCTION fn NOT LEAKPROOF RESTRICT; ALTER FUNCTION fn(arg int) NOT LEAKPROOF; ALTER FUNCTION fn(arg int) NOT LEAKPROOF RESTRICT; ALTER FUNCTION fn SECURITY INVOKER; ALTER FUNCTION fn SECURITY INVOKER RESTRICT; ALTER FUNCTION fn(int, text, boolean) SECURITY INVOKER; ALTER FUNCTION fn(int, text, boolean) SECURITY INVOKER RESTRICT; ALTER FUNCTION fn EXTERNAL SECURITY INVOKER; ALTER FUNCTION fn EXTERNAL SECURITY INVOKER RESTRICT; ALTER FUNCTION fn(int, text) EXTERNAL SECURITY INVOKER; ALTER FUNCTION fn(int, text) EXTERNAL SECURITY INVOKER RESTRICT; ALTER FUNCTION fn SECURITY DEFINER; ALTER FUNCTION fn SECURITY DEFINER RESTRICT; ALTER FUNCTION fn EXTERNAL SECURITY DEFINER; ALTER FUNCTION fn EXTERNAL SECURITY DEFINER RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text, boolean) EXTERNAL SECURITY DEFINER; ALTER FUNCTION fn(arg1 int, arg2 text, boolean) EXTERNAL SECURITY DEFINER RESTRICT; ALTER FUNCTION fn PARALLEL UNSAFE; ALTER FUNCTION fn(arg int) PARALLEL UNSAFE; ALTER FUNCTION fn PARALLEL UNSAFE RESTRICT; ALTER FUNCTION fn(int) PARALLEL UNSAFE RESTRICT; ALTER FUNCTION fn PARALLEL RESTRICTED; ALTER FUNCTION fn PARALLEL RESTRICTED RESTRICT; ALTER FUNCTION fn(int, text) PARALLEL RESTRICTED; ALTER FUNCTION fn(text) PARALLEL RESTRICTED RESTRICT; ALTER FUNCTION fn PARALLEL SAFE; ALTER FUNCTION public.fn PARALLEL SAFE RESTRICT; ALTER FUNCTION fn(text, arg2 int) PARALLEL SAFE; ALTER FUNCTION fn(text, text) PARALLEL SAFE RESTRICT; ALTER FUNCTION fn COST 10; ALTER FUNCTION fn COST 10 RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text) COST 10; ALTER FUNCTION fn(arg1 int, arg2 text) COST 10 RESTRICT; ALTER FUNCTION fn ROWS 10; ALTER FUNCTION fn ROWS 10 RESTRICT; ALTER FUNCTION fn(arg1 int, arg2 text, int, int) ROWS 10; ALTER FUNCTION fn(arg1 int, arg2 text, int, int) ROWS 10 RESTRICT; ALTER FUNCTION fn SUPPORT supportfn; ALTER FUNCTION public.fn SUPPORT supportfn; ALTER FUNCTION fn(int, int) SUPPORT supportfn; ALTER FUNCTION fn(int, int) SUPPORT supportfn RESTRICT; ALTER FUNCTION fn SET param1 TO 1; 
ALTER FUNCTION fn SET param1 TO 'value'; ALTER FUNCTION fn SET param1 TO DEFAULT; ALTER FUNCTION fn SET param1 TO 'value' RESTRICT; ALTER FUNCTION fn(int, int) SET param1 TO DEFAULT; ALTER FUNCTION fn(int, int) SET param1 TO DEFAULT RESTRICT; ALTER FUNCTION fn SET param1 TO avalue; ALTER FUNCTION fn SET param1 = 1; ALTER FUNCTION fn SET param1 = 'value'; ALTER FUNCTION fn SET param1 = avalue; ALTER FUNCTION fn SET param1 = DEFAULT; ALTER FUNCTION fn(arg1 int, arg2 text) SET param1 = 'value'; ALTER FUNCTION fn(int) SET param1 = avalue; ALTER FUNCTION fn(text, int) SET param1 = avalue; ALTER FUNCTION fn(int, int) SUPPORT supportfn; ALTER FUNCTION fn SET param1 FROM CURRENT; ALTER FUNCTION fn(boolean) SET param1 FROM CURRENT; ALTER FUNCTION public.fn(boolean, int) SET param1 FROM CURRENT; ALTER FUNCTION fn RESET param1; ALTER FUNCTION fn RESET ALL; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_function.yml000066400000000000000000001171071451700765000254040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f5189a71f81828e83117fdfcf84db19cd6ad2423d302ee1285375a3c90e6f193 file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_ROLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: SESSION_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: auser - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: CURRENT_ROLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - keyword: OWNER - keyword: TO - parameter: SESSION_USER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: STRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: STRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg1 data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: STRICT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: IMMUTABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: STABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: VOLATILE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: VOLATILE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: IMMUTABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: IMMUTABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: STABLE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: STABLE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: VOLATILE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: VOLATILE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: 
; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: NOT - keyword: LEAKPROOF - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - 
keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: INVOKER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: boolean - end_bracket: ) - alter_function_action_segment: - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( parameter: arg data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - keyword: RESTRICT - 
statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: UNSAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: RESTRICTED - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - parameter: arg2 - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: PARALLEL - keyword: SAFE - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: COST numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: COST - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: COST numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: - keyword: COST - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: ROWS numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: ROWS - numeric_literal: '10' - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: ROWS numeric_literal: '10' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: ROWS - numeric_literal: '10' - keyword: 
RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SUPPORT - parameter: supportfn - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - numeric_literal: '1' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - quoted_literal: "'value'" - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - keyword: DEFAULT - keyword: RESTRICT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: TO - naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: 
raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - parameter: arg1 - data_type: keyword: int - comma: ',' - parameter: arg2 - data_type: keyword: text - end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: text - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SET parameter: param1 comparison_operator: raw_comparison_operator: '=' naked_identifier: avalue - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: keyword: SUPPORT parameter: supportfn - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: boolean end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: fn - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: boolean - comma: ',' - data_type: keyword: int - end_bracket: ) - alter_function_action_segment: - keyword: SET - parameter: param1 - keyword: FROM - keyword: CURRENT - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: keyword: RESET parameter: param1 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: fn - alter_function_action_segment: - keyword: RESET - keyword: ALL - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_index.sql

ALTER INDEX distributors RENAME TO suppliers;
ALTER INDEX distributors SET TABLESPACE fasttablespace;
ALTER INDEX distributors SET (fillfactor = 75);
ALTER INDEX coord_idx ALTER COLUMN 3 SET STATISTICS 1000;
ALTER INDEX IF EXISTS foo ATTACH PARTITION bar;
ALTER INDEX foo NO DEPENDS ON EXTENSION barr;
ALTER INDEX foo RESET (thing, other_thing);
ALTER INDEX foo ALTER 4 SET STATISTICS 7;
ALTER INDEX ALL IN TABLESPACE foo OWNED BY role_1, account_admin, steve SET TABLESPACE bar NOWAIT;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_index.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
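
The header comment above describes the contract for these fixtures: each `.sql` file is parsed and serialised to a `.yml` tree. As a minimal sketch (not part of the repository), the same parse check can be run ad hoc through sqlfluff's simple Python API; `sqlfluff.parse` and its `dialect` argument are the documented public interface, though the exact return shape varies between versions.

import sqlfluff

SQL = "ALTER INDEX distributors RENAME TO suppliers;"

# sqlfluff.parse raises an exception if the dialect cannot parse the input;
# a clean return means the statement is covered by the postgres grammar.
parsed = sqlfluff.parse(SQL, dialect="postgres")
print("parsed without errors:", parsed is not None)
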
_hash: 5cf064cb4d9880172559d9988a04e7ea296d8329e620aeb900c6237cf9ba5ee3 file: - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: RENAME - keyword: TO - index_reference: naked_identifier: suppliers - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: fasttablespace - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: distributors - keyword: SET - bracketed: start_bracket: ( parameter: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '75' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: coord_idx - keyword: ALTER - keyword: COLUMN - numeric_literal: '3' - keyword: SET - keyword: STATISTICS - numeric_literal: '1000' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: foo - keyword: ATTACH - keyword: PARTITION - index_reference: naked_identifier: bar - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: 'NO' - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: barr - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: RESET - bracketed: - start_bracket: ( - parameter: thing - comma: ',' - parameter: other_thing - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - index_reference: naked_identifier: foo - keyword: ALTER - numeric_literal: '4' - keyword: SET - keyword: STATISTICS - numeric_literal: '7' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: foo - keyword: OWNED - keyword: BY - role_reference: naked_identifier: role_1 - comma: ',' - role_reference: naked_identifier: account_admin - comma: ',' - role_reference: naked_identifier: steve - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: bar - keyword: NOWAIT - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_materialized_view.sql

ALTER MATERIALIZED VIEW bar ALTER column_name SET STATISTICS 1;
ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET STATISTICS 1;
ALTER MATERIALIZED VIEW bar ALTER column_name SET ( attribute_option = 1);
ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET ( attribute_option1 = 'avalue', attribute_option2 = 'avalue' );
ALTER MATERIALIZED VIEW bar ALTER column_name RESET ( attribute_option );
ALTER MATERIALIZED VIEW bar ALTER column_name RESET ( attribute_option, attribute_option2 );
ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name RESET ( attribute_option );
ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name RESET ( attribute_option, attribute_option2 );
ALTER MATERIALIZED VIEW bar ALTER column_name SET STORAGE PLAIN;
ALTER MATERIALIZED VIEW bar ALTER COLUMN column_name SET STORAGE EXTENDED;
ALTER MATERIALIZED VIEW bar CLUSTER ON index_name;
ALTER MATERIALIZED VIEW bar SET WITHOUT CLUSTER;
ALTER MATERIALIZED VIEW bar SET ( storage_parameter);
ALTER MATERIALIZED VIEW bar SET ( storage_parameter, storage_parameter);
ALTER MATERIALIZED VIEW bar SET ( storage_parameter = 'some_value', storage_parameter );
ALTER MATERIALIZED VIEW bar RESET ( storage_parameter);
ALTER MATERIALIZED VIEW bar RESET ( storage_parameter, storage_parameter);
ALTER MATERIALIZED VIEW bar OWNER TO baz_role;
ALTER MATERIALIZED VIEW bar OWNER TO "baz-role";
ALTER MATERIALIZED VIEW bar DEPENDS ON EXTENSION baz;
ALTER MATERIALIZED VIEW bar NO DEPENDS ON EXTENSION baz;
ALTER MATERIALIZED VIEW bar RENAME column_name TO new_column_name;
ALTER MATERIALIZED VIEW IF EXISTS bar RENAME COLUMN column_name TO new_column_name;
ALTER MATERIALIZED VIEW bar RENAME TO baz;
ALTER MATERIALIZED VIEW IF EXISTS bar RENAME TO baz;
ALTER MATERIALIZED VIEW bar SET SCHEMA new_schema;
ALTER MATERIALIZED VIEW IF EXISTS bar SET SCHEMA new_schema;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace SET TABLESPACE new_tablespace;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace SET TABLESPACE new_tablespace NOWAIT;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name SET TABLESPACE new_tablespace;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name SET TABLESPACE new_tablespace NOWAIT;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name, role_name_2 SET TABLESPACE new_tablespace;
ALTER MATERIALIZED VIEW ALL IN TABLESPACE old_tablespace OWNED BY role_name, role_name_2 SET TABLESPACE new_tablespace NOWAIT;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_materialized_view.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
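
The generated trees are ordinary YAML, so they can be inspected with any YAML loader. A rough sketch, assuming PyYAML is available and relying only on the `file`/`statement` layout visible in the fixtures above:

import yaml

with open("test/fixtures/dialects/postgres/alter_materialized_view.yml") as f:
    fixture = yaml.safe_load(f)

# The hash field enforces that the file is regenerated, not hand-edited.
print(fixture["_hash"])

# "file" is a list of nodes; each statement node wraps a single
# statement-type key such as alter_materialized_view_statement.
for node in fixture["file"]:
    if isinstance(node, dict) and "statement" in node:
        print(next(iter(node["statement"])))
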
_hash: db066c1cd10146838f254fc78f072e577cbf02eb04feae81b20b04857e20d552 file: - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - keyword: STATISTICS - numeric_literal: '1' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - keyword: STATISTICS - numeric_literal: '1' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - bracketed: start_bracket: ( parameter: attribute_option comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - bracketed: - start_bracket: ( - parameter: attribute_option1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'avalue'" - comma: ',' - parameter: attribute_option2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'avalue'" - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: start_bracket: ( parameter: attribute_option end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: - start_bracket: ( - parameter: attribute_option - comma: ',' - parameter: attribute_option2 - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: start_bracket: ( parameter: attribute_option end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: RESET - bracketed: - start_bracket: ( - parameter: attribute_option - comma: ',' - parameter: attribute_option2 - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - 
table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - column_reference: naked_identifier: column_name - keyword: SET - keyword: STORAGE - keyword: PLAIN - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: SET - keyword: STORAGE - keyword: EXTENDED - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: CLUSTER - keyword: 'ON' - parameter: index_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: SET - keyword: WITHOUT - keyword: CLUSTER - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: start_bracket: ( parameter: storage_parameter end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: - start_bracket: ( - parameter: storage_parameter - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: SET bracketed: - start_bracket: ( - parameter: storage_parameter - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: RESET bracketed: start_bracket: ( parameter: storage_parameter end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: keyword: RESET bracketed: - start_bracket: ( - parameter: storage_parameter - comma: ',' - parameter: storage_parameter - end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: OWNER - keyword: TO - object_reference: naked_identifier: baz_role - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - alter_materialized_view_action_segment: - keyword: OWNER - keyword: TO - object_reference: quoted_identifier: '"baz-role"' - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - 
table_reference: naked_identifier: bar - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: 'NO' - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RENAME - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RENAME - keyword: TO - table_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RENAME - keyword: TO - table_reference: naked_identifier: baz - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: 
naked_identifier: role_name - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - comma: ',' - object_reference: naked_identifier: role_name_2 - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: ALTER - keyword: MATERIALIZED - keyword: VIEW - keyword: ALL - keyword: IN - keyword: TABLESPACE - tablespace_reference: naked_identifier: old_tablespace - keyword: OWNED - keyword: BY - object_reference: naked_identifier: role_name - comma: ',' - object_reference: naked_identifier: role_name_2 - keyword: SET - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - keyword: NOWAIT - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_policy.sql

ALTER POLICY account_managers ON accounts RENAME TO account_users;
ALTER POLICY account_managers ON accounts TO current_user;
ALTER POLICY account_managers ON accounts TO public, session_user;
ALTER POLICY account_managers ON accounts WITH CHECK ( NOT accounts_is_excluded_full_name(full_name) );
ALTER POLICY account_managers ON accounts WITH CHECK ( col > 10 );
ALTER POLICY account_managers ON accounts USING (username = current_user);
ALTER POLICY sales_rep_is_self ON invoices WITH CHECK (sales_rep = CURRENT_USER AND CURRENT_USER IN ( SELECT user_id FROM allowed_users ));

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_policy.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5eb64c5153c70b70c75901212bc899bdad414ad3709e4f45899a842bd5d96c39 file: - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RENAME - keyword: TO - object_reference: naked_identifier: account_users - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - role_reference: naked_identifier: current_user - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: TO - role_reference: naked_identifier: public - comma: ',' - keyword: session_user - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: keyword: NOT function: function_name: function_name_identifier: accounts_is_excluded_full_name bracketed: start_bracket: ( expression: column_reference: naked_identifier: full_name end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: USING - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: username - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: current_user end_bracket: ) - statement_terminator: ; - statement: alter_policy_statement: - keyword: ALTER - keyword: POLICY - object_reference: naked_identifier: sales_rep_is_self - keyword: 'ON' - table_reference: naked_identifier: invoices - keyword: WITH - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: sales_rep - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: CURRENT_USER - binary_operator: AND - column_reference: naked_identifier: CURRENT_USER - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: user_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: allowed_users end_bracket: ) end_bracket: ) - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_procedure.sql

ALTER PROCEDURE insert_data(integer, integer) RENAME TO insert_record;
ALTER PROCEDURE insert_data(integer, integer) OWNER TO joe;
ALTER PROCEDURE insert_data(integer, integer) OWNER TO CURRENT_USER;
ALTER PROCEDURE insert_data(integer, integer) SET SCHEMA accounting;
ALTER PROCEDURE insert_data(integer, integer) DEPENDS ON EXTENSION myext;
ALTER PROCEDURE check_password(text) SET search_path = admin, pg_temp;
ALTER PROCEDURE check_password(text) RESET search_path;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_procedure.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.

_hash: 1778fedc288d89d634477c6fc5d8e8f07e816b42b47d6a2ce491e73b64386e16 file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: insert_record - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: joe - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: CURRENT_USER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: accounting - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: insert_data - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: DEPENDS - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: myext - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text end_bracket: ) - keyword: SET - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - parameter: admin - comma: ',' - parameter: pg_temp - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: text end_bracket: ) - alter_procedure_action_segment: keyword: RESET parameter: search_path - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_publication.sql

-- More thorough testing of the PublicationObjectsSegment is in postgres_create_publication.sql.
ALTER PUBLICATION abc ADD TABLE def;
ALTER PUBLICATION abc ADD TABLE def, TABLE ghi;
ALTER PUBLICATION abc ADD TABLE def, ghi*, ONLY jkl, ONLY (mno);
ALTER PUBLICATION abc SET TABLE def, ghi, TABLES IN SCHEMA y, z, CURRENT_SCHEMA;
ALTER PUBLICATION abc SET (publish = 'insert,update', publish_via_partition_root = TRUE);
ALTER PUBLICATION abc OWNER TO bob;
ALTER PUBLICATION abc OWNER TO CURRENT_ROLE;
ALTER PUBLICATION abc OWNER TO CURRENT_USER;
ALTER PUBLICATION abc OWNER TO SESSION_USER;
ALTER PUBLICATION abc RENAME TO def;
-- examples from https://www.postgresql.org/docs/15/sql-alterpublication.html
ALTER PUBLICATION noinsert SET (publish = 'update, delete');
ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;
ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments;
ALTER PUBLICATION sales_publication ADD TABLES IN SCHEMA marketing, sales;
ALTER PUBLICATION production_publication ADD TABLE users, departments, TABLES IN SCHEMA production;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_publication.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
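
Putting the header comment's instruction into practice: adding a dialect test case means dropping a new `.sql` file next to the ones above and re-running the generator script named in the comment. The sketch below assumes it is run from the repository root; the new file name is hypothetical, and the generator's exact invocation options are not shown here.

import subprocess
from pathlib import Path

# Hypothetical new test case for the postgres dialect.
case = Path("test/fixtures/dialects/postgres/alter_publication_extra.sql")
case.write_text("ALTER PUBLICATION abc RENAME TO def;\n")

# Regenerate the YAML parse-tree fixtures (including the required _hash field).
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)
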
naked_identifier: abc - keyword: SET - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'insert,update'" - comma: ',' - definition_parameter: properties_naked_identifier: publish_via_partition_root comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - end_bracket: ) - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - role_reference: naked_identifier: bob - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: RENAME - keyword: TO - publication_reference: naked_identifier: def - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: noinsert - keyword: SET - definition_parameters: bracketed: start_bracket: ( definition_parameter: properties_naked_identifier: publish comparison_operator: raw_comparison_operator: '=' quoted_literal: "'update, delete'" end_bracket: ) - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: mypublication - keyword: ADD - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: firstname - end_bracket: ) - comma: ',' - publication_table: table_reference: naked_identifier: departments - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: mypublication - keyword: SET - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: users bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - comma: ',' - column_reference: naked_identifier: firstname - comma: ',' - column_reference: naked_identifier: lastname - end_bracket: ) - comma: ',' - publication_objects: keyword: TABLE publication_table: table_reference: naked_identifier: departments - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: sales_publication - keyword: ADD - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: marketing - comma: ',' - schema_reference: naked_identifier: sales - statement_terminator: ; - statement: alter_publication_statement: - keyword: ALTER - keyword: PUBLICATION - publication_reference: naked_identifier: 
production_publication - keyword: ADD - publication_objects: - keyword: TABLE - publication_table: table_reference: naked_identifier: users - comma: ',' - publication_table: table_reference: naked_identifier: departments - comma: ',' - publication_objects: - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: production - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_role.sql

ALTER ROLE davide WITH PASSWORD 'hu8jmn3';
ALTER ROLE davide WITH PASSWORD NULL;
ALTER ROLE chris VALID UNTIL 'May 4 12:00:00 2015 +1';
ALTER ROLE fred VALID UNTIL 'infinity';
ALTER ROLE worker_bee SET maintenance_work_mem = '100000';
ALTER ROLE fred IN DATABASE devel SET client_min_messages TO DEFAULT;
ALTER ROLE miriam CREATEROLE CREATEDB;
ALTER USER davide WITH PASSWORD 'hu8jmn3';
ALTER USER davide WITH PASSWORD NULL;
ALTER USER chris VALID UNTIL 'May 4 12:00:00 2015 +1';
ALTER USER fred VALID UNTIL 'infinity';
ALTER USER worker_bee SET maintenance_work_mem = '100000';
ALTER USER fred IN DATABASE devel SET client_min_messages TO DEFAULT;
ALTER USER miriam CREATEROLE CREATEDB;
-- more SET tests:
ALTER ROLE fred SET testing FROM CURRENT;
ALTER ROLE fred IN DATABASE devel SET testing FROM CURRENT;
ALTER ROLE fred IN DATABASE devel SET testing TO 1234;
ALTER ROLE fred IN DATABASE devel SET testing = 1234;
ALTER ROLE fred IN DATABASE devel SET testing TO DEFAULT;
ALTER ROLE fred IN DATABASE devel SET testing = DEFAULT;
ALTER ROLE fred IN DATABASE devel SET testing = TRUE;
ALTER ROLE fred IN DATABASE devel SET testing = FALSE;
ALTER ROLE fred IN DATABASE devel SET testing = 'string value';
ALTER ROLE fred IN DATABASE devel SET testing = on, off, auto;
ALTER ROLE fred RESET ALL;
ALTER ROLE fred RESET testing;
ALTER ROLE fred IN DATABASE devel RESET ALL;
ALTER ROLE fred IN DATABASE devel RESET testing;
-- CURRENT_ROLE/CURRENT_USER/SESSION_USER and ALL
ALTER USER CURRENT_ROLE WITH PASSWORD NULL;
ALTER USER CURRENT_USER WITH PASSWORD NULL;
ALTER USER SESSION_USER WITH PASSWORD NULL;
ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing FROM CURRENT;
ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing TO 1234;
ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing = 1234;
ALTER ROLE CURRENT_ROLE IN DATABASE devel SET testing TO DEFAULT;
ALTER ROLE ALL RESET ALL;
ALTER ROLE ALL RESET testing;
ALTER USER ALL IN DATABASE devel RESET ALL;
ALTER ROLE ALL IN DATABASE devel RESET testing;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_role.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
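
A related check, sketched here under the assumption that `sqlfluff.lint` surfaces parsing failures with the `PRS` code (as the CLI does): every fixture `.sql` file should parse cleanly under the postgres dialect. This is illustrative only; the repository's real fixture tests are more thorough.

from pathlib import Path

import sqlfluff

def test_postgres_alter_fixtures_parse():
    # Walk the ALTER-statement fixtures shown in this directory.
    for sql_path in Path("test/fixtures/dialects/postgres").glob("alter_*.sql"):
        violations = sqlfluff.lint(sql_path.read_text(), dialect="postgres")
        # PRS violations (assumed code) indicate a dialect grammar gap.
        parse_errors = [v for v in violations if v.get("code") == "PRS"]
        assert not parse_errors, (sql_path, parse_errors)
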
_hash: 6d26811fd37b12bf88d8f394c7385e1ceafa245efffe0c28c39ab14060ad8596
file:
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - quoted_literal: "'hu8jmn3'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: chris
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'May 4 12:00:00 2015 +1'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'infinity'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: worker_bee
    - keyword: SET
    - parameter: maintenance_work_mem
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'100000'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: client_min_messages
    - keyword: TO
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: miriam
    - keyword: CREATEROLE
    - keyword: CREATEDB
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - quoted_literal: "'hu8jmn3'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: chris
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'May 4 12:00:00 2015 +1'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: fred
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'infinity'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: worker_bee
    - keyword: SET
    - parameter: maintenance_work_mem
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'100000'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: client_min_messages
    - keyword: TO
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - role_reference:
        naked_identifier: miriam
    - keyword: CREATEROLE
    - keyword: CREATEDB
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: SET
    - parameter: testing
    - keyword: FROM
    - keyword: CURRENT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: FROM
    - keyword: CURRENT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: TO
    - numeric_literal: '1234'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - numeric_literal: '1234'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: TO
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'TRUE'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - boolean_literal: 'FALSE'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'string value'"
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - naked_identifier: 'on'
    - comma: ','
    - naked_identifier: 'off'
    - comma: ','
    - naked_identifier: auto
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: RESET
    - parameter: ALL
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: RESET
    - parameter: testing
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: RESET
    - parameter: ALL
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - role_reference:
        naked_identifier: fred
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: RESET
    - parameter: testing
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: CURRENT_ROLE
    - keyword: WITH
    - keyword: PASSWORD
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: CURRENT_USER
    - keyword: WITH
    - keyword: PASSWORD
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: SESSION_USER
    - keyword: WITH
    - keyword: PASSWORD
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: CURRENT_ROLE
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: FROM
    - keyword: CURRENT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: CURRENT_ROLE
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: TO
    - numeric_literal: '1234'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: CURRENT_ROLE
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - comparison_operator:
        raw_comparison_operator: '='
    - numeric_literal: '1234'
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: CURRENT_ROLE
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: SET
    - parameter: testing
    - keyword: TO
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: ALL
    - keyword: RESET
    - parameter: ALL
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: ALL
    - keyword: RESET
    - parameter: testing
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: USER
    - keyword: ALL
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: RESET
    - parameter: ALL
- statement_terminator: ;
- statement:
    alter_role_statement:
    - keyword: ALTER
    - keyword: ROLE
    - keyword: ALL
    - keyword: IN
    - keyword: DATABASE
    - database_reference:
        naked_identifier: devel
    - keyword: RESET
    - parameter: testing
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_schema.sql

ALTER SCHEMA schema1 RENAME TO schema2;
ALTER SCHEMA schema1 OWNER TO new_owner;
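The `.sql`/`.yml` pairs in this directory are parse-test fixtures: each YAML file records the parse tree SQLFluff produces for the matching SQL file. Below is a minimal sketch of producing such a tree with SQLFluff's public Python API; `Linter` and `parse_string` are part of the documented 2.x API, but the exact output rendering here is illustrative rather than taken from the fixture generator.

```python
# Minimal sketch: parse one fixture statement and print the parse tree.
# Assumes sqlfluff is installed (e.g. `pip install sqlfluff`).
from sqlfluff.core import Linter

linter = Linter(dialect="postgres")
parsed = linter.parse_string("ALTER SCHEMA schema1 RENAME TO schema2;")

# `parsed.tree` is the root segment (None if parsing failed entirely);
# stringify() renders roughly the same structure the YAML fixtures serialise.
if parsed.tree is not None:
    print(parsed.tree.stringify())
```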
sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_schema.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: cfb0c467bd784e5970002e70123a796062479878220480a29e8387b5e0babbd8
file:
- statement:
    alter_schema_statement:
    - keyword: ALTER
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: schema1
    - keyword: RENAME
    - keyword: TO
    - schema_reference:
        naked_identifier: schema2
- statement_terminator: ;
- statement:
    alter_schema_statement:
    - keyword: ALTER
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: schema1
    - keyword: OWNER
    - keyword: TO
    - role_reference:
        naked_identifier: new_owner
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_sequence.sql

ALTER SEQUENCE IF EXISTS foo AS integer;
ALTER SEQUENCE foo INCREMENT BY 4 NO MINVALUE MAXVALUE 56;
ALTER SEQUENCE foo INCREMENT 3 NO MAXVALUE MINVALUE 3;
ALTER SEQUENCE foo START 7 CACHE 4;
ALTER SEQUENCE foo RESTART WITH 14 NO CYCLE;
ALTER SEQUENCE foo OWNED BY foo.foo;
ALTER SEQUENCE foo OWNED BY NONE;
ALTER SEQUENCE IF EXISTS foo OWNER TO my_user;
ALTER SEQUENCE foo OWNER TO CURRENT_USER;
ALTER SEQUENCE foo OWNER TO SESSION_USER;
ALTER SEQUENCE foo RENAME TO foo2;
ALTER SEQUENCE foo SET SCHEMA my_schema;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_sequence.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 333c0af65237f8a01e770ca55ad3ddcdc16a74059be8942cebcdb8f0777b6d96
file:
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - keyword: IF
    - keyword: EXISTS
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
        keyword: AS
        data_type:
          keyword: integer
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
      - keyword: INCREMENT
      - keyword: BY
      - numeric_literal: '4'
    - alter_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MINVALUE
    - alter_sequence_options_segment:
        keyword: MAXVALUE
        numeric_literal: '56'
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
        keyword: INCREMENT
        numeric_literal: '3'
    - alter_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MAXVALUE
    - alter_sequence_options_segment:
        keyword: MINVALUE
        numeric_literal: '3'
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
        keyword: START
        numeric_literal: '7'
    - alter_sequence_options_segment:
        keyword: CACHE
        numeric_literal: '4'
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
      - keyword: RESTART
      - keyword: WITH
      - numeric_literal: '14'
    - alter_sequence_options_segment:
      - keyword: 'NO'
      - keyword: CYCLE
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - column_reference:
        - naked_identifier: foo
        - dot: .
        - naked_identifier: foo
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - alter_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - keyword: NONE
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - keyword: IF
    - keyword: EXISTS
    - sequence_reference:
        naked_identifier: foo
    - keyword: OWNER
    - keyword: TO
    - parameter: my_user
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - keyword: OWNER
    - keyword: TO
    - parameter: CURRENT_USER
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - keyword: OWNER
    - keyword: TO
    - parameter: SESSION_USER
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - keyword: RENAME
    - keyword: TO
    - sequence_reference:
        naked_identifier: foo2
- statement_terminator: ;
- statement:
    alter_sequence_statement:
    - keyword: ALTER
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - keyword: SET
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: my_schema
- statement_terminator: ;
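Because the fixture format is plain YAML, it is straightforward to inspect programmatically. A small sketch, assuming PyYAML is available and the repository layout shown by the paths above:

```python
# Sketch: load a parse fixture and list the statements it contains.
# Assumes PyYAML (`pip install pyyaml`) and the fixture path shown above.
import yaml

with open("test/fixtures/dialects/postgres/alter_sequence.yml") as f:
    fixture = yaml.safe_load(f)

# Top level is {"_hash": ..., "file": [...]}; each list element is either
# a {"statement": {...}} node or a {"statement_terminator": ";"} node.
statements = [node["statement"] for node in fixture["file"] if "statement" in node]
print(f"{len(statements)} statements; first parses as: {list(statements[0])[0]}")
```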
sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_table.sql

ALTER TABLE distributors ADD COLUMN address varchar(30);
ALTER TABLE measurements ADD COLUMN mtime timestamp with time zone DEFAULT now();
ALTER TABLE transactions ADD COLUMN status varchar(30) DEFAULT 'old', ALTER COLUMN status SET default 'current';
ALTER TABLE distributors DROP COLUMN address RESTRICT;
ALTER TABLE distributors ALTER COLUMN address TYPE varchar(80), ALTER COLUMN name TYPE varchar(100);
ALTER TABLE foo ALTER COLUMN foo_timestamp SET DATA TYPE timestamp with time zone USING timestamp with time zone 'epoch' + foo_timestamp * interval '1 second';
ALTER TABLE foo ALTER COLUMN foo_timestamp DROP DEFAULT, ALTER COLUMN foo_timestamp TYPE timestamp with time zone USING timestamp 'epoch' + foo_timestamp * interval '1 second', ALTER COLUMN foo_timestamp SET DEFAULT now();
ALTER TABLE mytable ALTER date_column SET DEFAULT NOW();
ALTER TABLE mytable ALTER int_column SET DEFAULT 1;
ALTER TABLE mytable ALTER text_column SET DEFAULT 'value';
ALTER TABLE mytable ALTER bool_column SET DEFAULT false;
ALTER TABLE mytable ALTER other_column SET DEFAULT other_value;
ALTER TABLE mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
ALTER TABLE mytable ALTER other_column SET DEFAULT a_function(a_parameter);
ALTER TABLE mytable ALTER other_column SET DEFAULT a_function('a_parameter');
ALTER TABLE mytable ALTER other_column SET DEFAULT 1 + 2 + 3;
ALTER TABLE mytable ALTER other_column SET DEFAULT (1 + 2 + 3);
ALTER TABLE mytable ALTER other_column DROP DEFAULT;
ALTER TABLE IF EXISTS mytable ALTER date_column SET DEFAULT NOW();
ALTER TABLE IF EXISTS mytable ALTER int_column SET DEFAULT 1;
ALTER TABLE IF EXISTS mytable ALTER text_column SET DEFAULT 'value';
ALTER TABLE IF EXISTS mytable ALTER bool_column SET DEFAULT false;
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT other_value;
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT CURRENT_TIMESTAMP;
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function(a_parameter);
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT a_function('a_parameter');
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT 1 + 2 + 3;
ALTER TABLE IF EXISTS mytable ALTER other_column SET DEFAULT (1 + 2 + 3);
ALTER TABLE IF EXISTS mytable ALTER other_column DROP DEFAULT;
ALTER TABLE distributors RENAME COLUMN address TO city;
ALTER TABLE distributors RENAME TO suppliers;
ALTER TABLE distributors RENAME CONSTRAINT zipchk TO zip_check;
ALTER TABLE distributors ALTER COLUMN street SET NOT NULL;
ALTER TABLE distributors ALTER COLUMN street DROP NOT NULL;
ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5);
ALTER TABLE distributors ADD CONSTRAINT zipchk CHECK (char_length(zipcode) = 5) NO INHERIT;
ALTER TABLE distributors DROP CONSTRAINT zipchk;
-- constraints can optionally have their names double-quoted
ALTER TABLE distributors DROP CONSTRAINT "zipchk";
ALTER TABLE ONLY distributors DROP CONSTRAINT zipchk;
ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address);
ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) MATCH FULL;
ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) ON DELETE RESTRICT ON UPDATE CASCADE;
ALTER TABLE distributors ADD CONSTRAINT distfk FOREIGN KEY (address) REFERENCES addresses (address) NOT VALID;
ALTER TABLE distributors VALIDATE CONSTRAINT distfk;
ALTER TABLE distributors ADD CONSTRAINT dist_id_zipcode_key UNIQUE (dist_id, zipcode);
ALTER TABLE distributors ADD PRIMARY KEY (dist_id);
ALTER TABLE distributors SET TABLESPACE fasttablespace;
-- Issue:2071
ALTER TABLE distributors SET (parameter_1 = 'value');
ALTER TABLE distributors SET (parameter_1 = 1);
ALTER TABLE distributors SET (parameter_1 = 1, parameter_2 = 'value');
ALTER TABLE myschema.distributors SET SCHEMA yourschema;
ALTER TABLE distributors DROP CONSTRAINT distributors_pkey, ADD CONSTRAINT distributors_pkey PRIMARY KEY USING INDEX dist_id_temp_idx;
ALTER TABLE measurement ATTACH PARTITION measurement_y2016m07 FOR VALUES FROM ('2016-07-01') TO ('2016-08-01');
ALTER TABLE cities ATTACH PARTITION cities_ab FOR VALUES IN ('a', 'b');
ALTER TABLE orders ATTACH PARTITION orders_p4 FOR VALUES WITH (MODULUS 4, REMAINDER 3);
ALTER TABLE cities ATTACH PARTITION cities_partdef DEFAULT;
ALTER TABLE measurement DETACH PARTITION measurement_y2015m12;
ALTER TABLE measurement DETACH PARTITION measurement_y2021m10 CONCURRENTLY FINALIZE;
ALTER TABLE landing.workorderhistory ADD CONSTRAINT workorder_id_foreign_key FOREIGN KEY(workorderid) REFERENCES landing.workorder(id);
ALTER TABLE my_table ADD COLUMN IF NOT EXISTS foo TEXT;
ALTER TABLE public.obm_buildings OWNER to postgres;
ALTER TABLE distributors ALTER COLUMN street ADD GENERATED ALWAYS AS IDENTITY (INCREMENT 4 NO MAXVALUE);
ALTER TABLE distributors ALTER COLUMN street SET RESTART WITH 3;
ALTER TABLE distributors ADD my_column int GENERATED BY DEFAULT AS IDENTITY (CACHE 3 MAXVALUE 63 OWNED BY NONE);
ALTER TABLE public.test OWNER TO "ID";
ALTER TABLE public.test OWNER TO ID;
ALTER TABLE IF EXISTS ONLY public.test OWNER TO CURRENT_ROLE;
ALTER TABLE public.history ALTER COLUMN id ADD GENERATED ALWAYS AS IDENTITY (
    SEQUENCE NAME public.history_id_seq
);
-- Test adding columns with UNIQUE and PRIMARY KEY constraints
ALTER TABLE tbl
ADD COLUMN nulls_distinct text UNIQUE NULLS DISTINCT,
ADD COLUMN nulls_not_distinct text UNIQUE NULLS NOT DISTINCT,
ADD everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str') USING INDEX TABLESPACE spc;
ALTER TABLE tbl ADD pk text DEFAULT 'hello' PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL;
ALTER TABLE tbl
ADD CONSTRAINT foo1 UNIQUE (fld, col),
ADD CONSTRAINT foo2 UNIQUE NULLS DISTINCT (fld),
ADD CONSTRAINT foo3 UNIQUE NULLS NOT DISTINCT (fld),
ADD CONSTRAINT everything UNIQUE NULLS DISTINCT (fld, col) INCLUDE (two, three) WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspc,
ADD CONSTRAINT pk PRIMARY KEY (fld, col) INCLUDE (four) WITH (ff=auto, gg=stuff) USING INDEX TABLESPACE tblspc;
-- Test SET/RESET actions on both table and column
ALTER TABLE foo SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
ALTER TABLE foo RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
ALTER TABLE foo ALTER COLUMN baz SET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
ALTER TABLE foo ALTER COLUMN baz RESET (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC);
-- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints
-- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage (adapted for ALTER TABLE)
ALTER TABLE reservation ADD EXCLUDE USING gist (during WITH &&);
ALTER TABLE room_reservation ADD CONSTRAINT cons EXCLUDE USING gist (room WITH =, during WITH &&);
-- all the gnarly options: not every option is valid, but this will parse successfully on PG 15.
ALTER TABLE no_using ADD EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT;
ALTER TABLE many_options ADD EXCLUDE USING gist (
    one WITH =,
    nulls_opclass nulls WITH =,
    nulls_last NULLS LAST WITH =,
    two COLLATE "en-US" opclass (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) ASC NULLS FIRST WITH =,
    (two + 5) WITH =,
    myfunc(a, b) WITH =,
    myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =,
    only_opclass opclass WITH =,
    desc_order DESC WITH =
) INCLUDE (a, b) WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC) USING INDEX TABLESPACE tblspc WHERE (field != 'def') DEFERRABLE NOT VALID INITIALLY DEFERRED;
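alter_table.sql exercises a particularly wide slice of the grammar, and its YAML counterpart below is correspondingly large. When checking a fixture like this one, it can be easier to walk the parsed tree than to read the YAML; here is a sketch using the `recursive_crawl` segment method from SQLFluff's 2.x API — the paths and this particular usage are illustrative assumptions, not part of the fixture suite itself.

```python
# Sketch: count the statements that parse from the fixture SQL and flag
# any unparsable sections. Path and API usage are assumptions noted above.
from sqlfluff.core import Linter

with open("test/fixtures/dialects/postgres/alter_table.sql") as f:
    sql = f.read()

parsed = Linter(dialect="postgres").parse_string(sql)
tree = parsed.tree
if tree is not None:
    stmts = list(tree.recursive_crawl("statement"))
    unparsable = list(tree.recursive_crawl("unparsable"))
    print(f"{len(stmts)} statements, {len(unparsable)} unparsable segments")
```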
sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8e47ff3c0810152cb6f3ddafb3680fabd4e7029d3de173f3fc8514a24b853b96
file:
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - column_reference:
          naked_identifier: address
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '30'
              end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurements
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - column_reference:
          naked_identifier: mtime
      - data_type:
          datetime_type_identifier:
          - keyword: timestamp
          - keyword: with
          - keyword: time
          - keyword: zone
      - column_constraint_segment:
          keyword: DEFAULT
          function:
            function_name:
              function_name_identifier: now
            bracketed:
              start_bracket: (
              end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: transactions
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - column_reference:
          naked_identifier: status
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '30'
              end_bracket: )
      - column_constraint_segment:
          keyword: DEFAULT
          quoted_literal: "'old'"
    - comma: ','
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: status
      - keyword: SET
      - keyword: default
      - quoted_literal: "'current'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: DROP
      - keyword: COLUMN
      - column_reference:
          naked_identifier: address
      - keyword: RESTRICT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: address
      - keyword: TYPE
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '80'
              end_bracket: )
    - comma: ','
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: name
      - keyword: TYPE
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '100'
              end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: foo_timestamp
      - keyword: SET
      - keyword: DATA
      - keyword: TYPE
      - data_type:
          datetime_type_identifier:
          - keyword: timestamp
          - keyword: with
          - keyword: time
          - keyword: zone
      - keyword: USING
      - expression:
        - datetime_literal:
            datetime_type_identifier:
            - keyword: timestamp
            - keyword: with
            - keyword: time
            - keyword: zone
            quoted_literal: "'epoch'"
        - binary_operator: +
        - column_reference:
            naked_identifier: foo_timestamp
        - binary_operator: '*'
        - datetime_literal:
            datetime_type_identifier:
              keyword: interval
            quoted_literal: "'1 second'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: foo_timestamp
      - keyword: DROP
      - keyword: DEFAULT
    - comma: ','
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: foo_timestamp
      - keyword: TYPE
      - data_type:
          datetime_type_identifier:
          - keyword: timestamp
          - keyword: with
          - keyword: time
          - keyword: zone
      - keyword: USING
      - expression:
        - datetime_literal:
            datetime_type_identifier:
              keyword: timestamp
            quoted_literal: "'epoch'"
        - binary_operator: +
        - column_reference:
            naked_identifier: foo_timestamp
        - binary_operator: '*'
        - datetime_literal:
            datetime_type_identifier:
              keyword: interval
            quoted_literal: "'1 second'"
    - comma: ','
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: foo_timestamp
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: now
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: date_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: NOW
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: int_column
      - keyword: SET
      - keyword: DEFAULT
      - numeric_literal: '1'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: text_column
      - keyword: SET
      - keyword: DEFAULT
      - quoted_literal: "'value'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: bool_column
      - keyword: SET
      - keyword: DEFAULT
      - boolean_literal: 'false'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
          column_reference:
            naked_identifier: other_value
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - bare_function: CURRENT_TIMESTAMP
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: a_function
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: a_parameter
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: a_function
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a_parameter'"
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
        - binary_operator: +
        - numeric_literal: '3'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
          bracketed:
            start_bracket: (
            expression:
            - numeric_literal: '1'
            - binary_operator: +
            - numeric_literal: '2'
            - binary_operator: +
            - numeric_literal: '3'
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: DROP
      - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: date_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: NOW
          bracketed:
            start_bracket: (
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: int_column
      - keyword: SET
      - keyword: DEFAULT
      - numeric_literal: '1'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: text_column
      - keyword: SET
      - keyword: DEFAULT
      - quoted_literal: "'value'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: bool_column
      - keyword: SET
      - keyword: DEFAULT
      - boolean_literal: 'false'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
          column_reference:
            naked_identifier: other_value
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - bare_function: CURRENT_TIMESTAMP
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: a_function
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: a_parameter
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - function:
          function_name:
            function_name_identifier: a_function
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'a_parameter'"
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
        - numeric_literal: '1'
        - binary_operator: +
        - numeric_literal: '2'
        - binary_operator: +
        - numeric_literal: '3'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: SET
      - keyword: DEFAULT
      - expression:
          bracketed:
            start_bracket: (
            expression:
            - numeric_literal: '1'
            - binary_operator: +
            - numeric_literal: '2'
            - binary_operator: +
            - numeric_literal: '3'
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - table_reference:
        naked_identifier: mytable
    - alter_table_action_segment:
      - keyword: ALTER
      - column_reference:
          naked_identifier: other_column
      - keyword: DROP
      - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        naked_identifier: address
    - keyword: TO
    - column_reference:
        naked_identifier: city
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - keyword: RENAME
    - keyword: TO
    - table_reference:
        naked_identifier: suppliers
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - keyword: RENAME
    - keyword: CONSTRAINT
    - parameter: zipchk
    - keyword: TO
    - parameter: zip_check
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: street
      - keyword: SET
      - keyword: NOT
      - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: street
      - keyword: DROP
      - keyword: NOT
      - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: zipchk
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: char_length
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: zipcode
                  end_bracket: )
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: zipchk
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              function:
                function_name:
                  function_name_identifier: char_length
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: zipcode
                  end_bracket: )
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
            end_bracket: )
        - keyword: 'NO'
        - keyword: INHERIT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: DROP
      - keyword: CONSTRAINT
      - parameter: zipchk
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: DROP
      - keyword: CONSTRAINT
      - parameter: '"zipchk"'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: ONLY
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: DROP
      - keyword: CONSTRAINT
      - parameter: zipchk
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: distfk
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: addresses
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: distfk
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: addresses
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: MATCH
        - keyword: FULL
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: distfk
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: addresses
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: 'ON'
        - keyword: DELETE
        - keyword: RESTRICT
        - keyword: 'ON'
        - keyword: UPDATE
        - keyword: CASCADE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: distfk
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
            naked_identifier: addresses
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: address
            end_bracket: )
        - keyword: NOT
        - keyword: VALID
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: VALIDATE
      - keyword: CONSTRAINT
      - parameter: distfk
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: dist_id_zipcode_key
        - keyword: UNIQUE
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: dist_id
          - comma: ','
          - column_reference:
              naked_identifier: zipcode
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: dist_id
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: SET
      - keyword: TABLESPACE
      - tablespace_reference:
          naked_identifier: fasttablespace
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: SET
        relation_options:
          bracketed:
            start_bracket: (
            relation_option:
              properties_naked_identifier: parameter_1
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'value'"
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: SET
        relation_options:
          bracketed:
            start_bracket: (
            relation_option:
              properties_naked_identifier: parameter_1
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '1'
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: SET
        relation_options:
          bracketed:
          - start_bracket: (
          - relation_option:
              properties_naked_identifier: parameter_1
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '1'
          - comma: ','
          - relation_option:
              properties_naked_identifier: parameter_2
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'value'"
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: myschema
      - dot: .
      - naked_identifier: distributors
    - keyword: SET
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: yourschema
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: DROP
      - keyword: CONSTRAINT
      - parameter: distributors_pkey
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: distributors_pkey
        - keyword: PRIMARY
        - keyword: KEY
        - keyword: USING
        - keyword: INDEX
        - index_reference:
            naked_identifier: dist_id_temp_idx
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - keyword: ATTACH
    - keyword: PARTITION
    - parameter: measurement_y2016m07
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
          start_bracket: (
          expression:
            quoted_literal: "'2016-07-01'"
          end_bracket: )
      - keyword: TO
      - bracketed:
          start_bracket: (
          expression:
            quoted_literal: "'2016-08-01'"
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities
    - keyword: ATTACH
    - keyword: PARTITION
    - parameter: cities_ab
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: IN
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'a'"
        - comma: ','
        - expression:
            quoted_literal: "'b'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders
    - keyword: ATTACH
    - keyword: PARTITION
    - parameter: orders_p4
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: WITH
        bracketed:
        - start_bracket: (
        - keyword: MODULUS
        - numeric_literal: '4'
        - comma: ','
        - keyword: REMAINDER
        - numeric_literal: '3'
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities
    - keyword: ATTACH
    - keyword: PARTITION
    - parameter: cities_partdef
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - keyword: DETACH
    - keyword: PARTITION
    - parameter: measurement_y2015m12
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - keyword: DETACH
    - keyword: PARTITION
    - parameter: measurement_y2021m10
    - keyword: CONCURRENTLY
    - keyword: FINALIZE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: landing
      - dot: .
      - naked_identifier: workorderhistory
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: workorder_id_foreign_key
        - keyword: FOREIGN
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: workorderid
            end_bracket: )
        - keyword: REFERENCES
        - table_reference:
          - naked_identifier: landing
          - dot: .
          - naked_identifier: workorder
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: id
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: my_table
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - keyword: IF
      - keyword: NOT
      - keyword: EXISTS
      - column_reference:
          naked_identifier: foo
      - data_type:
          keyword: TEXT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: obm_buildings
    - alter_table_action_segment:
      - keyword: OWNER
      - keyword: to
      - parameter: postgres
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: street
      - keyword: ADD
      - keyword: GENERATED
      - keyword: ALWAYS
      - keyword: AS
      - keyword: IDENTITY
      - bracketed:
        - start_bracket: (
        - alter_sequence_options_segment:
            keyword: INCREMENT
            numeric_literal: '4'
        - alter_sequence_options_segment:
          - keyword: 'NO'
          - keyword: MAXVALUE
        - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: street
      - keyword: SET
      - alter_sequence_options_segment:
        - keyword: RESTART
        - keyword: WITH
        - numeric_literal: '3'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - alter_table_action_segment:
        keyword: ADD
        column_reference:
          naked_identifier: my_column
        data_type:
          keyword: int
        column_constraint_segment:
        - keyword: GENERATED
        - keyword: BY
        - keyword: DEFAULT
        - keyword: AS
        - keyword: IDENTITY
        - bracketed:
          - start_bracket: (
          - alter_sequence_options_segment:
              keyword: CACHE
              numeric_literal: '3'
          - alter_sequence_options_segment:
              keyword: MAXVALUE
              numeric_literal: '63'
          - alter_sequence_options_segment:
            - keyword: OWNED
            - keyword: BY
            - keyword: NONE
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: test
    - alter_table_action_segment:
      - keyword: OWNER
      - keyword: TO
      - parameter: '"ID"'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: test
    - alter_table_action_segment:
      - keyword: OWNER
      - keyword: TO
      - parameter: ID
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - keyword: IF
    - keyword: EXISTS
    - keyword: ONLY
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: test
    - alter_table_action_segment:
      - keyword: OWNER
      - keyword: TO
      - parameter: CURRENT_ROLE
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: history
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: id
      - keyword: ADD
      - keyword: GENERATED
      - keyword: ALWAYS
      - keyword: AS
      - keyword: IDENTITY
      - bracketed:
          start_bracket: (
          alter_sequence_options_segment:
          - keyword: SEQUENCE
          - keyword: NAME
          - sequence_reference:
            - naked_identifier: public
            - dot: .
            - naked_identifier: history_id_seq
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - column_reference:
          naked_identifier: nulls_distinct
      - data_type:
          keyword: text
      - column_constraint_segment:
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: DISTINCT
    - comma: ','
    - alter_table_action_segment:
      - keyword: ADD
      - keyword: COLUMN
      - column_reference:
          naked_identifier: nulls_not_distinct
      - data_type:
          keyword: text
      - column_constraint_segment:
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: NOT
        - keyword: DISTINCT
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        column_reference:
          naked_identifier: everything
        data_type:
          keyword: text
        column_constraint_segment:
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: DISTINCT
        - keyword: WITH
        - definition_parameters:
            bracketed:
            - start_bracket: (
            - definition_parameter:
                properties_naked_identifier: arg1
                comparison_operator:
                  raw_comparison_operator: '='
                numeric_literal: '3'
            - comma: ','
            - definition_parameter:
                properties_naked_identifier: arg5
                comparison_operator:
                  raw_comparison_operator: '='
                quoted_literal: "'str'"
            - end_bracket: )
        - keyword: USING
        - keyword: INDEX
        - keyword: TABLESPACE
        - tablespace_reference:
            naked_identifier: spc
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl
    - alter_table_action_segment:
      - keyword: ADD
      - column_reference:
          naked_identifier: pk
      - data_type:
          keyword: text
      - column_constraint_segment:
          keyword: DEFAULT
          quoted_literal: "'hello'"
      - column_constraint_segment:
        - keyword: PRIMARY
        - keyword: KEY
        - keyword: WITH
        - definition_parameters:
            bracketed:
            - start_bracket: (
            - definition_parameter:
                properties_naked_identifier: arg1
                comparison_operator:
                  raw_comparison_operator: '='
                numeric_literal: '3'
            - comma: ','
            - definition_parameter:
                properties_naked_identifier: arg5
                comparison_operator:
                  raw_comparison_operator: '='
                quoted_literal: "'str'"
            - end_bracket: )
        - keyword: USING
        - keyword: INDEX
        - keyword: TABLESPACE
        - tablespace_reference:
            naked_identifier: tblspace
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: tbl
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: foo1
        - keyword: UNIQUE
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: fld
          - comma: ','
          - column_reference:
              naked_identifier: col
          - end_bracket: )
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: foo2
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: DISTINCT
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: fld
            end_bracket: )
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: foo3
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: NOT
        - keyword: DISTINCT
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: fld
            end_bracket: )
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: everything
        - keyword: UNIQUE
        - keyword: NULLS
        - keyword: DISTINCT
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: fld
          - comma: ','
          - column_reference:
              naked_identifier: col
          - end_bracket: )
        - index_parameters:
          - keyword: INCLUDE
          - bracketed:
            - start_bracket: (
            - column_reference:
                naked_identifier: two
            - comma: ','
            - column_reference:
                naked_identifier: three
            - end_bracket: )
          - keyword: WITH
          - definition_parameters:
              bracketed:
              - start_bracket: (
              - definition_parameter:
                  properties_naked_identifier: arg1
                  comparison_operator:
                    raw_comparison_operator: '='
                  numeric_literal: '3'
              - comma: ','
              - definition_parameter:
                  properties_naked_identifier: arg5
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "'str'"
              - end_bracket: )
          - keyword: USING
          - keyword: INDEX
          - keyword: TABLESPACE
          - tablespace_reference:
              naked_identifier: tblspc
    - comma: ','
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: pk
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: fld
          - comma: ','
          - column_reference:
              naked_identifier: col
          - end_bracket: )
        - index_parameters:
          - keyword: INCLUDE
          - bracketed:
              start_bracket: (
              column_reference:
                naked_identifier: four
              end_bracket: )
          - keyword: WITH
          - definition_parameters:
              bracketed:
              - start_bracket: (
              - definition_parameter:
                - properties_naked_identifier: ff
                - comparison_operator:
                    raw_comparison_operator: '='
                - properties_naked_identifier: auto
              - comma: ','
              - definition_parameter:
                - properties_naked_identifier: gg
                - comparison_operator:
                    raw_comparison_operator: '='
                - properties_naked_identifier: stuff
              - end_bracket: )
          - keyword: USING
          - keyword: INDEX
          - keyword: TABLESPACE
          - tablespace_reference:
              naked_identifier: tblspc
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
        keyword: SET
        relation_options:
          bracketed:
          - start_bracket: (
          - relation_option:
              properties_naked_identifier: opt1
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt2
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt3
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt4
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt5
            - comparison_operator:
                raw_comparison_operator: '='
            - numeric_literal: '6'
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt6
            - comparison_operator:
                raw_comparison_operator: '='
            - quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: opt7
            - comparison_operator:
                raw_comparison_operator: '='
            - properties_naked_identifier: ASC
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
        keyword: RESET
        relation_options:
          bracketed:
          - start_bracket: (
          - relation_option:
              properties_naked_identifier: opt1
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt2
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt3
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt4
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt5
            - comparison_operator:
                raw_comparison_operator: '='
            - numeric_literal: '6'
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt6
            - comparison_operator:
                raw_comparison_operator: '='
            - quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: opt7
            - comparison_operator:
                raw_comparison_operator: '='
            - properties_naked_identifier: ASC
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: baz
      - keyword: SET
      - relation_options:
          bracketed:
          - start_bracket: (
          - relation_option:
              properties_naked_identifier: opt1
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt2
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt3
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt4
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt5
            - comparison_operator:
                raw_comparison_operator: '='
            - numeric_literal: '6'
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt6
            - comparison_operator:
                raw_comparison_operator: '='
            - quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: opt7
            - comparison_operator:
                raw_comparison_operator: '='
            - properties_naked_identifier: ASC
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - alter_table_action_segment:
      - keyword: ALTER
      - keyword: COLUMN
      - column_reference:
          naked_identifier: baz
      - keyword: RESET
      - relation_options:
          bracketed:
          - start_bracket: (
          - relation_option:
              properties_naked_identifier: opt1
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt2
              comparison_operator:
                raw_comparison_operator: '='
              numeric_literal: '5'
          - comma: ','
          - relation_option:
              properties_naked_identifier: opt3
              comparison_operator:
                raw_comparison_operator: '='
              quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt4
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt5
            - comparison_operator:
                raw_comparison_operator: '='
            - numeric_literal: '6'
          - comma: ','
          - relation_option:
            - properties_naked_identifier: ns
            - dot: .
            - properties_naked_identifier: opt6
            - comparison_operator:
                raw_comparison_operator: '='
            - quoted_literal: "'str'"
          - comma: ','
          - relation_option:
            - properties_naked_identifier: opt7
            - comparison_operator:
                raw_comparison_operator: '='
            - properties_naked_identifier: ASC
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: reservation
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: EXCLUDE
        - keyword: USING
        - index_access_method:
            naked_identifier: gist
        - bracketed:
            start_bracket: (
            exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: during
              keyword: WITH
              comparison_operator:
              - ampersand: '&'
              - ampersand: '&'
            end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: room_reservation
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: cons
        - keyword: EXCLUDE
        - keyword: USING
        - index_access_method:
            naked_identifier: gist
        - bracketed:
          - start_bracket: (
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: room
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: during
              keyword: WITH
              comparison_operator:
              - ampersand: '&'
              - ampersand: '&'
          - end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: no_using
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: EXCLUDE
        - bracketed:
            start_bracket: (
            exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: field
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
            end_bracket: )
        - keyword: NOT
        - keyword: DEFERRABLE
        - keyword: INITIALLY
        - keyword: IMMEDIATE
        - keyword: 'NO'
        - keyword: INHERIT
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
        naked_identifier: many_options
    - alter_table_action_segment:
        keyword: ADD
        table_constraint:
        - keyword: EXCLUDE
        - keyword: USING
        - index_access_method:
            naked_identifier: gist
        - bracketed:
          - start_bracket: (
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: one
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: nulls_opclass
                index_element_options:
                  operator_class_reference:
                    naked_identifier: nulls
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: nulls_last
                index_element_options:
                - keyword: NULLS
                - keyword: LAST
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: two
                index_element_options:
                - keyword: COLLATE
                - collation_reference:
                    quoted_identifier: '"en-US"'
                - operator_class_reference:
                    naked_identifier: opclass
                - relation_options:
                    bracketed:
                    - start_bracket: (
                    - relation_option:
                        properties_naked_identifier: opt1
                    - comma: ','
                    - relation_option:
                        properties_naked_identifier: opt2
                        comparison_operator:
                          raw_comparison_operator: '='
                        numeric_literal: '5'
                    - comma: ','
                    - relation_option:
                        properties_naked_identifier: opt3
                        comparison_operator:
                          raw_comparison_operator: '='
                        quoted_literal: "'str'"
                    - comma: ','
                    - relation_option:
                      - properties_naked_identifier: ns
                      - dot: .
                      - properties_naked_identifier: opt4
                    - comma: ','
                    - relation_option:
                      - properties_naked_identifier: ns
                      - dot: .
                      - properties_naked_identifier: opt5
                      - comparison_operator:
                          raw_comparison_operator: '='
                      - numeric_literal: '6'
                    - comma: ','
                    - relation_option:
                      - properties_naked_identifier: ns
                      - dot: .
                      - properties_naked_identifier: opt6
                      - comparison_operator:
                          raw_comparison_operator: '='
                      - quoted_literal: "'str'"
                    - comma: ','
                    - relation_option:
                      - properties_naked_identifier: opt7
                      - comparison_operator:
                          raw_comparison_operator: '='
                      - properties_naked_identifier: ASC
                    - end_bracket: )
                - keyword: ASC
                - keyword: NULLS
                - keyword: FIRST
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: two
                    binary_operator: +
                    numeric_literal: '5'
                  end_bracket: )
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                function:
                  function_name:
                    function_name_identifier: myfunc
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: a
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - end_bracket: )
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                function:
                  function_name:
                    function_name_identifier: myfunc_opclass
                  bracketed:
                  - start_bracket: (
                  - expression:
                      column_reference:
                        naked_identifier: a
                  - comma: ','
                  - expression:
                      column_reference:
                        naked_identifier: b
                  - end_bracket: )
                index_element_options:
                  operator_class_reference:
                    naked_identifier: fop
                  relation_options:
                    bracketed:
                    - start_bracket: (
                    - relation_option:
                        properties_naked_identifier: opt
                        comparison_operator:
                          raw_comparison_operator: '='
                        numeric_literal: '1'
                    - comma: ','
                    - relation_option:
                        properties_naked_identifier: foo
                        comparison_operator:
                          raw_comparison_operator: '='
                        numeric_literal: '2'
                    - end_bracket: )
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: only_opclass
                index_element_options:
                  operator_class_reference:
                    naked_identifier: opclass
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - comma: ','
          - exclusion_constraint_element:
              index_element:
                column_reference:
                  naked_identifier: desc_order
                index_element_options:
                  keyword: DESC
              keyword: WITH
              comparison_operator:
                raw_comparison_operator: '='
          - end_bracket: )
        - index_parameters:
          - keyword: INCLUDE
          - bracketed:
            - start_bracket: (
            - column_reference:
                naked_identifier: a
            - comma: ','
            - column_reference:
                naked_identifier: b
            - end_bracket: )
          - keyword: WITH
          - definition_parameters:
              bracketed:
              - start_bracket: (
              - definition_parameter:
                  properties_naked_identifier: idx_num
                  comparison_operator:
                    raw_comparison_operator: '='
                  numeric_literal: '5'
              - comma: ','
              - definition_parameter:
                  properties_naked_identifier: idx_str
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "'idx_value'"
              - comma: ','
              - definition_parameter:
                - properties_naked_identifier: idx_kw
                - comparison_operator:
                    raw_comparison_operator: '='
                - properties_naked_identifier: DESC
              - end_bracket: )
          - keyword: USING
          - keyword: INDEX
          - keyword: TABLESPACE
          - tablespace_reference:
              naked_identifier: tblspc
        - keyword: WHERE
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: field
              comparison_operator:
              - raw_comparison_operator: '!'
              - raw_comparison_operator: '='
              quoted_literal: "'def'"
            end_bracket: )
        - keyword: DEFERRABLE
        - keyword: NOT
        - keyword: VALID
        - keyword: INITIALLY
        - keyword: DEFERRED
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_trigger.sql

ALTER TRIGGER foo ON table_name RENAME TO new_name;
ALTER TRIGGER foo ON table_name DEPENDS ON EXTENSION extension_name;
ALTER TRIGGER foo ON table_name NO DEPENDS ON EXTENSION extension_name;
ALTER TRIGGER emp_stamp ON emp RENAME TO emp_track_chgs;
ALTER TRIGGER emp_stamp ON emp DEPENDS ON EXTENSION emplib;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_trigger.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 13e3b4d4c698c500750e8957a91ac71d8ee8d711d531847e9b197400f95fd866
file:
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: RENAME
    - keyword: TO
    - trigger_reference:
        naked_identifier: new_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: extension_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: foo
    - keyword: 'ON'
    - table_reference:
        naked_identifier: table_name
    - keyword: 'NO'
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: extension_name
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: emp_stamp
    - keyword: 'ON'
    - table_reference:
        naked_identifier: emp
    - keyword: RENAME
    - keyword: TO
    - trigger_reference:
        naked_identifier: emp_track_chgs
- statement_terminator: ;
- statement:
    alter_trigger:
    - keyword: ALTER
    - keyword: TRIGGER
    - trigger_reference:
        naked_identifier: emp_stamp
    - keyword: 'ON'
    - table_reference:
        naked_identifier: emp
    - keyword: DEPENDS
    - keyword: 'ON'
    - keyword: EXTENSION
    - extension_reference:
        naked_identifier: emplib
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_type.sql

-- https://www.postgresql.org/docs/current/sql-altertype.html
ALTER TYPE foo RENAME TO bar;
ALTER TYPE foo OWNER TO CURRENT_USER;
ALTER TYPE foo OWNER TO new_owner;
ALTER TYPE foo SET SCHEMA new_schema;
ALTER TYPE compfoo ADD ATTRIBUTE f3 int, DROP ATTRIBUTE IF EXISTS f4, ALTER ATTRIBUTE f5 TYPE int;
ALTER TYPE compfoo RENAME ATTRIBUTE
f6 TO f7; ALTER TYPE colors ADD VALUE 'orange' AFTER 'red'; ALTER TYPE foo ADD VALUE 'baz'; ALTER TYPE foo ADD VALUE 'qux' BEFORE 'baz'; ALTER TYPE foo ADD VALUE 'quux' AFTER 'baz'; ALTER TYPE financial.reporting_statuses RENAME VALUE 'partially' TO 'partially-reported'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_type.yml000066400000000000000000000073401451700765000245350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 020ef3781f2ea1fb9fb536740624bacbc93a67b65d6a5b5380a2699a6b0aa8cc file: - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: RENAME - keyword: TO - object_reference: naked_identifier: bar - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: OWNER - keyword: TO - object_reference: naked_identifier: new_owner - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: compfoo - keyword: ADD - keyword: ATTRIBUTE - column_reference: naked_identifier: f3 - data_type: keyword: int - comma: ',' - keyword: DROP - keyword: ATTRIBUTE - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: f4 - comma: ',' - keyword: ALTER - keyword: ATTRIBUTE - column_reference: naked_identifier: f5 - keyword: TYPE - data_type: keyword: int - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: compfoo - keyword: RENAME - keyword: ATTRIBUTE - column_reference: naked_identifier: f6 - keyword: TO - column_reference: naked_identifier: f7 - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: colors - keyword: ADD - keyword: VALUE - quoted_literal: "'orange'" - keyword: AFTER - quoted_literal: "'red'" - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: ADD - keyword: VALUE - quoted_literal: "'baz'" - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: ADD - keyword: VALUE - quoted_literal: "'qux'" - keyword: BEFORE - quoted_literal: "'baz'" - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: naked_identifier: foo - keyword: ADD - keyword: VALUE - quoted_literal: "'quux'" - keyword: AFTER - quoted_literal: "'baz'" - statement_terminator: ; - statement: alter_type_statement: - keyword: ALTER - keyword: TYPE - object_reference: - naked_identifier: financial - dot: . 
- naked_identifier: reporting_statuses - keyword: RENAME - keyword: VALUE - quoted_literal: "'partially'" - keyword: TO - quoted_literal: "'partially-reported'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_view.sql000066400000000000000000000053061451700765000245240ustar00rootroot00000000000000-- issue:2094 ALTER VIEW myview ALTER date_column SET DEFAULT NOW(); ALTER VIEW myview ALTER int_column SET DEFAULT 1; ALTER VIEW myview ALTER text_column SET DEFAULT 'value'; ALTER VIEW myview ALTER bool_column SET DEFAULT false; ALTER VIEW myview ALTER other_column SET DEFAULT other_value; ALTER VIEW myview ALTER other_column SET DEFAULT CURRENT_TIMESTAMP; ALTER VIEW myview ALTER other_column SET DEFAULT a_function(a_parameter); ALTER VIEW myview ALTER other_column SET DEFAULT a_function('a_parameter'); ALTER VIEW myview ALTER other_column DROP DEFAULT; ALTER VIEW IF EXISTS myview ALTER date_column SET DEFAULT NOW(); ALTER VIEW IF EXISTS myview ALTER int_column SET DEFAULT 1; ALTER VIEW IF EXISTS myview ALTER text_column SET DEFAULT 'value'; ALTER VIEW IF EXISTS myview ALTER bool_column SET DEFAULT false; ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT other_value; ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT CURRENT_TIMESTAMP; ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT a_function(a_parameter); ALTER VIEW IF EXISTS myview ALTER other_column SET DEFAULT a_function('a_parameter'); ALTER VIEW IF EXISTS myview ALTER other_column DROP DEFAULT; ALTER VIEW myview OWNER TO baz_role; ALTER VIEW myview OWNER TO "baz-role"; ALTER VIEW myview OWNER TO CURRENT_ROLE; ALTER VIEW myview OWNER TO CURRENT_USER; ALTER VIEW myview OWNER TO SESSION_USER; ALTER VIEW IF EXISTS myview OWNER TO baz_role; ALTER VIEW IF EXISTS myview OWNER TO "baz-role"; ALTER VIEW IF EXISTS myview OWNER TO CURRENT_ROLE; ALTER VIEW IF EXISTS myview OWNER TO CURRENT_USER; ALTER VIEW IF EXISTS myview OWNER TO SESSION_USER; ALTER VIEW myview RENAME column_name TO new_column_name; ALTER VIEW myview RENAME COLUMN column_name TO new_column_name; ALTER VIEW IF EXISTS myview RENAME column_name TO new_column_name; ALTER VIEW IF EXISTS myview RENAME COLUMN column_name TO new_column_name; ALTER VIEW myview RENAME TO new_name; ALTER VIEW IF EXISTS myview RENAME TO new_name; ALTER VIEW myview SET SCHEMA new_schema; ALTER VIEW IF EXISTS myview SET SCHEMA new_schema; ALTER VIEW myview SET ( view_option_name ); ALTER VIEW myview SET ( view_option_name = 1); ALTER VIEW myview SET ( view_option_name = 1, view_option_name2 = 'value', view_option_name3, view_option_name4 = false); ALTER VIEW IF EXISTS myview SET ( view_option_name ); ALTER VIEW IF EXISTS myview SET ( view_option_name = 1); ALTER VIEW IF EXISTS myview SET ( view_option_name, view_option_name2 = 1, view_option_name3); ALTER VIEW myview RESET ( view_option_name ); ALTER VIEW myview RESET ( view_option_name, view_option_name2 ); ALTER VIEW IF EXISTS myview RESET ( view_option_name ); ALTER VIEW IF EXISTS myview RESET ( view_option_name, view_option_name2 ); sqlfluff-2.3.5/test/fixtures/dialects/postgres/alter_view.yml000066400000000000000000000407651451700765000245360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
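# --- Illustrative sketch (hedged; not part of the auto-generated fixture):
# the regeneration workflow named in the header above is simply the documented
# invocation, run from the repository root:
#
#   python test/generate_parse_fixture_yml.py
#
# Per the header, this regenerates the .yml parse trees from the fixture .sql
# files, including the "_hash" field that the test suite checks; any behaviour
# beyond what the header states is an assumption.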
_hash: c35328dd8b5e4defd9cc728caaa122305f9c5317e2ccfaf00b4446b8f12ff409 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: date_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: NOW bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: int_column - keyword: SET - keyword: DEFAULT - numeric_literal: '1' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: 
VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: text_column - keyword: SET - keyword: DEFAULT - quoted_literal: "'value'" - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: bool_column - keyword: SET - keyword: DEFAULT - boolean_literal: 'false' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - expression: column_reference: naked_identifier: other_value - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - bare_function: CURRENT_TIMESTAMP - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function bracketed: start_bracket: ( expression: column_reference: naked_identifier: a_parameter end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: SET - keyword: DEFAULT - function: function_name: function_name_identifier: a_function bracketed: start_bracket: ( expression: quoted_literal: "'a_parameter'" end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: ALTER - column_reference: naked_identifier: other_column - keyword: DROP - keyword: DEFAULT - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: naked_identifier: baz_role - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: quoted_identifier: '"baz-role"' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: naked_identifier: CURRENT_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: 
naked_identifier: baz_role - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: quoted_identifier: '"baz-role"' - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - object_reference: naked_identifier: CURRENT_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: OWNER - keyword: TO - keyword: SESSION_USER - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: RENAME - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: RENAME - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: column_name - keyword: TO - column_reference: naked_identifier: new_column_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: RENAME - keyword: TO - table_reference: naked_identifier: new_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: RENAME - keyword: TO - table_reference: naked_identifier: new_name - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: SET - keyword: SCHEMA - schema_reference: naked_identifier: new_schema - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: SET - bracketed: start_bracket: ( parameter: view_option_name end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: SET - bracketed: start_bracket: ( parameter: view_option_name 
comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: SET - bracketed: - start_bracket: ( - parameter: view_option_name - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - parameter: view_option_name2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value'" - comma: ',' - parameter: view_option_name3 - comma: ',' - parameter: view_option_name4 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: SET - bracketed: start_bracket: ( parameter: view_option_name end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: SET - bracketed: start_bracket: ( parameter: view_option_name comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: SET - bracketed: - start_bracket: ( - parameter: view_option_name - comma: ',' - parameter: view_option_name2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - parameter: view_option_name3 - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: RESET - bracketed: start_bracket: ( parameter: view_option_name end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: myview - keyword: RESET - bracketed: - start_bracket: ( - parameter: view_option_name - comma: ',' - parameter: view_option_name2 - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: RESET - bracketed: start_bracket: ( parameter: view_option_name end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: myview - keyword: RESET - bracketed: - start_bracket: ( - parameter: view_option_name - comma: ',' - parameter: view_option_name2 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/analyze.sql000066400000000000000000000003761451700765000240300ustar00rootroot00000000000000ANALYZE foo; ANALYSE foo; ANALYZE VERBOSE foo; ANALYZE (VERBOSE, SKIP_LOCKED) foo; ANALYZE (VERBOSE FALSE, SKIP_LOCKED TRUE) foo; ANALYZE (SKIP_LOCKED, VERBOSE FALSE) foo; ANALYZE VERBOSE foo (bar, bat); ANALYZE foo (bar, bat), foo2 (bar2, bat2); sqlfluff-2.3.5/test/fixtures/dialects/postgres/analyze.yml000066400000000000000000000052511451700765000240270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c6b6c8641a16f20ac8898a51c1dd2ff9b1f1c79eef7aaac5cf250133ffb09727 file: - statement: analyze_statement: keyword: ANALYZE table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: keyword: ANALYSE table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: - keyword: ANALYZE - keyword: VERBOSE - table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: keyword: ANALYZE bracketed: - start_bracket: ( - keyword: VERBOSE - comma: ',' - keyword: SKIP_LOCKED - end_bracket: ) table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: keyword: ANALYZE bracketed: - start_bracket: ( - keyword: VERBOSE - boolean_literal: 'FALSE' - comma: ',' - keyword: SKIP_LOCKED - boolean_literal: 'TRUE' - end_bracket: ) table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: keyword: ANALYZE bracketed: - start_bracket: ( - keyword: SKIP_LOCKED - comma: ',' - keyword: VERBOSE - boolean_literal: 'FALSE' - end_bracket: ) table_reference: naked_identifier: foo - statement_terminator: ; - statement: analyze_statement: - keyword: ANALYZE - keyword: VERBOSE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: bat - end_bracket: ) - statement_terminator: ; - statement: analyze_statement: - keyword: ANALYZE - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: bat - end_bracket: ) - comma: ',' - table_reference: naked_identifier: foo2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar2 - comma: ',' - column_reference: naked_identifier: bat2 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/array.sql000066400000000000000000000034161451700765000235010ustar00rootroot00000000000000SELECT ARRAY[1,2] || ARRAY[3,4]; SELECT ARRAY[['meeting', 'lunch'], ['training', 'presentation']]; CREATE TABLE sal_emp ( name text, pay_by_quarter integer[], schedule text[][] ); CREATE TABLE tictactoe ( squares integer[3][3] ); SELECT * FROM sal_emp WHERE pay_by_quarter[1] = 10000 OR pay_by_quarter[2] = 10000 OR pay_by_quarter[3] = 10000 OR pay_by_quarter[4] = 10000; INSERT INTO sal_emp VALUES ('Bill', ARRAY[10000, 10000, 10000, 10000], ARRAY[['meeting', 'lunch'], ['training', 'presentation']]); INSERT INTO sal_emp VALUES ('Carol', ARRAY[20000, 25000, 25000, 25000], ARRAY[['breakfast', 'consulting'], ['meeting', 'lunch']]); SELECT name FROM sal_emp WHERE pay_by_quarter[1] <> pay_by_quarter[2]; SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill'; UPDATE sal_emp SET pay_by_quarter[4] = 15000 WHERE name = 'Bill'; UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}' WHERE name = 'Carol'; SELECT array_dims(ARRAY[1,2] || ARRAY[3,4,5]); SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]); SELECT ARRAY[1, 2] || '{3, 4}'; SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'mon'); SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 FROM (SELECT '[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}'::int[] AS f1) AS ss; SELECT '{Hello,World}'::_text AS text_array; SELECT ARRAY['A', 'B', 'C']::_TEXT; SELECT SUM(CASE WHEN direction = 'forward' THEN unit ELSE 0 
END ) * (MAX(ARRAY[id, vertical]))[2] FROM direction_with_vertical_change; -- More advanced cases with expressions and missing slice start/end when accessing SELECT a[:], b[:1], c[2:], d[2:3]; SELECT a[1+2:3+4], b[5+6]; sqlfluff-2.3.5/test/fixtures/dialects/postgres/array.yml000066400000000000000000000602721451700765000235060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7b1770913ce03e03d4c8a832f905f970d6d1bf3c6090f1876d7a5ec516696e0e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - quoted_literal: "'training'" - comma: ',' - quoted_literal: "'presentation'" - end_square_bracket: ']' - end_square_bracket: ']' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: sal_emp - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: pay_by_quarter - data_type: keyword: integer start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: schedule - data_type: - keyword: text - start_square_bracket: '[' - end_square_bracket: ']' - start_square_bracket: '[' - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tictactoe - bracketed: start_bracket: ( column_reference: naked_identifier: squares data_type: - keyword: integer - start_square_bracket: '[' - expression: numeric_literal: '3' - end_square_bracket: ']' - start_square_bracket: '[' - expression: numeric_literal: '3' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sal_emp where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - binary_operator: OR - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' - 
comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - binary_operator: OR - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - binary_operator: OR - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10000' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: sal_emp - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Bill'" - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - comma: ',' - numeric_literal: '10000' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - quoted_literal: "'training'" - comma: ',' - quoted_literal: "'presentation'" - end_square_bracket: ']' - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: sal_emp - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Carol'" - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '20000' - comma: ',' - numeric_literal: '25000' - comma: ',' - numeric_literal: '25000' - comma: ',' - numeric_literal: '25000' - end_square_bracket: ']' - comma: ',' - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - quoted_literal: "'breakfast'" - comma: ',' - quoted_literal: "'consulting'" - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - quoted_literal: "'meeting'" - comma: ',' - quoted_literal: "'lunch'" - end_square_bracket: ']' - end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sal_emp where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - column_reference: naked_identifier: pay_by_quarter - array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: schedule - array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' - 
array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '1' - end_square_bracket: ']' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sal_emp where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Bill'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: sal_emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: pay_by_quarter array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' comparison_operator: raw_comparison_operator: '=' numeric_literal: '15000' where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Bill'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: sal_emp set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: pay_by_quarter array_accessor: - start_square_bracket: '[' - numeric_literal: '1' - slice: ':' - numeric_literal: '2' - end_square_bracket: ']' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{27000,27000}'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Carol'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: array_dims bracketed: start_bracket: ( expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: array_dims bracketed: start_bracket: ( expression: - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' - binary_operator: - pipe: '|' - pipe: '|' - typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - array_literal: - start_square_bracket: '[' - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_square_bracket: ']' - comma: ',' - array_literal: - start_square_bracket: '[' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_square_bracket: ']' - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_square_bracket: ']' binary_operator: - pipe: '|' - pipe: '|' quoted_literal: "'{3, 4}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: 
function_name_identifier: array_position bracketed: - start_bracket: ( - expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'sun'" - comma: ',' - quoted_literal: "'mon'" - comma: ',' - quoted_literal: "'tue'" - comma: ',' - quoted_literal: "'wed'" - comma: ',' - quoted_literal: "'thu'" - comma: ',' - quoted_literal: "'fri'" - comma: ',' - quoted_literal: "'sat'" - end_square_bracket: ']' - comma: ',' - expression: quoted_literal: "'mon'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - column_reference: naked_identifier: f1 - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: sign_indicator: '-' numeric_literal: '2' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '3' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: e1 - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: f1 - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: sign_indicator: '-' numeric_literal: '1' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '5' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: e2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}'" casting_operator: '::' data_type: keyword: int start_square_bracket: '[' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: f1 end_bracket: ) alias_expression: keyword: AS naked_identifier: ss - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{Hello,World}'" casting_operator: '::' data_type: data_type_identifier: _text alias_expression: keyword: AS naked_identifier: text_array - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - quoted_literal: "'A'" - comma: ',' - quoted_literal: "'B'" - comma: ',' - quoted_literal: "'C'" - end_square_bracket: ']' casting_operator: '::' data_type: data_type_identifier: _TEXT - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: direction comparison_operator: raw_comparison_operator: '=' quoted_literal: "'forward'" - keyword: THEN - expression: column_reference: naked_identifier: unit - else_clause: keyword: ELSE expression: numeric_literal: '0' - keyword: END end_bracket: ) binary_operator: '*' bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: ARRAY array_literal: - start_square_bracket: '[' - column_reference: 
naked_identifier: id - comma: ',' - column_reference: naked_identifier: vertical - end_square_bracket: ']' end_bracket: ) end_bracket: ) array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: direction_with_vertical_change - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: a array_accessor: start_square_bracket: '[' slice: ':' end_square_bracket: ']' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: b array_accessor: start_square_bracket: '[' slice: ':' numeric_literal: '1' end_square_bracket: ']' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: c array_accessor: start_square_bracket: '[' numeric_literal: '2' slice: ':' end_square_bracket: ']' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: d array_accessor: - start_square_bracket: '[' - numeric_literal: '2' - slice: ':' - numeric_literal: '3' - end_square_bracket: ']' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: a array_accessor: - start_square_bracket: '[' - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - slice: ':' - expression: - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' - end_square_bracket: ']' - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: b array_accessor: start_square_bracket: '[' expression: - numeric_literal: '5' - binary_operator: + - numeric_literal: '6' end_square_bracket: ']' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/bare_functions.sql000066400000000000000000000002041451700765000253540ustar00rootroot00000000000000SELECT current_date AS col1, current_timestamp AS col2, current_time as col3, localtime as col4, localtimestamp as col5 ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/bare_functions.yml000066400000000000000000000025201451700765000253610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 210d2e71599eaff34740b685cb392e0267bbd3ab09cf0c772df8d8388a79c123 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: current_date alias_expression: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: bare_function: current_timestamp alias_expression: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: bare_function: current_time alias_expression: keyword: as naked_identifier: col3 - comma: ',' - select_clause_element: bare_function: localtime alias_expression: keyword: as naked_identifier: col4 - comma: ',' - select_clause_element: bare_function: localtimestamp alias_expression: keyword: as naked_identifier: col5 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/call.sql000066400000000000000000000003071451700765000232720ustar00rootroot00000000000000CALL do_db_maintenance(); CALL my_schema.do_db_maintenance(); call procedure_name(); call procedure_name('param1', 123); call schema.procedure_name(); call schema.procedure_name('param1', 123); sqlfluff-2.3.5/test/fixtures/dialects/postgres/call.yml000066400000000000000000000043451451700765000233020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b12374e2b1d93458057b2bea73d70f3a3b0439b0b09dbbe7ef34c4d4f594dec5 file: - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: do_db_maintenance bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: CALL function: function_name: naked_identifier: my_schema dot: . function_name_identifier: do_db_maintenance bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: procedure_name bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: procedure_name bracketed: - start_bracket: ( - expression: quoted_literal: "'param1'" - comma: ',' - expression: numeric_literal: '123' - end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: naked_identifier: schema dot: . function_name_identifier: procedure_name bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: naked_identifier: schema dot: . 
function_name_identifier: procedure_name bracketed: - start_bracket: ( - expression: quoted_literal: "'param1'" - comma: ',' - expression: numeric_literal: '123' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cast_with_whitespaces.sql000066400000000000000000000021601451700765000267420ustar00rootroot00000000000000-- postgres_cast_with_whitespaces.sql /* Several valid queries where there is whitespace surrounding the Postgres cast operator (::) */ -- query from https://github.com/sqlfluff/sqlfluff/issues/2720 SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: text FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with an arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cast_with_whitespaces.yml000066400000000000000000000201111451700765000267400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 8aa7f240daeacea8a3fa688ddb3fea842e1a61cbfda2ff0d93b25e749ebb126b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cluster.sql000066400000000000000000000003301451700765000240340ustar00rootroot00000000000000CLUSTER employees USING employees_ind; CLUSTER VERBOSE employees USING employees_ind; CLUSTER employees; CLUSTER; CLUSTER VERBOSE; CLUSTER index_name ON table_name; CLUSTER public.temp_table USING idx_temp_table_ra; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cluster.yml000066400000000000000000000032641451700765000240470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0d16521927fc3186064bbef4aa28fc13853179831ee346d6cbc2186d1bb483a7 file: - statement: cluster_statement: - keyword: CLUSTER - table_reference: naked_identifier: employees - keyword: USING - index_reference: naked_identifier: employees_ind - statement_terminator: ; - statement: cluster_statement: - keyword: CLUSTER - keyword: VERBOSE - table_reference: naked_identifier: employees - keyword: USING - index_reference: naked_identifier: employees_ind - statement_terminator: ; - statement: cluster_statement: keyword: CLUSTER table_reference: naked_identifier: employees - statement_terminator: ; - statement: cluster_statement: keyword: CLUSTER - statement_terminator: ; - statement: cluster_statement: - keyword: CLUSTER - keyword: VERBOSE - statement_terminator: ; - statement: cluster_statement: - keyword: CLUSTER - index_reference: naked_identifier: index_name - keyword: 'ON' - table_reference: naked_identifier: table_name - statement_terminator: ; - statement: cluster_statement: - keyword: CLUSTER - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: temp_table - keyword: USING - index_reference: naked_identifier: idx_temp_table_ra - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/comment_on.sql000066400000000000000000000067201451700765000245220ustar00rootroot00000000000000-- This test file includes all examples from the Postgres docs, -- but not all are implemented so some are commented out for now. -- See https://www.postgresql.org/docs/13/sql-comment.html COMMENT ON TABLE mytable IS 'This is my table.'; COMMENT ON TABLE mytable IS NULL; COMMENT ON ACCESS METHOD gin IS 'GIN index access method'; COMMENT ON AGGREGATE my_aggregate (double precision) IS 'Computes sample variance'; COMMENT ON CAST (text AS int4) IS 'Allow casts from text to int4'; COMMENT ON COLLATION "fr_CA" IS 'Canadian French'; COMMENT ON COLUMN my_table.my_column IS 'Employee ID number'; COMMENT ON CONVERSION my_conv IS 'Conversion to UTF8'; COMMENT ON CONSTRAINT bar_col_cons ON bar IS 'Constrains column col'; COMMENT ON CONSTRAINT dom_col_constr ON DOMAIN dom IS 'Constrains col of domain'; COMMENT ON DATABASE my_database IS 'Development Database'; COMMENT ON DOMAIN my_domain IS 'Email Address Domain'; COMMENT ON EVENT TRIGGER abort_ddl IS 'Aborts all DDL commands'; COMMENT ON EXTENSION hstore IS 'implements the hstore data type'; COMMENT ON FOREIGN DATA WRAPPER mywrapper IS 'my foreign data wrapper'; COMMENT ON FOREIGN TABLE my_foreign_table IS 'Employee Information in other database'; COMMENT ON FUNCTION my_function (timestamp) IS 'Returns Roman Numeral'; comment on function function1 is 'comment'; comment on function function2() is 'comment'; COMMENT ON INDEX my_index IS 'Enforces uniqueness on employee ID'; COMMENT ON LANGUAGE plpython IS 'Python support for stored procedures'; --COMMENT ON LARGE OBJECT 346344 IS 'Planning document'; COMMENT ON MATERIALIZED VIEW my_matview IS 'Summary of order history'; --COMMENT ON OPERATOR ^ (text, text) IS 'Performs intersection of two texts'; --COMMENT ON OPERATOR - (NONE, integer) IS 'Unary minus'; --COMMENT ON OPERATOR CLASS int4ops USING btree IS '4 byte integer operators for btrees'; --COMMENT ON OPERATOR FAMILY integer_ops USING btree IS 'all integer operators for btrees'; COMMENT ON POLICY my_policy ON mytable IS 'Filter rows by users'; COMMENT ON PROCEDURE my_proc (integer, integer) IS 'Runs a report'; comment on procedure procedure1 is 'comment'; comment on procedure procedure2() is 'comment'; COMMENT ON PUBLICATION alltables IS 'Publishes all operations on all tables'; COMMENT ON ROLE my_role IS 'Administration group for finance tables'; COMMENT ON ROUTINE my_routine (integer, integer) IS 'Runs a routine (which is a function or procedure)'; COMMENT ON RULE my_rule ON my_table IS 'Logs updates of employee records'; COMMENT ON SCHEMA my_schema IS 'Departmental data'; COMMENT ON SEQUENCE my_sequence IS 'Used to generate primary keys'; COMMENT ON SERVER myserver IS 'my foreign server'; COMMENT ON STATISTICS my_statistics IS 'Improves planner row estimations'; COMMENT ON SUBSCRIPTION alltables IS 'Subscription for all operations on all tables'; COMMENT ON TABLE my_schema.my_table IS 'Employee Information'; COMMENT ON TABLESPACE my_tablespace IS 'Tablespace for indexes'; COMMENT ON TEXT SEARCH CONFIGURATION my_config IS 'Special word filtering'; COMMENT ON TEXT SEARCH DICTIONARY swedish IS 'Snowball stemmer for Swedish language'; COMMENT ON TEXT SEARCH PARSER my_parser IS 'Splits text into words'; COMMENT ON TEXT SEARCH TEMPLATE snowball IS 'Snowball stemmer'; --COMMENT ON 
TRANSFORM FOR hstore LANGUAGE plpythonu IS 'Transform between hstore and Python dict'; COMMENT ON TRIGGER my_trigger ON my_table IS 'Used for RI'; COMMENT ON TYPE complex IS 'Complex number data type'; COMMENT ON VIEW my_view IS 'View of departmental costs'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/comment_on.yml000066400000000000000000000316771451700765000245350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6f823452116c615d2ddd804b34b5f94d8128f3f47c950a45f74202f50f065bab file: - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: mytable - keyword: IS - quoted_literal: "'This is my table.'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: mytable - keyword: IS - keyword: 'NULL' - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ACCESS - keyword: METHOD - object_reference: naked_identifier: gin - keyword: IS - quoted_literal: "'GIN index access method'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: AGGREGATE - object_reference: naked_identifier: my_aggregate - bracketed: - start_bracket: ( - word: double - word: precision - end_bracket: ) - keyword: IS - quoted_literal: "'Computes sample variance'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CAST - bracketed: - start_bracket: ( - object_reference: naked_identifier: text - keyword: AS - object_reference: naked_identifier: int4 - end_bracket: ) - keyword: IS - quoted_literal: "'Allow casts from text to int4'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLLATION - object_reference: quoted_identifier: '"fr_CA"' - keyword: IS - quoted_literal: "'Canadian French'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: COLUMN - column_reference: - naked_identifier: my_table - dot: . 
- naked_identifier: my_column - keyword: IS - quoted_literal: "'Employee ID number'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONVERSION - object_reference: naked_identifier: my_conv - keyword: IS - quoted_literal: "'Conversion to UTF8'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: bar_col_cons - keyword: 'ON' - object_reference: naked_identifier: bar - keyword: IS - quoted_literal: "'Constrains column col'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: CONSTRAINT - object_reference: naked_identifier: dom_col_constr - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: dom - keyword: IS - quoted_literal: "'Constrains col of domain'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: DATABASE - database_reference: naked_identifier: my_database - keyword: IS - quoted_literal: "'Development Database'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: my_domain - keyword: IS - quoted_literal: "'Email Address Domain'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: EVENT - keyword: TRIGGER - object_reference: naked_identifier: abort_ddl - keyword: IS - quoted_literal: "'Aborts all DDL commands'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: EXTENSION - extension_reference: naked_identifier: hstore - keyword: IS - quoted_literal: "'implements the hstore data type'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FOREIGN - keyword: DATA - keyword: WRAPPER - object_reference: naked_identifier: mywrapper - keyword: IS - quoted_literal: "'my foreign data wrapper'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FOREIGN - keyword: TABLE - object_reference: naked_identifier: my_foreign_table - keyword: IS - quoted_literal: "'Employee Information in other database'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: my_function - function_parameter_list: bracketed: start_bracket: ( data_type: datetime_type_identifier: keyword: timestamp end_bracket: ) - keyword: IS - quoted_literal: "'Returns Roman Numeral'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: function - function_name: function_name_identifier: function1 - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: function - function_name: function_name_identifier: function2 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: INDEX - index_reference: naked_identifier: my_index - keyword: IS - quoted_literal: "'Enforces uniqueness on employee ID'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: LANGUAGE - object_reference: naked_identifier: plpython - keyword: IS - quoted_literal: "'Python support for stored 
procedures'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: MATERIALIZED - keyword: VIEW - object_reference: naked_identifier: my_matview - keyword: IS - quoted_literal: "'Summary of order history'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: POLICY - object_reference: naked_identifier: my_policy - keyword: 'ON' - object_reference: naked_identifier: mytable - keyword: IS - quoted_literal: "'Filter rows by users'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PROCEDURE - object_reference: naked_identifier: my_proc - bracketed: - start_bracket: ( - word: integer - comma: ',' - word: integer - end_bracket: ) - keyword: IS - quoted_literal: "'Runs a report'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: procedure1 - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: procedure2 - bracketed: start_bracket: ( end_bracket: ) - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: PUBLICATION - object_reference: naked_identifier: alltables - keyword: IS - quoted_literal: "'Publishes all operations on all tables'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: my_role - keyword: IS - quoted_literal: "'Administration group for finance tables'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: ROUTINE - object_reference: naked_identifier: my_routine - bracketed: - start_bracket: ( - word: integer - comma: ',' - word: integer - end_bracket: ) - keyword: IS - quoted_literal: "'Runs a routine (which is a function or procedure)'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: RULE - object_reference: naked_identifier: my_rule - keyword: 'ON' - object_reference: naked_identifier: my_table - keyword: IS - quoted_literal: "'Logs updates of employee records'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - keyword: IS - quoted_literal: "'Departmental data'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SEQUENCE - object_reference: naked_identifier: my_sequence - keyword: IS - quoted_literal: "'Used to generate primary keys'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SERVER - object_reference: naked_identifier: myserver - keyword: IS - quoted_literal: "'my foreign server'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: STATISTICS - object_reference: naked_identifier: my_statistics - keyword: IS - quoted_literal: "'Improves planner row estimations'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: SUBSCRIPTION - object_reference: naked_identifier: alltables - keyword: IS - quoted_literal: "'Subscription for all operations on all tables'" - statement_terminator: ; - statement: comment_clause: - 
keyword: COMMENT - keyword: 'ON' - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: IS - quoted_literal: "'Employee Information'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TABLESPACE - object_reference: naked_identifier: my_tablespace - keyword: IS - quoted_literal: "'Tablespace for indexes'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TEXT - keyword: SEARCH - keyword: CONFIGURATION - object_reference: naked_identifier: my_config - keyword: IS - quoted_literal: "'Special word filtering'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TEXT - keyword: SEARCH - keyword: DICTIONARY - object_reference: naked_identifier: swedish - keyword: IS - quoted_literal: "'Snowball stemmer for Swedish language'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TEXT - keyword: SEARCH - keyword: PARSER - object_reference: naked_identifier: my_parser - keyword: IS - quoted_literal: "'Splits text into words'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TEXT - keyword: SEARCH - keyword: TEMPLATE - object_reference: naked_identifier: snowball - keyword: IS - quoted_literal: "'Snowball stemmer'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TRIGGER - object_reference: naked_identifier: my_trigger - keyword: 'ON' - object_reference: naked_identifier: my_table - keyword: IS - quoted_literal: "'Used for RI'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: TYPE - object_reference: naked_identifier: complex - keyword: IS - quoted_literal: "'Complex number data type'" - statement_terminator: ; - statement: comment_clause: - keyword: COMMENT - keyword: 'ON' - keyword: VIEW - table_reference: naked_identifier: my_view - keyword: IS - quoted_literal: "'View of departmental costs'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/composite_types.sql000066400000000000000000000001711451700765000256040ustar00rootroot00000000000000CREATE TYPE my_type AS ( int_ INT4, bool_ BOOLEAN, comment_ TEXT ); SELECT ((1, true, null)::my_type).int_; sqlfluff-2.3.5/test/fixtures/dialects/postgres/composite_types.yml000066400000000000000000000031131451700765000256050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2953cebbe0daec9dac94aafed93a8feae0f0c153bf399f8956e7fa5cc36fca69 file: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: my_type - keyword: AS - bracketed: - start_bracket: ( - word: int_ - word: INT4 - comma: ',' - word: bool_ - word: BOOLEAN - comma: ',' - word: comment_ - word: TEXT - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( expression: cast_expression: bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - boolean_literal: 'true' - comma: ',' - null_literal: 'null' - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: my_type end_bracket: ) semi_structured_expression: dot: . naked_identifier: int_ - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/copy.sql000066400000000000000000000046251451700765000233400ustar00rootroot00000000000000-- Issue #2480 COPY (Select my_col From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER, DELIMITER '#', ENCODING 'UTF8'); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"'); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FREEZE true); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE (col1, col2)); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE *); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_NOT_NULL (col1, col2)); COPY (Select * From my_table) TO '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE false); COPY (Select * From my_table) TO STDOUT WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE true); COPY (Select * From my_table) TO PROGRAM '/path/to/script' WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE false); COPY my_table(col) TO '/tmp/dump.csv'; COPY my_table TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2)); COPY my_table(col1, col2) TO '/tmp/dump.csv' WITH (FORMAT csv, HEADER true); COPY my_table(col1, col2, col3, col4) TO PROGRAM '/path/to/script' WITH (FORMAT csv, HEADER true, FREEZE); COPY my_table(col1, col2) TO STDOUT; COPY my_table(col1, col2) TO STDOUT WITH (FORMAT csv, HEADER true, FREEZE false); COPY my_table TO STDOUT WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2)); COPY my_table FROM '/tmp/dump.csv'; COPY my_table FROM STDIN; COPY my_table FROM PROGRAM '/path/to/script'; COPY my_table(col) FROM '/tmp/dump.csv'; COPY my_table(col1, col2, col3) FROM STDIN; COPY my_table(col1, col2) FROM PROGRAM '/path/to/script'; COPY my_table(col1, col2,col3, col4) FROM PROGRAM '/path/to/script' WITH (FORMAT csv, HEADER true, FREEZE true, FORCE_NULL (col1, col2)); COPY my_table(col1, col2,col3, col4) FROM '/tmp/dump.csv' WITH (FORMAT csv, ESCAPE '\', FORCE_QUOTE *); COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', ENCODING 'UTF8'); COPY my_table FROM STDIN WITH (FORMAT csv, ESCAPE '\', FORCE_NULL (col1, col2), FREEZE true); COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"', FORCE_QUOTE *); COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', NULL 'null', QUOTE '"', FORCE_QUOTE *) WHERE col1 = 'some_value'; 
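The COPY statements above are dialect fixtures, but they can also be exercised directly; the following is a minimal sketch (not part of the fixture suite, and assuming sqlfluff's documented simple Python API) that checks one of them against the postgres grammar:

import sqlfluff

# One of the fixture statements from copy.sql above.
sql = "COPY my_table FROM STDIN WITH (FORMAT csv, HEADER, DELIMITER '#', ENCODING 'UTF8');"

# sqlfluff.parse raises an error when the string cannot be parsed under the
# chosen dialect, so returning normally means the postgres COPY grammar
# accepts the statement.
parsed = sqlfluff.parse(sql, dialect="postgres")

# sqlfluff.lint returns a list of violation dicts; an empty list means the
# snippet parses cleanly and raises no rule violations.
print(sqlfluff.lint(sql, dialect="postgres"))

After adding or changing statements in copy.sql, the paired copy.yml below must be regenerated with `python test/generate_parse_fixture_yml.py`, as its header comment notes; the `_hash` field must match a hash recomputed by SQLFluff, so the tests reject hand-edited YAML.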
sqlfluff-2.3.5/test/fixtures/dialects/postgres/copy.yml000066400000000000000000000507361451700765000233460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3de0d78af5228e7c6d59d59beb0758b0cc80503ed69bfb610d20aa1f7931b002 file: - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: column_reference: naked_identifier: my_col from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: ENCODING - quoted_literal: "'UTF8'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: 
keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NOT_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: From from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - keyword: TO - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - 
end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: TO - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: TO - keyword: STDOUT - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - quoted_literal: 
"'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: col end_bracket: ) - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - end_bracket: ) - keyword: FROM - keyword: STDIN - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: FROM - keyword: PROGRAM - quoted_literal: "'/path/to/script'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - boolean_literal: 'true' - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - comma: ',' - keyword: FORCE_NULL - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comma: ',' - column_reference: naked_identifier: col3 - comma: ',' - column_reference: naked_identifier: col4 - end_bracket: ) - keyword: FROM - quoted_literal: "'/tmp/dump.csv'" - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: ENCODING - quoted_literal: "'UTF8'" - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: ESCAPE - quoted_literal: "'\\'" - comma: ',' - keyword: FORCE_NULL - bracketed: - 
start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - keyword: FREEZE - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: my_table - keyword: FROM - keyword: STDIN - keyword: WITH - bracketed: - start_bracket: ( - keyword: FORMAT - naked_identifier: csv - comma: ',' - keyword: HEADER - comma: ',' - keyword: DELIMITER - quoted_literal: "'#'" - comma: ',' - keyword: 'NULL' - quoted_literal: "'null'" - comma: ',' - keyword: QUOTE - quoted_literal: "'\"'" - comma: ',' - keyword: FORCE_QUOTE - star: '*' - end_bracket: ) - keyword: WHERE - expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some_value'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_cast.sql000066400000000000000000000017161451700765000246410ustar00rootroot00000000000000CREATE CAST (int AS bool) WITH FUNCTION fname; CREATE CAST (int AS bool) WITH FUNCTION fname AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname AS IMPLICIT; CREATE CAST (int AS bool) WITH FUNCTION fname(); CREATE CAST (int AS bool) WITH FUNCTION fname() AS ASSIGNMENT; CREATE CAST (int AS bool) WITH FUNCTION fname() AS IMPLICIT; CREATE CAST (int AS bool) WITH FUNCTION fname(bool); CREATE CAST (int AS bool) WITH FUNCTION sch.fname(int, bool) AS ASSIGNMENT; CREATE CAST (udt_1 AS udt_2) WITH FUNCTION fname(udt_1, udt_2); CREATE CAST (sch.udt_1 AS sch.udt_2) WITH FUNCTION sch.fname(sch.udt_1, sch.udt_2); -- PG extension for not listing an actual function: CREATE CAST (int AS bool) WITHOUT FUNCTION; CREATE CAST (int AS bool) WITHOUT FUNCTION AS ASSIGNMENT; CREATE CAST (int AS bool) WITHOUT FUNCTION AS IMPLICIT; CREATE CAST (int AS bool) WITH INOUT; CREATE CAST (int AS bool) WITH INOUT AS ASSIGNMENT; CREATE CAST (int AS bool) WITH INOUT AS IMPLICIT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_cast.yml000066400000000000000000000175711451700765000246510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9e34d987801d78938da48d706596e4179445070e27a9b8e4a6146a28543d4c37 file: - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: bool end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . 
function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: int - comma: ',' - data_type: keyword: bool - end_bracket: ) - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - comma: ',' - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - keyword: WITH - keyword: FUNCTION - function_name: naked_identifier: sch dot: . function_name_identifier: fname - function_parameter_list: bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - comma: ',' - data_type: naked_identifier: sch dot: . data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITHOUT - keyword: FUNCTION - keyword: AS - keyword: IMPLICIT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - keyword: AS - keyword: ASSIGNMENT - statement_terminator: ; - statement: create_cast_statement: - keyword: CREATE - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: WITH - keyword: INOUT - keyword: AS - keyword: IMPLICIT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_collation.sql000066400000000000000000000003631451700765000256700ustar00rootroot00000000000000CREATE COLLATION numeric (provider = icu, locale = 'en@colNumeric=yes'); CREATE COLLATION french (locale = 'fr_FR.utf8'); CREATE COLLATION german_phonebook (provider = icu, locale = 'de-u-co-phonebk'); CREATE COLLATION german FROM "de_DE"; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_collation.yml000066400000000000000000000040061451700765000256700ustar00rootroot00000000000000# YML test files are 
auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9e7e11a57e7d87b51f1f6fc06c349a9f2a666f583242bd8c54afb7ca4a447cc2 file: - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: numeric - bracketed: - start_bracket: ( - keyword: provider - comparison_operator: raw_comparison_operator: '=' - keyword: icu - comma: ',' - keyword: locale - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en@colNumeric=yes'" - end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: french - bracketed: start_bracket: ( keyword: locale comparison_operator: raw_comparison_operator: '=' quoted_literal: "'fr_FR.utf8'" end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: german_phonebook - bracketed: - start_bracket: ( - keyword: provider - comparison_operator: raw_comparison_operator: '=' - keyword: icu - comma: ',' - keyword: locale - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'de-u-co-phonebk'" - end_bracket: ) - statement_terminator: ; - statement: create_collation_statement: - keyword: CREATE - keyword: COLLATION - object_reference: naked_identifier: german - keyword: FROM - object_reference: quoted_identifier: '"de_DE"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_database.sql000066400000000000000000000042621451700765000254520ustar00rootroot00000000000000CREATE DATABASE db; CREATE DATABASE db OWNER user_name; CREATE DATABASE db OWNER = user_name; CREATE DATABASE db WITH OWNER = user_name; CREATE DATABASE db ENCODING = 'UTF8'; CREATE DATABASE db TEMPLATE = template_name; CREATE DATABASE db WITH TEMPLATE = template_name; CREATE DATABASE db ENCODING 'UTF8'; CREATE DATABASE db WITH ENCODING = 'UTF8'; CREATE DATABASE db LOCALE 'en_US.UTF-8'; CREATE DATABASE db LOCALE = 'en_US.UTF-8'; CREATE DATABASE db WITH LOCALE = 'en_US.UTF-8'; CREATE DATABASE db LC_COLLATE 'en_US.UTF-8'; CREATE DATABASE db LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db WITH LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8'; CREATE DATABASE db WITH LC_CTYPE 'en_US.UTF-8' LC_COLLATE 'en_US.UTF-8' ; CREATE DATABASE db LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'; CREATE DATABASE db WITH LC_COLLATE = 'en_US.UTF-8' LC_CTYPE = 'en_US.UTF-8'; CREATE DATABASE db WITH LC_CTYPE = 'en_US.UTF-8' LC_COLLATE = 'en_US.UTF-8'; CREATE DATABASE db TABLESPACE DEFAULT; CREATE DATABASE db TABLESPACE = DEFAULT; CREATE DATABASE db TABLESPACE new_tablespace; CREATE DATABASE db TABLESPACE = new_tablespace; CREATE DATABASE db WITH TABLESPACE = new_tablespace; CREATE DATABASE db ALLOW_CONNECTIONS true; CREATE DATABASE db ALLOW_CONNECTIONS = true; CREATE DATABASE db CONNECTION LIMIT 10; CREATE DATABASE db CONNECTION LIMIT = 10; CREATE DATABASE db IS_TEMPLATE true; CREATE DATABASE db IS_TEMPLATE = true; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true; CREATE DATABASE db WITH IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true; CREATE DATABASE db WITH IS_TEMPLATE = true 
CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LOCALE 'en_US.UTF-8' OWNER user_name; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LOCALE = 'en_US.UTF-8' OWNER user_name; CREATE DATABASE db IS_TEMPLATE = true CONNECTION LIMIT 10 ALLOW_CONNECTIONS = true TEMPLATE = template_name ENCODING = 'UTF8' LC_COLLATE 'en_US.UTF-8' LC_CTYPE 'en_US.UTF-8' OWNER user_name; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_database.yml000066400000000000000000000322121451700765000254500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 546e7d3c6499fa9a0d9eed09ff47ad0b47ff23d6443a1c61e311fe00425dd943 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: OWNER - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: OWNER - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ENCODING - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LOCALE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LOCALE - comparison_operator: 
raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LOCALE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: LC_CTYPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: LC_COLLATE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - keyword: DEFAULT - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - tablespace_reference: 
naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: TABLESPACE - comparison_operator: raw_comparison_operator: '=' - tablespace_reference: naked_identifier: new_tablespace - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: CONNECTION - keyword: LIMIT - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LOCALE - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - 
boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LOCALE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: db - keyword: IS_TEMPLATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '10' - keyword: ALLOW_CONNECTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: TEMPLATE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: template_name - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTF8'" - keyword: LC_COLLATE - quoted_literal: "'en_US.UTF-8'" - keyword: LC_CTYPE - quoted_literal: "'en_US.UTF-8'" - keyword: OWNER - object_reference: naked_identifier: user_name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_dollar_quoted_function.sql000066400000000000000000000001451451700765000304450ustar00rootroot00000000000000CREATE FUNCTION foo(integer, integer) RETURNS integer AS $$ select $1 + $2; $$ LANGUAGE SQL; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_dollar_quoted_function.yml000066400000000000000000000020131451700765000304430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0187462d58927b4c36410e856006e4b9add87bd3af7d3937567adbc49fd54695 file: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: $$ select $1 + $2; $$ language_clause: keyword: LANGUAGE naked_identifier: SQL statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_domain.sql000066400000000000000000000003741451700765000251550ustar00rootroot00000000000000CREATE DOMAIN us_postal_code AS TEXT CHECK( VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$' ); create domain oname as text; CREATE DOMAIN mystr AS text CONSTRAINT not_empty CHECK (LENGTH(value) > 0) CONSTRAINT too_big CHECK (LENGTH(value) <= 50000); sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_domain.yml000066400000000000000000000054041451700765000251560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2afc00620c19ab518360a0671524869ccf5691b6e7b1116a0ce06e4aebd41214 file: - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: us_postal_code - keyword: AS - data_type: keyword: TEXT - keyword: CHECK - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: VALUE - like_operator: '~' - quoted_literal: "'^\\d{5}$'" - binary_operator: OR - column_reference: naked_identifier: VALUE - like_operator: '~' - quoted_literal: "'^\\d{5}-\\d{4}$'" end_bracket: ) - statement_terminator: ; - statement: create_domain_statement: - keyword: create - keyword: domain - object_reference: naked_identifier: oname - keyword: as - data_type: keyword: text - statement_terminator: ; - statement: create_domain_statement: - keyword: CREATE - keyword: DOMAIN - object_reference: naked_identifier: mystr - keyword: AS - data_type: keyword: text - keyword: CONSTRAINT - object_reference: naked_identifier: not_empty - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: LENGTH bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - keyword: CONSTRAINT - object_reference: naked_identifier: too_big - keyword: CHECK - expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: LENGTH bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '50000' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_extension.sql000066400000000000000000000006141451700765000257170ustar00rootroot00000000000000CREATE EXTENSION amazing_extension with schema schema1 VERSION '2.0.1.2' FROM '1.0'; CREATE EXTENSION IF NOT EXISTS amazing_extension with schema schema1 VERSION '1.2.3a4' FROM '1.0'; CREATE EXTENSION amazing_extension with schema schema1 VERSION version_named FROM from_named; DROP EXTENSION amazing_extension; DROP EXTENSION IF EXISTS amazing_extension; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_extension.yml000066400000000000000000000042641451700765000257260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 13f0d8a792a648d25f079cc2edfbf2bd9fa6571e79c3aab948efd5a20f9e8526 file: - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: quoted_literal: "'2.0.1.2'" - keyword: FROM - version_identifier: quoted_literal: "'1.0'" - statement_terminator: ; - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - keyword: IF - keyword: NOT - keyword: EXISTS - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: quoted_literal: "'1.2.3a4'" - keyword: FROM - version_identifier: quoted_literal: "'1.0'" - statement_terminator: ; - statement: create_extension_statement: - keyword: CREATE - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - keyword: with - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: VERSION - version_identifier: naked_identifier: version_named - keyword: FROM - version_identifier: naked_identifier: from_named - statement_terminator: ; - statement: drop_extension_statement: - keyword: DROP - keyword: EXTENSION - extension_reference: naked_identifier: amazing_extension - statement_terminator: ; - statement: drop_extension_statement: - keyword: DROP - keyword: EXTENSION - keyword: IF - keyword: EXISTS - extension_reference: naked_identifier: amazing_extension - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_function.sql000066400000000000000000000070361451700765000255350ustar00rootroot00000000000000-- Some more complicated Postgres function creations. 
CREATE FUNCTION add(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE SQL;

-- Quoted language options are deprecated but still supported
CREATE FUNCTION add(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE 'sql';

CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS '
    BEGIN
        RETURN i + 1;
    END;
' LANGUAGE plpgsql VOLATILE;

CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS '
    BEGIN
        RETURN i + 1;
    END;
' LANGUAGE plpgsql
    WINDOW IMMUTABLE STABLE LEAKPROOF
    RETURNS NULL ON NULL INPUT
    EXTERNAL SECURITY DEFINER
    ROWS 5
    SET test_param = 3;

CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer
    AS 'C:\\my_file.c', 'symlink_c'
    LANGUAGE plpgsql
    WINDOW IMMUTABLE STABLE NOT LEAKPROOF
    CALLED ON NULL INPUT
    EXTERNAL SECURITY DEFINER
    COST 123
    ROWS 5
    SET test_param = 3
    WITH (isStrict);

CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer
    PARALLEL UNSAFE
    AS $$
    BEGIN
        RETURN i + 1;
    END;
$$ LANGUAGE plpgsql SUPPORT my_function;

CREATE FUNCTION add(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE SQL
    IMMUTABLE
    RETURNS NULL ON NULL INPUT;

CREATE OR REPLACE FUNCTION increment(i integer) RETURNS integer AS $$
    BEGIN
        RETURN i + 1;
    END;
$$ LANGUAGE plpgsql;

CREATE FUNCTION dup(in int, out f1 int, out f2 text)
    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
    LANGUAGE SQL;

SELECT * FROM dup(42);

CREATE TYPE dup_result AS (f1 int, f2 text);

CREATE FUNCTION dup(int) RETURNS dup_result
    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
    LANGUAGE SQL;

SELECT * FROM dup(42);

CREATE FUNCTION dup(int) RETURNS TABLE(f1 int, f2 text)
    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
    LANGUAGE SQL;

CREATE FUNCTION dup(int) RETURNS TABLE("f1" int, "f2" text)
    AS $$ SELECT $1, CAST($1 AS text) || ' is text' $$
    LANGUAGE SQL;

SELECT * FROM dup(42);

CREATE FUNCTION check_password(uname TEXT, pass TEXT)
RETURNS BOOLEAN AS $$
DECLARE passed BOOLEAN;
BEGIN
    SELECT (pwd = $2) INTO passed
    FROM pwds
    WHERE username = $1;

    RETURN passed;
END;
$$ LANGUAGE plpgsql
    SECURITY DEFINER
    SET search_path = admin, pg_temp;

BEGIN;

CREATE FUNCTION check_password(uname TEXT, pass TEXT)
RETURNS BOOLEAN AS $$
DECLARE passed BOOLEAN;
BEGIN
    SELECT (pwd = $2) INTO passed
    FROM pwds
    WHERE username = $1;

    RETURN passed;
END;
$$ LANGUAGE plpgsql
    SECURITY DEFINER;

REVOKE ALL ON FUNCTION check_password(uname TEXT, pass TEXT) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION check_password(uname TEXT, pass TEXT) TO admins;

COMMIT;

CREATE OR REPLACE FUNCTION public.setof_test()
RETURNS SETOF text
LANGUAGE sql
STABLE STRICT
AS $function$
select unnest(array['hi', 'test'])
$function$
;

CREATE OR REPLACE FUNCTION public.foo(_a TEXT, _$b INT)
RETURNS FLOAT AS $$
    RETURN 0.0
$$ LANGUAGE plpgsql STABLE PARALLEL SAFE;

CREATE FUNCTION _add(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE SQL;

CREATE FUNCTION _$add(integer, integer) RETURNS integer
    AS 'select $1 + $2;'
    LANGUAGE SQL;

create function test2(
    x date = current_date
) returns date as $$
    begin
        return x;
    end;
$$;

create function test3(
    x date default current_date
) returns date as $$
    begin
        return x;
    end;
$$;

CREATE OR REPLACE FUNCTION data_wrapper() RETURNS SETOF data
    STABLE PARALLEL SAFE LEAKPROOF
BEGIN ATOMIC
    SELECT * FROM data;
END;
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_function.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e159dd09576772d0f482616449f7291ae6875bd834947338133532762478aec file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE quoted_identifier: "'sql'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'\n BEGIN\n RETURN i + 1;\n END;\n'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: VOLATILE - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'\n BEGIN\n RETURN i + 1;\n END;\n'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: WINDOW - keyword: IMMUTABLE - keyword: STABLE - keyword: LEAKPROOF - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: ROWS - numeric_literal: '5' - keyword: SET - parameter: test_param - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'C:\\\\my_file.c'" - comma: ',' - quoted_literal: "'symlink_c'" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: WINDOW - keyword: IMMUTABLE - keyword: STABLE - keyword: NOT - keyword: LEAKPROOF - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: EXTERNAL - keyword: SECURITY - keyword: DEFINER - keyword: COST - numeric_literal: '123' - keyword: ROWS - numeric_literal: '5' - keyword: SET - parameter: test_param - comparison_operator: 
raw_comparison_operator: '=' - numeric_literal: '3' - keyword: WITH - bracketed: start_bracket: ( parameter: isStrict end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: PARALLEL - keyword: UNSAFE - keyword: AS - quoted_literal: "$$\n BEGIN\n RETURN i + 1;\n END;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SUPPORT - parameter: my_function - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: - keyword: AS - quoted_literal: "'select $1 + $2;'" - language_clause: keyword: LANGUAGE naked_identifier: SQL - keyword: IMMUTABLE - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: increment - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: keyword: integer end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "$$\n BEGIN\n RETURN i + 1;\n \ \ END;\n$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: - start_bracket: ( - keyword: in - data_type: keyword: int - comma: ',' - keyword: out - parameter: f1 - data_type: keyword: int - comma: ',' - keyword: out - parameter: f2 - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: dup bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: dup_result - keyword: AS - bracketed: - start_bracket: ( - word: f1 - word: int - comma: ',' - word: f2 - word: text - end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: dup_result - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: dup bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_reference: naked_identifier: f1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: f2 - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: dup - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: int end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"f1"' - data_type: keyword: int - comma: ',' - column_reference: quoted_identifier: '"f2"' - data_type: keyword: text - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$ SELECT $1, CAST($1 AS text) || ' is text' $$" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: dup bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: RETURNS - data_type: keyword: BOOLEAN - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE passed BOOLEAN;\nBEGIN\n SELECT (pwd\ \ = $2) INTO passed\n FROM pwds\n WHERE username = $1;\n\ \n RETURN passed;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SECURITY - keyword: DEFINER - keyword: SET - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - parameter: admin - comma: ',' - parameter: pg_temp - statement_terminator: ; - statement: transaction_statement: keyword: BEGIN - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: RETURNS - data_type: keyword: BOOLEAN - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE passed BOOLEAN;\nBEGIN\n SELECT (pwd\ \ = $2) INTO passed\n FROM pwds\n WHERE username = $1;\n\ \n RETURN passed;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: 
plpgsql - keyword: SECURITY - keyword: DEFINER - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: ALL - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: FROM - object_reference: naked_identifier: PUBLIC - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - function_name: function_name_identifier: check_password - function_parameter_list: bracketed: - start_bracket: ( - parameter: uname - data_type: keyword: TEXT - comma: ',' - parameter: pass - data_type: keyword: TEXT - end_bracket: ) - keyword: TO - role_reference: naked_identifier: admins - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: setof_test - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: SETOF - data_type: keyword: text - function_definition: - language_clause: keyword: LANGUAGE naked_identifier: sql - keyword: STABLE - keyword: STRICT - keyword: AS - quoted_literal: "$function$\nselect unnest(array['hi', 'test'])\n$function$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - parameter: _a - data_type: keyword: TEXT - comma: ',' - parameter: _$b - data_type: keyword: INT - end_bracket: ) - keyword: RETURNS - data_type: keyword: FLOAT - function_definition: - keyword: AS - quoted_literal: "$$\n RETURN 0.0\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: STABLE - keyword: PARALLEL - keyword: SAFE - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: _add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: _$add - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: integer - comma: ',' - data_type: keyword: integer - end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "'select $1 + $2;'" language_clause: keyword: LANGUAGE naked_identifier: SQL - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: function - function_name: function_name_identifier: test2 - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: datetime_type_identifier: keyword: date comparison_operator: raw_comparison_operator: '=' expression: bare_function: current_date end_bracket: ) - keyword: returns - data_type: datetime_type_identifier: 
keyword: date - function_definition: keyword: as quoted_literal: "$$\n begin\n return x;\n end;\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: function - function_name: function_name_identifier: test3 - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: datetime_type_identifier: keyword: date keyword: default expression: bare_function: current_date end_bracket: ) - keyword: returns - data_type: datetime_type_identifier: keyword: date - function_definition: keyword: as quoted_literal: "$$\n begin\n return x;\n end;\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: data_wrapper - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: SETOF - data_type: data_type_identifier: data - function_definition: - keyword: STABLE - keyword: PARALLEL - keyword: SAFE - keyword: LEAKPROOF - keyword: BEGIN - keyword: ATOMIC - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: data - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_index.sql000066400000000000000000000027021451700765000250120ustar00rootroot00000000000000CREATE UNIQUE INDEX title_idx ON films (title); CREATE UNIQUE INDEX title_idx ON films (title) INCLUDE (director, rating); CREATE INDEX title_idx ON films (title) WITH (deduplicate_items = 'off'); CREATE INDEX ON films ((lower(title))); CREATE INDEX title_idx_german ON films (title COLLATE "de_DE"); CREATE INDEX title_idx_nulls_low ON films (title NULLS FIRST); CREATE INDEX title_idx_nulls_high ON films (title NULLS LAST); CREATE UNIQUE INDEX title_idx ON films (title) WITH (fillfactor = 70); CREATE INDEX gin_idx ON documents_table USING GIN (locations) WITH (fastupdate = 'off'); CREATE INDEX code_idx ON films (code) TABLESPACE indexspace; CREATE INDEX pointloc ON points USING gist (box(location,location)); CREATE INDEX CONCURRENTLY sales_quantity_index ON sales_table (quantity); CREATE INDEX super_idx ON super_table USING btree(super_column DESC); CREATE INDEX opclass_index ON schema.opclass_table (col varchar_pattern_ops); CREATE INDEX opclass_index_with_parameters ON schema.opclass_table (col varchar_pattern_ops(p1='3', p2='4')); CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target) WHERE success; CREATE INDEX nulls_distinct_index ON documents_table USING GIN (locations) NULLS DISTINCT WITH (fastupdate = 'off'); CREATE INDEX nulls_not_distinct_index ON documents_table USING GIN (locations) NULLS NOT DISTINCT WITH (fastupdate = 'off'); CREATE INDEX code_idx ON films (code) TABLESPACE indexspace; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_index.yml000066400000000000000000000310471451700765000250200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0a68c64141d43e375847c6dabd5d5cd128636a78296754feb4f2d13ba33ee451 file: - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: INCLUDE - bracketed: - start_bracket: ( - index_element: column_reference: naked_identifier: director - comma: ',' - index_element: column_reference: naked_identifier: rating - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: deduplicate_items comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: lower bracketed: start_bracket: ( expression: column_reference: naked_identifier: title end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_german - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: keyword: COLLATE collation_reference: quoted_identifier: '"de_DE"' end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_nulls_low - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: - keyword: NULLS - keyword: FIRST end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: title_idx_nulls_high - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title index_element_options: - keyword: NULLS - keyword: LAST end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: title_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: title end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fillfactor comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - 
statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: gin_idx - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: code_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: code end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: indexspace - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: pointloc - keyword: 'ON' - table_reference: naked_identifier: points - keyword: USING - index_access_method: naked_identifier: gist - bracketed: start_bracket: ( index_element: function: function_name: function_name_identifier: box bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: location - comma: ',' - expression: column_reference: naked_identifier: location - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: sales_quantity_index - keyword: 'ON' - table_reference: naked_identifier: sales_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: quantity end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: super_idx - keyword: 'ON' - table_reference: naked_identifier: super_table - keyword: USING - index_access_method: naked_identifier: btree - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: super_column index_element_options: keyword: DESC end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: opclass_index - keyword: 'ON' - table_reference: - naked_identifier: schema - dot: . - naked_identifier: opclass_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: col index_element_options: operator_class_reference: naked_identifier: varchar_pattern_ops end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: opclass_index_with_parameters - keyword: 'ON' - table_reference: - naked_identifier: schema - dot: . 
- naked_identifier: opclass_table - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: col index_element_options: operator_class_reference: naked_identifier: varchar_pattern_ops relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: p1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'3'" - comma: ',' - relation_option: properties_naked_identifier: p2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'4'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: tests_success_constraint - keyword: 'ON' - table_reference: naked_identifier: tests - bracketed: - start_bracket: ( - index_element: column_reference: naked_identifier: subject - comma: ',' - index_element: column_reference: naked_identifier: target - end_bracket: ) - keyword: WHERE - expression: column_reference: naked_identifier: success - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: nulls_distinct_index - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: NULLS - keyword: DISTINCT - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: nulls_not_distinct_index - keyword: 'ON' - table_reference: naked_identifier: documents_table - keyword: USING - index_access_method: naked_identifier: GIN - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: locations end_bracket: ) - keyword: NULLS - keyword: NOT - keyword: DISTINCT - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: fastupdate comparison_operator: raw_comparison_operator: '=' quoted_literal: "'off'" end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: code_idx - keyword: 'ON' - table_reference: naked_identifier: films - bracketed: start_bracket: ( index_element: column_reference: naked_identifier: code end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: indexspace - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_materialized_view.sql000066400000000000000000000046051451700765000274130ustar00rootroot00000000000000CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS SELECT a FROM my_table; CREATE MATERIALIZED VIEW my_mat_view AS ( SELECT a FROM my_table ); CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table WITH NO DATA; CREATE MATERIALIZED VIEW my_mat_view AS SELECT a FROM my_table WITH DATA; CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS ( SELECT a FROM my_table ); CREATE MATERIALIZED VIEW my_mat_view AS ( SELECT a, b FROM my_table WHERE y = 'value' ); CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS ( SELECT a, b FROM my_table WHERE y = 'value' ); CREATE MATERIALIZED 
VIEW my_mat_view AS
SELECT a, b
FROM my_table
WHERE y = 'value';

CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS
SELECT a, b
FROM my_table
WHERE y = 'value';

CREATE MATERIALIZED VIEW my_mat_view AS
SELECT a, b
FROM my_table;

CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view AS
SELECT a, b
FROM my_table;

-- SQL from issue #2039
CREATE MATERIALIZED VIEW bar AS (
    SELECT col FROM my_table
) WITH NO DATA;

CREATE MATERIALIZED VIEW IF NOT EXISTS bar AS (
    SELECT col FROM my_table
) WITH NO DATA;

CREATE MATERIALIZED VIEW my_mat_view
USING heap
WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d)
TABLESPACE pg_default
AS (
    SELECT a, avg(b) AS my_avg, count(*) AS my_count
    FROM my_table
    GROUP BY grp
    HAVING col > 2
) WITH DATA;

CREATE MATERIALIZED VIEW IF NOT EXISTS my_mat_view
USING heap
WITH (prop_a = 1, prob_b = 'some_value', prop_c = FALSE, prop_d)
TABLESPACE pg_default
AS (
    SELECT a, avg(b) AS my_avg, count(*) AS my_count
    FROM my_table
    GROUP BY grp
    HAVING col > 2
) WITH DATA;

CREATE MATERIALIZED VIEW my_mat_view
TABLESPACE pg_default
AS
SELECT table_1.field_1, table_1.field_2
FROM table_1
UNION
SELECT table_2.field_1, table_2.field_2
FROM table_2
ORDER BY field_1, field_2
WITH DATA;

CREATE MATERIALIZED VIEW my_mat_view WITH (left.right) AS
SELECT a FROM my_table;

CREATE MATERIALIZED VIEW my_mat_view
WITH (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) AS
SELECT a FROM my_table;

CREATE OR REPLACE MATERIALIZED VIEW my_mat_view AS
SELECT a FROM my_table;
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_materialized_view.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dfaeec3bfd6fd9a2a771d1e41fc2fe9a05ee677f498f73a239f3b1c87f8fc24a file: - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: 
keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" end_bracket: ) - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table where_clause: keyword: WHERE expression: column_reference: naked_identifier: y comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: 
naked_identifier: bar - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: USING - parameter: heap - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: prop_a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: prob_b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some_value'" - comma: ',' - relation_option: properties_naked_identifier: prop_c comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - relation_option: properties_naked_identifier: prop_d - end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: keyword: AS naked_identifier: my_avg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: grp having_clause: keyword: HAVING expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '2' end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: my_mat_view - keyword: USING - parameter: heap - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: prop_a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: prob_b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'some_value'" - comma: ',' - relation_option: properties_naked_identifier: 
prop_c comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - relation_option: properties_naked_identifier: prop_d - end_bracket: ) - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: b end_bracket: ) alias_expression: keyword: AS naked_identifier: my_avg - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: my_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: grp having_clause: keyword: HAVING expression: column_reference: naked_identifier: col comparison_operator: raw_comparison_operator: '>' numeric_literal: '2' end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: field_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 - set_operator: keyword: UNION - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: field_1 - comma: ',' - select_clause_element: column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: field_2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_2 - orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: field_1 - comma: ',' - column_reference: naked_identifier: field_2 - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: left - dot: . 
- properties_naked_identifier: right end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_mat_view - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_policy.sql000066400000000000000000000011611451700765000252000ustar00rootroot00000000000000CREATE POLICY account_managers ON accounts TO current_user; CREATE POLICY account_managers ON sch.accounts AS permissive FOR ALL TO managers; CREATE POLICY account_managers ON accounts TO public, session_user; CREATE POLICY account_managers ON accounts WITH CHECK ( NOT accounts_is_excluded_full_name(full_name) ); CREATE POLICY emp_rls_policy ON employee FOR all TO public USING (ename=current_setting('rls.ename')); CREATE POLICY account_managers ON accounts WITH CHECK ( col > 10 ); CREATE POLICY account_managers ON accounts USING (username = current_user); sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_policy.yml000066400000000000000000000102611451700765000252030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_policy.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5eef8817d8adec60e63a49eb7a08d3022e047e29f5cfe8c9ba9a01aa0cc339b5
file:
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
        naked_identifier: accounts
    - keyword: TO
    - object_reference:
        naked_identifier: current_user
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
      - naked_identifier: sch
      - dot: .
      - naked_identifier: accounts
    - keyword: AS
    - keyword: permissive
    - keyword: FOR
    - keyword: ALL
    - keyword: TO
    - object_reference:
        naked_identifier: managers
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
        naked_identifier: accounts
    - keyword: TO
    - object_reference:
        naked_identifier: public
    - comma: ','
    - keyword: session_user
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
        naked_identifier: accounts
    - keyword: WITH
    - keyword: CHECK
    - bracketed:
        start_bracket: (
        expression:
          keyword: NOT
          function:
            function_name:
              function_name_identifier: accounts_is_excluded_full_name
            bracketed:
              start_bracket: (
              expression:
                column_reference:
                  naked_identifier: full_name
              end_bracket: )
        end_bracket: )
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: emp_rls_policy
    - keyword: 'ON'
    - table_reference:
        naked_identifier: employee
    - keyword: FOR
    - keyword: all
    - keyword: TO
    - object_reference:
        naked_identifier: public
    - keyword: USING
    - bracketed:
        start_bracket: (
        expression:
          column_reference:
            naked_identifier: ename
          comparison_operator:
            raw_comparison_operator: '='
          function:
            function_name:
              function_name_identifier: current_setting
            bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'rls.ename'"
              end_bracket: )
        end_bracket: )
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
        naked_identifier: accounts
    - keyword: WITH
    - keyword: CHECK
    - bracketed:
        start_bracket: (
        expression:
          column_reference:
            naked_identifier: col
          comparison_operator:
            raw_comparison_operator: '>'
          numeric_literal: '10'
        end_bracket: )
- statement_terminator: ;
- statement:
    create_policy_statement:
    - keyword: CREATE
    - keyword: POLICY
    - object_reference:
        naked_identifier: account_managers
    - keyword: 'ON'
    - table_reference:
        naked_identifier: accounts
    - keyword: USING
    - bracketed:
        start_bracket: (
        expression:
        - column_reference:
            naked_identifier: username
        - comparison_operator:
            raw_comparison_operator: '='
        - column_reference:
            naked_identifier: current_user
        end_bracket: )
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_procedure.sql

CREATE OR REPLACE PROCEDURE create_account (
    _account_uuid UUID
)
AS $$
BEGIN
 RETURN;
END;
$$ LANGUAGE plpgsql;

CREATE PROCEDURE insert_data(a integer, b integer)
LANGUAGE SQL
AS $$
INSERT INTO tbl VALUES (a);
INSERT INTO tbl VALUES (b);
$$;
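A quick, illustrative check (not part of the test suite) that the dollar-quoted procedure body above lints cleanly as a single quoted literal; `sqlfluff.lint` is part of the simple API and returns a list of violation dicts.

import sqlfluff

sql = """CREATE PROCEDURE insert_data(a integer, b integer)
LANGUAGE SQL
AS $$
INSERT INTO tbl VALUES (a);
INSERT INTO tbl VALUES (b);
$$;
"""
# Each violation dict carries at least a rule code and description.
for violation in sqlfluff.lint(sql, dialect="postgres"):
    print(violation["code"], violation["description"])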
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_procedure.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: b038c0c2ee3d0f7a1afe80e9651148bcf3952190b0f8ab3f94250e4166e03bee
file:
- statement:
    create_procedure_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: create_account
    - function_parameter_list:
        bracketed:
          start_bracket: (
          parameter: _account_uuid
          data_type:
            keyword: UUID
          end_bracket: )
    - function_definition:
        keyword: AS
        quoted_literal: "$$\nBEGIN\n RETURN;\nEND;\n$$"
        language_clause:
          keyword: LANGUAGE
          naked_identifier: plpgsql
- statement_terminator: ;
- statement:
    create_procedure_statement:
    - keyword: CREATE
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: insert_data
    - function_parameter_list:
        bracketed:
        - start_bracket: (
        - parameter: a
        - data_type:
            keyword: integer
        - comma: ','
        - parameter: b
        - data_type:
            keyword: integer
        - end_bracket: )
    - function_definition:
        language_clause:
          keyword: LANGUAGE
          naked_identifier: SQL
        keyword: AS
        quoted_literal: "$$\nINSERT INTO tbl VALUES (a);\nINSERT INTO tbl VALUES (b);\n$$"
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_publication.sql

CREATE PUBLICATION abc;
CREATE PUBLICATION abc FOR ALL TABLES;
CREATE PUBLICATION abc FOR TABLE def;
CREATE PUBLICATION abc FOR TABLE def, sch.ghi;
CREATE PUBLICATION abc FOR TABLE def, TABLE sch.ghi;
CREATE PUBLICATION abc FOR TABLE def*;
CREATE PUBLICATION abc FOR TABLE a,
    TABLE aa, ab, ac,
    TABLE ONLY b,
    TABLE c*,
    TABLE ca*, cb*,
    TABLE ONLY (d),
    TABLE e (col1),
    TABLE f (col2, col3),
    TABLE g* (col4, col5),
    TABLE h WHERE (col6 > col7),
    TABLE i (col8, col9) WHERE (col10 > col11),
    TABLES IN SCHEMA j,
    TABLES IN SCHEMA k,
    TABLES IN SCHEMA CURRENT_SCHEMA, l, m,
    TABLES IN SCHEMA n, o, p;
CREATE PUBLICATION abc FOR TABLE a, b WITH (publish = 'insert,update', publish_via_partition_root = TRUE);
CREATE PUBLICATION abc FOR TABLE a, b WITH (publish_via_partition_root = TRUE);
CREATE PUBLICATION abc FOR TABLE a, b WITH (publish = 'insert,update');
CREATE PUBLICATION abc WITH (publish = 'insert,update');

-- examples from https://www.postgresql.org/docs/15/sql-createpublication.html
CREATE PUBLICATION mypublication FOR TABLE users, departments;
CREATE PUBLICATION active_departments FOR TABLE departments WHERE (active IS TRUE);
CREATE PUBLICATION alltables FOR ALL TABLES;
CREATE PUBLICATION insert_only FOR TABLE mydata WITH (publish = 'insert');
CREATE PUBLICATION production_publication FOR TABLE users, departments, TABLES IN SCHEMA production;
CREATE PUBLICATION sales_publication FOR TABLES IN SCHEMA marketing, sales;
CREATE PUBLICATION users_filtered FOR TABLE users (user_id, firstname);
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_publication.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dfab8d08af80c56bc6be0c0658fedcccb5142a9c490c88f6198973e7ef322b5e
file:
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - keyword: ALL
    - keyword: TABLES
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: def
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: def
      - comma: ','
      - publication_table:
          table_reference:
          - naked_identifier: sch
          - dot: .
          - naked_identifier: ghi
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: def
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
          - naked_identifier: sch
          - dot: .
          - naked_identifier: ghi
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: def
          star: '*'
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: a
    - comma: ','
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: aa
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: ab
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: ac
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          keyword: ONLY
          table_reference:
            naked_identifier: b
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: c
          star: '*'
    - comma: ','
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: ca
          star: '*'
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: cb
          star: '*'
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          keyword: ONLY
          bracketed:
            start_bracket: (
            table_reference:
              naked_identifier: d
            end_bracket: )
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: e
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: col1
            end_bracket: )
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: f
          bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: col2
          - comma: ','
          - column_reference:
              naked_identifier: col3
          - end_bracket: )
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: g
          star: '*'
          bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: col4
          - comma: ','
          - column_reference:
              naked_identifier: col5
          - end_bracket: )
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: h
          keyword: WHERE
          bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: col6
            - comparison_operator:
                raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: col7
            end_bracket: )
    - comma: ','
    - publication_objects:
        keyword: TABLE
        publication_table:
        - table_reference:
            naked_identifier: i
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: col8
          - comma: ','
          - column_reference:
              naked_identifier: col9
          - end_bracket: )
        - keyword: WHERE
        - bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: col10
            - comparison_operator:
                raw_comparison_operator: '>'
            - column_reference:
                naked_identifier: col11
            end_bracket: )
    - comma: ','
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - schema_reference:
          naked_identifier: j
    - comma: ','
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - schema_reference:
          naked_identifier: k
    - comma: ','
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - keyword: CURRENT_SCHEMA
      - comma: ','
      - schema_reference:
          naked_identifier: l
      - comma: ','
      - schema_reference:
          naked_identifier: m
    - comma: ','
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - schema_reference:
          naked_identifier: n
      - comma: ','
      - schema_reference:
          naked_identifier: o
      - comma: ','
      - schema_reference:
          naked_identifier: p
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: a
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: b
    - keyword: WITH
    - definition_parameters:
        bracketed:
        - start_bracket: (
        - definition_parameter:
            properties_naked_identifier: publish
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'insert,update'"
        - comma: ','
        - definition_parameter:
            properties_naked_identifier: publish_via_partition_root
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'TRUE'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: a
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: b
    - keyword: WITH
    - definition_parameters:
        bracketed:
          start_bracket: (
          definition_parameter:
            properties_naked_identifier: publish_via_partition_root
            comparison_operator:
              raw_comparison_operator: '='
            boolean_literal: 'TRUE'
          end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: a
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: b
    - keyword: WITH
    - definition_parameters:
        bracketed:
          start_bracket: (
          definition_parameter:
            properties_naked_identifier: publish
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'insert,update'"
          end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: abc
    - keyword: WITH
    - definition_parameters:
        bracketed:
          start_bracket: (
          definition_parameter:
            properties_naked_identifier: publish
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'insert,update'"
          end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: mypublication
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: users
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: departments
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: active_departments
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: departments
          keyword: WHERE
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: active
              keyword: IS
              boolean_literal: 'TRUE'
            end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: alltables
    - keyword: FOR
    - keyword: ALL
    - keyword: TABLES
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: insert_only
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: mydata
    - keyword: WITH
    - definition_parameters:
        bracketed:
          start_bracket: (
          definition_parameter:
            properties_naked_identifier: publish
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: "'insert'"
          end_bracket: )
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: production_publication
    - keyword: FOR
    - publication_objects:
      - keyword: TABLE
      - publication_table:
          table_reference:
            naked_identifier: users
      - comma: ','
      - publication_table:
          table_reference:
            naked_identifier: departments
    - comma: ','
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - schema_reference:
          naked_identifier: production
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: sales_publication
    - keyword: FOR
    - publication_objects:
      - keyword: TABLES
      - keyword: IN
      - keyword: SCHEMA
      - schema_reference:
          naked_identifier: marketing
      - comma: ','
      - schema_reference:
          naked_identifier: sales
- statement_terminator: ;
- statement:
    create_publication_statement:
    - keyword: CREATE
    - keyword: PUBLICATION
    - publication_reference:
        naked_identifier: users_filtered
    - keyword: FOR
    - publication_objects:
        keyword: TABLE
        publication_table:
          table_reference:
            naked_identifier: users
          bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: user_id
          - comma: ','
          - column_reference:
              naked_identifier: firstname
          - end_bracket: )
- statement_terminator: ;
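A sketch (not part of the test suite) of how a parse fixture like the one above can be loaded and inspected; the path and the walk are illustrative, and yaml here is the third-party PyYAML package.

import yaml

with open("test/fixtures/dialects/postgres/create_publication.yml") as f:
    fixture = yaml.safe_load(f)

# The top-level "file" key mirrors sqlfluff's root segment; each entry
# is a statement or terminator in source order.
for node in fixture["file"]:
    print(next(iter(node)))  # e.g. "statement", "statement_terminator"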
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_role.sql

CREATE USER foo_role WITH SUPERUSER NOLOGIN REPLICATION IN GROUP foo_group;
CREATE USER foo_role;
CREATE USER frank WITH CONNECTION LIMIT 1;
CREATE USER frank WITH IN ROLE frank;
CREATE USER frank WITH SUPERUSER CREATEDB CREATEROLE;
CREATE USER frank WITH INHERIT LOGIN REPLICATION BYPASSRLS;
CREATE USER davide WITH PASSWORD 'jw8s0F4';
CREATE USER miriam WITH LOGIN PASSWORD 'jw8s0F4' VALID UNTIL '2005-01-01';
CREATE ROLE foo_role WITH SUPERUSER NOLOGIN REPLICATION IN GROUP foo_group;
CREATE ROLE foo_role;
CREATE ROLE frank WITH CONNECTION LIMIT 1;
CREATE ROLE frank WITH IN ROLE frank;
CREATE ROLE frank WITH SUPERUSER CREATEDB CREATEROLE;
CREATE ROLE frank WITH INHERIT LOGIN REPLICATION BYPASSRLS;
CREATE ROLE davide WITH PASSWORD 'jw8s0F4';
CREATE ROLE miriam WITH LOGIN PASSWORD 'jw8s0F4' VALID UNTIL '2005-01-01';
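The same check the fixtures encode can also be run from the command line; this snippet is illustrative only, and simply shells out to the real `sqlfluff parse` CLI for one of the fixture files.

import subprocess

# Prints the parse tree for the file, using the postgres dialect.
subprocess.run(
    ["sqlfluff", "parse", "--dialect", "postgres",
     "test/fixtures/dialects/postgres/create_role.sql"],
    check=True,
)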
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_role.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 5fae24fa08e8818702a933e632e2b092005758915d5dfa79a9efa647eb0b7f2e
file:
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: foo_role
    - keyword: WITH
    - keyword: SUPERUSER
    - keyword: NOLOGIN
    - keyword: REPLICATION
    - keyword: IN
    - keyword: GROUP
    - role_reference:
        naked_identifier: foo_group
- statement_terminator: ;
- statement:
    create_user_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: foo_role
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: CONNECTION
    - keyword: LIMIT
    - numeric_literal: '1'
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: IN
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: SUPERUSER
    - keyword: CREATEDB
    - keyword: CREATEROLE
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: INHERIT
    - keyword: LOGIN
    - keyword: REPLICATION
    - keyword: BYPASSRLS
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - quoted_literal: "'jw8s0F4'"
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: USER
    - role_reference:
        naked_identifier: miriam
    - keyword: WITH
    - keyword: LOGIN
    - keyword: PASSWORD
    - quoted_literal: "'jw8s0F4'"
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'2005-01-01'"
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: foo_role
    - keyword: WITH
    - keyword: SUPERUSER
    - keyword: NOLOGIN
    - keyword: REPLICATION
    - keyword: IN
    - keyword: GROUP
    - role_reference:
        naked_identifier: foo_group
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: foo_role
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: CONNECTION
    - keyword: LIMIT
    - numeric_literal: '1'
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: IN
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: SUPERUSER
    - keyword: CREATEDB
    - keyword: CREATEROLE
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: frank
    - keyword: WITH
    - keyword: INHERIT
    - keyword: LOGIN
    - keyword: REPLICATION
    - keyword: BYPASSRLS
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: davide
    - keyword: WITH
    - keyword: PASSWORD
    - quoted_literal: "'jw8s0F4'"
- statement_terminator: ;
- statement:
    create_role_statement:
    - keyword: CREATE
    - keyword: ROLE
    - role_reference:
        naked_identifier: miriam
    - keyword: WITH
    - keyword: LOGIN
    - keyword: PASSWORD
    - quoted_literal: "'jw8s0F4'"
    - keyword: VALID
    - keyword: UNTIL
    - quoted_literal: "'2005-01-01'"
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_schema.sql

CREATE SCHEMA asdf;
CREATE SCHEMA IF NOT EXISTS asdf;
CREATE SCHEMA asdf AUTHORIZATION bob;
CREATE SCHEMA AUTHORIZATION bob;
CREATE SCHEMA IF NOT EXISTS asdf AUTHORIZATION bob;
CREATE SCHEMA IF NOT EXISTS AUTHORIZATION bob;
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_schema.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fb7e7b5ffc233fb6fb4275a3a828a7ec400f50bc68a49014a36be30461ccd0c7
file:
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: asdf
- statement_terminator: ;
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - schema_reference:
        naked_identifier: asdf
- statement_terminator: ;
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - schema_reference:
        naked_identifier: asdf
    - keyword: AUTHORIZATION
    - role_reference:
        naked_identifier: bob
- statement_terminator: ;
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - keyword: AUTHORIZATION
    - role_reference:
        naked_identifier: bob
- statement_terminator: ;
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - schema_reference:
        naked_identifier: asdf
    - keyword: AUTHORIZATION
    - role_reference:
        naked_identifier: bob
- statement_terminator: ;
- statement:
    create_schema_statement:
    - keyword: CREATE
    - keyword: SCHEMA
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - keyword: AUTHORIZATION
    - role_reference:
        naked_identifier: bob
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_sequence.sql

CREATE SEQUENCE foo;
CREATE SEQUENCE foo AS integer;
CREATE SEQUENCE foo INCREMENT BY 3;
CREATE SEQUENCE foo MINVALUE 5 NO MAXVALUE;
CREATE SEQUENCE foo NO MINVALUE MAXVALUE 12;
CREATE SEQUENCE foo INCREMENT 5 START WITH 8 CACHE 4;
CREATE SEQUENCE foo NO CYCLE;
CREATE SEQUENCE foo OWNED BY NONE;
CREATE SEQUENCE foo OWNED BY my_table.my_column;
CREATE TEMP SEQUENCE IF NOT EXISTS foo;
CREATE TEMPORARY SEQUENCE foo;
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_sequence.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fb150d8d47a791d133800c357839dfb4681a1409835240f03ce06f55ff0b8a17
file:
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: AS
        data_type:
          keyword: integer
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: INCREMENT
      - keyword: BY
      - numeric_literal: '3'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: MINVALUE
        numeric_literal: '5'
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MAXVALUE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: MINVALUE
    - create_sequence_options_segment:
        keyword: MAXVALUE
        numeric_literal: '12'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
        keyword: INCREMENT
        numeric_literal: '5'
    - create_sequence_options_segment:
      - keyword: START
      - keyword: WITH
      - numeric_literal: '8'
    - create_sequence_options_segment:
        keyword: CACHE
        numeric_literal: '4'
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: 'NO'
      - keyword: CYCLE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - keyword: NONE
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
    - create_sequence_options_segment:
      - keyword: OWNED
      - keyword: BY
      - column_reference:
        - naked_identifier: my_table
        - dot: .
        - naked_identifier: my_column
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: TEMP
    - keyword: SEQUENCE
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;
- statement:
    create_sequence_statement:
    - keyword: CREATE
    - keyword: TEMPORARY
    - keyword: SEQUENCE
    - sequence_reference:
        naked_identifier: foo
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_server.sql

CREATE SERVER test FOREIGN DATA WRAPPER postgres_fdw;
CREATE SERVER IF NOT EXISTS test FOREIGN DATA WRAPPER oracle_fdw;
CREATE SERVER test TYPE 'test' VERSION '1.0' FOREIGN DATA WRAPPER postgres_fdw;
CREATE SERVER test FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host 'foo', dbname 'foodb', port '5432');
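A hedged example of round-tripping one of the statements above through sqlfluff's simple fix API; the rule selection (CP01, keyword capitalisation) and the input string are illustrative, not taken from the test suite.

import sqlfluff

fixed = sqlfluff.fix(
    "create server test foreign data wrapper postgres_fdw;",
    dialect="postgres",
    rules=["CP01"],  # capitalisation of keywords only
)
# Keywords should come back upper-cased; identifiers are untouched.
print(fixed)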
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_server.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ae594fff2cb69400bbe9153346e3c557399190bc2a82ebffd5db81317d439923
file:
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: oracle_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: TYPE
    - quoted_literal: "'test'"
    - keyword: VERSION
    - version_identifier:
        quoted_literal: "'1.0'"
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
- statement_terminator: ;
- statement:
    create_server_statement:
    - keyword: CREATE
    - keyword: SERVER
    - server_reference:
        naked_identifier: test
    - keyword: FOREIGN
    - keyword: DATA
    - keyword: WRAPPER
    - object_reference:
        naked_identifier: postgres_fdw
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - naked_identifier_all: host
      - quoted_literal: "'foo'"
      - comma: ','
      - naked_identifier_all: dbname
      - quoted_literal: "'foodb'"
      - comma: ','
      - naked_identifier_all: port
      - quoted_literal: "'5432'"
      - end_bracket: )
- statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_table.sql

-- Test qualifying datatype with schema
CREATE TABLE counters (
    my_type public.MY_TYPE
);
--CREATE TABLE films (
--    code char(5) CONSTRAINT firstkey PRIMARY KEY,
--    title varchar(40) NOT NULL,
--    did integer NOT NULL,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute
--);
CREATE TABLE distributors (
    did integer PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY,
    name varchar(40) NOT NULL CHECK (name <> '')
);
CREATE TABLE array_int (
    vector int[][]
);
--CREATE TABLE films (
--    code char(5),
--    title varchar(40),
--    did integer,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute,
--    CONSTRAINT production UNIQUE(date_prod)
--);
CREATE TABLE distributors (
    did integer CHECK (did > 100),
    name varchar(40),
    long_varying char varying(100)
);
CREATE TABLE distributors (
    did integer,
    name varchar(40),
    CONSTRAINT con1 CHECK (did > 100 AND name <> '')
);
--CREATE TABLE films (
--    code char(5),
--    title varchar(40),
--    did integer,
--    date_prod date,
--    kind varchar(10),
--    len interval hour to minute,
--    CONSTRAINT code_title PRIMARY KEY(code,title)
--);
CREATE TABLE distributors (
    did integer,
    name varchar(40),
    PRIMARY KEY(did)
);
CREATE TABLE distributors (
    did integer PRIMARY KEY,
    name varchar(40)
);
CREATE TABLE distributors (
    name varchar(40) DEFAULT 'Luso Films',
    did integer DEFAULT nextval('distributors_serial'),
    modtime timestamp DEFAULT current_timestamp
);
CREATE TABLE distributors (
    did integer CONSTRAINT no_null NOT NULL,
    name varchar(40) NOT NULL
);
CREATE TABLE distributors (
    did integer,
    name varchar(40) UNIQUE
);
CREATE TABLE distributors (
    did integer,
    name varchar(40),
    UNIQUE(name)
);
CREATE TABLE distributors (
    did integer,
    name varchar(40),
    UNIQUE(name) WITH (fillfactor=70)
)
WITH (fillfactor=70);
--CREATE TABLE circles (
--    c circle,
--    EXCLUDE USING gist (c WITH &&)
--);
CREATE TABLE cinemas (
    id serial,
    name text,
    location text
) TABLESPACE diskvol1;
CREATE TYPE employee_type AS (name text, salary numeric);
CREATE TABLE employees OF employee_type (
    PRIMARY KEY (name),
    salary WITH OPTIONS DEFAULT 1000
);
CREATE TABLE measurement (
    logdate date not null,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (logdate);
CREATE TABLE measurement_year_month (
    logdate date not null,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (EXTRACT(YEAR FROM logdate), EXTRACT(MONTH FROM logdate));
CREATE TABLE cities (
    city_id bigserial not null,
    name text not null,
    population bigint
) PARTITION BY LIST (left(lower(name), 1));
CREATE TABLE orders (
    order_id bigint not null,
    cust_id bigint not null,
    status text
) PARTITION BY HASH (order_id);
CREATE TABLE measurement_y2016m07
    PARTITION OF measurement (
        unitsales DEFAULT 0
    ) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01');
CREATE TABLE measurement_ym_older
    PARTITION OF measurement_year_month
    FOR VALUES FROM (MINVALUE, MINVALUE) TO (2016, 11);
CREATE TABLE measurement_ym_y2016m11
    PARTITION OF measurement_year_month
    FOR VALUES FROM (2016, 11) TO (2016, 12);
CREATE TABLE measurement_ym_y2016m12
    PARTITION OF measurement_year_month
    FOR VALUES FROM (2016, 12) TO (2017, 01);
CREATE TABLE measurement_ym_y2017m01
    PARTITION OF measurement_year_month
    FOR VALUES FROM (2017, 01) TO (2017, 02);
CREATE TABLE cities_ab
    PARTITION OF cities (
        CONSTRAINT city_id_nonzero CHECK (city_id != 0)
    ) FOR VALUES IN ('a', 'b');
CREATE TABLE cities_ab
    PARTITION OF cities (
        CONSTRAINT city_id_nonzero CHECK (city_id != 0)
    ) FOR VALUES IN ('a', 'b') PARTITION BY RANGE (population);
CREATE TABLE cities_ab_10000_to_100000
    PARTITION OF cities_ab FOR VALUES FROM (10000) TO (100000);
CREATE TABLE orders_p1 PARTITION OF orders
    FOR VALUES WITH (MODULUS 4, REMAINDER 0);
CREATE TABLE orders_p2 PARTITION OF orders
    FOR VALUES WITH (MODULUS 4, REMAINDER 1);
CREATE TABLE orders_p3 PARTITION OF orders
    FOR VALUES WITH (MODULUS 4, REMAINDER 2);
CREATE TABLE orders_p4 PARTITION OF orders
    FOR VALUES WITH (MODULUS 4, REMAINDER 3);
CREATE TABLE cities_partdef
    PARTITION OF cities DEFAULT;
CREATE UNLOGGED TABLE staging (
    event_type INTEGER
    , event_time TIMESTAMP
    , user_email VARCHAR
    , phone_number VARCHAR
    , processing_date DATE
    , PRIMARY KEY (event_type, event_time, user_email, phone_number, processing_date)
);
CREATE TABLE measurement (
    city_id int NOT NULL,
    logdate date NOT NULL,
    peaktemp int,
    unitsales int
) PARTITION BY RANGE (logdate);
CREATE TABLE public.public (
    id serial NOT NULL,
    name text NOT NULL,
    group_name text NULL,
    cluster_id int8 NULL,
    date_created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
    date_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
    operation_id int4 NOT NULL DEFAULT '-1'::integer
);
CREATE TABLE main.test_table (
    "col1" character varying(40) NOT NULL,
    "col2" double precision
);
CREATE TABLE groups (
    group_id INTEGER PRIMARY KEY generated BY DEFAULT AS IDENTITY
);
CREATE TABLE users (
    user_id INTEGER PRIMARY KEY generated BY DEFAULT AS IDENTITY,
    group_id INTEGER REFERENCES groups (group_id) ON DELETE CASCADE,
    domain_id INTEGER REFERENCES groups (group_id) ON UPDATE RESTRICT,
    other_id INTEGER REFERENCES groups (group_id) MATCH SIMPLE
);
CREATE TABLE orders (
    id bigint NOT NULL DEFAULT NEXTVAL('orders_id_seq'::regclass),
    constraint_collate_constraints text UNIQUE COLLATE numeric NOT NULL PRIMARY KEY,
    constraints_collate text NOT NULL UNIQUE COLLATE numeric,
    collate_constraints text COLLATE numeric NOT NULL UNIQUE,
    nulls_distinct text UNIQUE NULLS DISTINCT,
    nulls_not_distinct text UNIQUE NULLS NOT DISTINCT,
    everything text UNIQUE NULLS DISTINCT WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace COLLATE numeric
);
CREATE TABLE primary_key_options (
    everything int PRIMARY KEY WITH (arg1=3, arg5='str') USING INDEX TABLESPACE tblspace NOT NULL
);
-- Use non-reserved `usage` word as a table identifier
CREATE TABLE IF NOT EXISTS quotas.usage(foo int);
-- Use non-reserved `usage` word as a column identifier
CREATE TABLE IF NOT EXISTS quotas.my_table(usage int);
-- NOT NULL both before and after a default constraint
CREATE TABLE with_constraints1 (
    col_1 boolean NOT NULL DEFAULT false
);
CREATE TABLE with_constraints2 (
    col_1 boolean DEFAULT false NOT NULL
);
-- default constraint expression
CREATE TABLE with_constraints3 (
    col_1 int DEFAULT (1 + 2) * (3 + 4) NOT NULL
);
CREATE TABLE with_constraints33 (
    col_1 int DEFAULT 1 + 2 * 3 + 4 NOT NULL
);
CREATE TABLE with_constraints4 (
    col_1 int DEFAULT (1 + 2 * 3 + 4) NOT NULL
);
CREATE TABLE with_constraints5 (
    col_1 bool DEFAULT (1 NOT IN (3, 4)) NOT NULL
);
CREATE TABLE with_constraints6 (
    col_1 bool NOT NULL DEFAULT (5 NOT IN (5, 6))
);
CREATE TABLE test_with_storage_param (
    col_1 boolean
) WITH (autovacuum_enabled=true);
CREATE TABLE test_with_storage_params (
    col_1 boolean
) WITH (autovacuum_enabled=true, vacuum_truncate=false);
CREATE TABLE tbl (
    -- All forms of character data types listed at:
    -- https://www.postgresql.org/docs/current/datatype-character.html
    col_char_varying_unlimited character varying,
    col_char_varying_limited character varying(50),
    col_varchar_unlimited varchar,
    col_varchar_limited varchar(50),
    col_character_default character,
    col_character_specified character(50),
    col_char_default char,
    col_char_specified character(50),
    col_text text,
    -- some types you'll find in pg_catalog
    col_system_char "char", -- this is NOT the same as unquoted char
    col_name name
);
-- Test out EXCLUDE constraints, as well as other more advanced index parameters on constraints
-- from https://www.postgresql.org/docs/15/rangetypes.html: basic usage
CREATE TABLE reservation (
    during tsrange,
    EXCLUDE USING gist (during WITH &&)
);
CREATE TABLE room_reservation (
    room text,
    during tsrange,
    EXCLUDE USING gist (room WITH =, during WITH &&)
);
-- all the gnarly options: not every option is valid, but this will parse successfully on PG 15.
CREATE TABLE no_using (
    field text,
    EXCLUDE (field WITH =) NOT DEFERRABLE INITIALLY IMMEDIATE NO INHERIT
);
CREATE TABLE many_options (
    field text,
    EXCLUDE USING gist (
        one WITH =,
        nulls_opclass nulls WITH =,
        nulls_last NULLS LAST WITH =,
        two COLLATE "en-US" opclass (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) ASC NULLS FIRST WITH =,
        (two + 5) WITH =,
        myfunc(a, b) WITH =,
        myfunc_opclass(a, b) fop (opt=1, foo=2) WITH =,
        only_opclass opclass WITH =,
        desc_order DESC WITH =
    )
    INCLUDE (a, b)
    WITH (idx_num = 5, idx_str = 'idx_value', idx_kw=DESC)
    USING INDEX TABLESPACE tblspc
    WHERE (field != 'def')
    DEFERRABLE INITIALLY DEFERRED
);
CREATE TABLE example_table () INHERITS (parent_table);
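A sketch of walking a parse tree to pull out the table names a fixture like the one above exercises; the path is illustrative, while `recursive_crawl` and `.raw` are part of sqlfluff's segment API.

from sqlfluff.core import Linter

with open("test/fixtures/dialects/postgres/create_table.sql") as f:
    sql = f.read()

tree = Linter(dialect="postgres").parse_string(sql).tree
# Yield every table_reference segment anywhere in the tree and print
# its raw source text (e.g. "distributors", "public.public").
for ref in tree.recursive_crawl("table_reference"):
    print(ref.raw)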
sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 8df5f4f21b70c5cf33fa4443295289fdd156cf3290c0f437389f5906a0e9f983
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: counters
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: my_type
        data_type:
          naked_identifier: public
          dot: .
          data_type_identifier: MY_TYPE
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - column_constraint_segment:
        - keyword: PRIMARY
        - keyword: KEY
      - column_constraint_segment:
        - keyword: GENERATED
        - keyword: BY
        - keyword: DEFAULT
        - keyword: AS
        - keyword: IDENTITY
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: name
              comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
              quoted_literal: "''"
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: array_int
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: vector
        data_type:
        - keyword: int
        - start_square_bracket: '['
        - end_square_bracket: ']'
        - start_square_bracket: '['
        - end_square_bracket: ']'
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - column_constraint_segment:
          keyword: CHECK
          bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: did
              comparison_operator:
                raw_comparison_operator: '>'
              numeric_literal: '100'
            end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: long_varying
      - data_type:
        - keyword: char
        - keyword: varying
        - bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '100'
              end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: con1
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
            - column_reference:
                naked_identifier: did
            - comparison_operator:
                raw_comparison_operator: '>'
            - numeric_literal: '100'
            - binary_operator: AND
            - column_reference:
                naked_identifier: name
            - comparison_operator:
              - raw_comparison_operator: <
              - raw_comparison_operator: '>'
            - quoted_literal: "''"
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: did
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - column_constraint_segment:
        - keyword: PRIMARY
        - keyword: KEY
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - column_constraint_segment:
          keyword: DEFAULT
          quoted_literal: "'Luso Films'"
      - comma: ','
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - column_constraint_segment:
          keyword: DEFAULT
          function:
            function_name:
              function_name_identifier: nextval
            bracketed:
              start_bracket: (
              expression:
                quoted_literal: "'distributors_serial'"
              end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: modtime
      - data_type:
          datetime_type_identifier:
            keyword: timestamp
      - column_constraint_segment:
          keyword: DEFAULT
          bare_function: current_timestamp
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - column_constraint_segment:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: no_null
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - column_constraint_segment:
          keyword: UNIQUE
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - comma: ','
      - table_constraint:
          keyword: UNIQUE
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: name
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: distributors
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: did
      - data_type:
          keyword: integer
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: varchar
          bracketed_arguments:
            bracketed:
              start_bracket: (
              numeric_literal: '40'
              end_bracket: )
      - comma: ','
      - table_constraint:
          keyword: UNIQUE
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: name
            end_bracket: )
          index_parameters:
            keyword: WITH
            definition_parameters:
              bracketed:
                start_bracket: (
                definition_parameter:
                  properties_naked_identifier: fillfactor
                  comparison_operator:
                    raw_comparison_operator: '='
                  numeric_literal: '70'
                end_bracket: )
      - end_bracket: )
    - keyword: WITH
    - relation_options:
        bracketed:
          start_bracket: (
          relation_option:
            properties_naked_identifier: fillfactor
            comparison_operator:
              raw_comparison_operator: '='
            numeric_literal: '70'
          end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cinemas
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: id
      - data_type:
          keyword: serial
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: text
      - comma: ','
      - column_reference:
          naked_identifier: location
      - data_type:
          keyword: text
      - end_bracket: )
    - keyword: TABLESPACE
    - tablespace_reference:
        naked_identifier: diskvol1
- statement_terminator: ;
- statement:
    create_type_statement:
    - keyword: CREATE
    - keyword: TYPE
    - object_reference:
        naked_identifier: employee_type
    - keyword: AS
    - bracketed:
      - start_bracket: (
      - word: name
      - word: text
      - comma: ','
      - word: salary
      - word: numeric
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: employees
    - keyword: OF
    - parameter: employee_type
    - bracketed:
      - start_bracket: (
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: name
            end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: salary
      - keyword: WITH
      - keyword: OPTIONS
      - column_constraint_segment:
          keyword: DEFAULT
          numeric_literal: '1000'
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: peaktemp
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: unitsales
      - data_type:
          keyword: int
      - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: RANGE
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: logdate
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_year_month
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: peaktemp
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: unitsales
      - data_type:
          keyword: int
      - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: RANGE
    - bracketed:
      - start_bracket: (
      - function:
          function_name:
            function_name_identifier: EXTRACT
          bracketed:
            start_bracket: (
            date_part: YEAR
            keyword: FROM
            expression:
              column_reference:
                naked_identifier: logdate
            end_bracket: )
      - comma: ','
      - function:
          function_name:
            function_name_identifier: EXTRACT
          bracketed:
            start_bracket: (
            date_part: MONTH
            keyword: FROM
            expression:
              column_reference:
                naked_identifier: logdate
            end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: city_id
      - data_type:
          keyword: bigserial
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: text
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: population
      - data_type:
          keyword: bigint
      - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: LIST
    - bracketed:
        start_bracket: (
        function:
          function_name:
            function_name_identifier: left
          bracketed:
          - start_bracket: (
          - expression:
              function:
                function_name:
                  function_name_identifier: lower
                bracketed:
                  start_bracket: (
                  expression:
                    column_reference:
                      naked_identifier: name
                  end_bracket: )
          - comma: ','
          - expression:
              numeric_literal: '1'
          - end_bracket: )
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: order_id
      - data_type:
          keyword: bigint
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: cust_id
      - data_type:
          keyword: bigint
      - column_constraint_segment:
        - keyword: not
        - keyword: 'null'
      - comma: ','
      - column_reference:
          naked_identifier: status
      - data_type:
          keyword: text
      - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: HASH
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: order_id
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_y2016m07
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: measurement
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: unitsales
        column_constraint_segment:
          keyword: DEFAULT
          numeric_literal: '0'
        end_bracket: )
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
          start_bracket: (
          expression:
            quoted_literal: "'2016-07-01'"
          end_bracket: )
      - keyword: TO
      - bracketed:
          start_bracket: (
          expression:
            quoted_literal: "'2016-08-01'"
          end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_ym_older
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: measurement_year_month
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
        - start_bracket: (
        - expression:
            column_reference:
              naked_identifier: MINVALUE
        - comma: ','
        - expression:
            column_reference:
              naked_identifier: MINVALUE
        - end_bracket: )
      - keyword: TO
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2016'
        - comma: ','
        - expression:
            numeric_literal: '11'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_ym_y2016m11
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: measurement_year_month
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2016'
        - comma: ','
        - expression:
            numeric_literal: '11'
        - end_bracket: )
      - keyword: TO
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2016'
        - comma: ','
        - expression:
            numeric_literal: '12'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_ym_y2016m12
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: measurement_year_month
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2016'
        - comma: ','
        - expression:
            numeric_literal: '12'
        - end_bracket: )
      - keyword: TO
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2017'
        - comma: ','
        - expression:
            numeric_literal: '01'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement_ym_y2017m01
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: measurement_year_month
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2017'
        - comma: ','
        - expression:
            numeric_literal: '01'
        - end_bracket: )
      - keyword: TO
      - bracketed:
        - start_bracket: (
        - expression:
            numeric_literal: '2017'
        - comma: ','
        - expression:
            numeric_literal: '02'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities_ab
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: cities
    - bracketed:
        start_bracket: (
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: city_id_nonzero
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: city_id
              comparison_operator:
              - raw_comparison_operator: '!'
              - raw_comparison_operator: '='
              numeric_literal: '0'
            end_bracket: )
        end_bracket: )
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: IN
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'a'"
        - comma: ','
        - expression:
            quoted_literal: "'b'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities_ab
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: cities
    - bracketed:
        start_bracket: (
        table_constraint:
        - keyword: CONSTRAINT
        - object_reference:
            naked_identifier: city_id_nonzero
        - keyword: CHECK
        - bracketed:
            start_bracket: (
            expression:
              column_reference:
                naked_identifier: city_id
              comparison_operator:
              - raw_comparison_operator: '!'
              - raw_comparison_operator: '='
              numeric_literal: '0'
            end_bracket: )
        end_bracket: )
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: IN
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'a'"
        - comma: ','
        - expression:
            quoted_literal: "'b'"
        - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: RANGE
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: population
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities_ab_10000_to_100000
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: cities_ab
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
      - keyword: FROM
      - bracketed:
          start_bracket: (
          expression:
            numeric_literal: '10000'
          end_bracket: )
      - keyword: TO
      - bracketed:
          start_bracket: (
          expression:
            numeric_literal: '100000'
          end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders_p1
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: orders
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: WITH
        bracketed:
        - start_bracket: (
        - keyword: MODULUS
        - numeric_literal: '4'
        - comma: ','
        - keyword: REMAINDER
        - numeric_literal: '0'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders_p2
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: orders
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: WITH
        bracketed:
        - start_bracket: (
        - keyword: MODULUS
        - numeric_literal: '4'
        - comma: ','
        - keyword: REMAINDER
        - numeric_literal: '1'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders_p3
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: orders
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: WITH
        bracketed:
        - start_bracket: (
        - keyword: MODULUS
        - numeric_literal: '4'
        - comma: ','
        - keyword: REMAINDER
        - numeric_literal: '2'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: orders_p4
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: orders
    - keyword: FOR
    - keyword: VALUES
    - partition_bound_spec:
        keyword: WITH
        bracketed:
        - start_bracket: (
        - keyword: MODULUS
        - numeric_literal: '4'
        - comma: ','
        - keyword: REMAINDER
        - numeric_literal: '3'
        - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: cities_partdef
    - keyword: PARTITION
    - keyword: OF
    - table_reference:
        naked_identifier: cities
    - keyword: DEFAULT
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: UNLOGGED
    - keyword: TABLE
    - table_reference:
        naked_identifier: staging
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: event_type
      - data_type:
          keyword: INTEGER
      - comma: ','
      - column_reference:
          naked_identifier: event_time
      - data_type:
          datetime_type_identifier:
            keyword: TIMESTAMP
      - comma: ','
      - column_reference:
          naked_identifier: user_email
      - data_type:
          keyword: VARCHAR
      - comma: ','
      - column_reference:
          naked_identifier: phone_number
      - data_type:
          keyword: VARCHAR
      - comma: ','
      - column_reference:
          naked_identifier: processing_date
      - data_type:
          datetime_type_identifier:
            keyword: DATE
      - comma: ','
      - table_constraint:
        - keyword: PRIMARY
        - keyword: KEY
        - bracketed:
          - start_bracket: (
          - column_reference:
              naked_identifier: event_type
          - comma: ','
          - column_reference:
              naked_identifier: event_time
          - comma: ','
          - column_reference:
              naked_identifier: user_email
          - comma: ','
          - column_reference:
              naked_identifier: phone_number
          - comma: ','
          - column_reference:
              naked_identifier: processing_date
          - end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: measurement
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: city_id
      - data_type:
          keyword: int
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: logdate
      - data_type:
          datetime_type_identifier:
            keyword: date
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: peaktemp
      - data_type:
          keyword: int
      - comma: ','
      - column_reference:
          naked_identifier: unitsales
      - data_type:
          keyword: int
      - end_bracket: )
    - keyword: PARTITION
    - keyword: BY
    - keyword: RANGE
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: logdate
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: public
      - dot: .
      - naked_identifier: public
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: id
      - data_type:
          keyword: serial
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: name
      - data_type:
          keyword: text
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: group_name
      - data_type:
          keyword: text
      - column_constraint_segment:
          keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: cluster_id
      - data_type:
          keyword: int8
      - column_constraint_segment:
          keyword: 'NULL'
      - comma: ','
      - column_reference:
          naked_identifier: date_created
      - data_type:
          datetime_type_identifier:
            keyword: timestamp
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          bare_function: CURRENT_TIMESTAMP
      - comma: ','
      - column_reference:
          naked_identifier: date_updated
      - data_type:
          datetime_type_identifier:
            keyword: timestamp
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          bare_function: CURRENT_TIMESTAMP
      - comma: ','
      - column_reference:
          naked_identifier: operation_id
      - data_type:
          keyword: int4
      - column_constraint_segment:
        - keyword: NOT
        - keyword: 'NULL'
      - column_constraint_segment:
          keyword: DEFAULT
          cast_expression:
            quoted_literal: "'-1'"
            casting_operator: '::'
            data_type:
              keyword: integer
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: main
      - dot: .
- naked_identifier: test_table - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"col1"' - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '40' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_reference: quoted_identifier: '"col2"' - data_type: - keyword: double - keyword: precision - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: groups - bracketed: - start_bracket: ( - column_reference: naked_identifier: group_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: generated - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_reference: naked_identifier: user_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: generated - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - comma: ',' - column_reference: naked_identifier: group_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: 'ON' - keyword: DELETE - keyword: CASCADE - comma: ',' - column_reference: naked_identifier: domain_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: 'ON' - keyword: UPDATE - keyword: RESTRICT - comma: ',' - column_reference: naked_identifier: other_id - data_type: keyword: INTEGER - column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: groups - bracketed: start_bracket: ( column_reference: naked_identifier: group_id end_bracket: ) - keyword: MATCH - keyword: SIMPLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: keyword: bigint - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: NEXTVAL bracketed: start_bracket: ( expression: cast_expression: quoted_literal: "'orders_id_seq'" casting_operator: '::' data_type: data_type_identifier: regclass end_bracket: ) - comma: ',' - column_reference: naked_identifier: constraint_collate_constraints - data_type: keyword: text - column_constraint_segment: keyword: UNIQUE - keyword: COLLATE - collation_reference: naked_identifier: numeric - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: constraints_collate - data_type: keyword: text - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - keyword: COLLATE - collation_reference: naked_identifier: numeric - comma: ',' - column_reference: 
naked_identifier: collate_constraints - data_type: keyword: text - keyword: COLLATE - collation_reference: naked_identifier: numeric - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: nulls_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - comma: ',' - column_reference: naked_identifier: nulls_not_distinct - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: NOT - keyword: DISTINCT - comma: ',' - column_reference: naked_identifier: everything - data_type: keyword: text - column_constraint_segment: - keyword: UNIQUE - keyword: NULLS - keyword: DISTINCT - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspace - keyword: COLLATE - collation_reference: naked_identifier: numeric - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: primary_key_options - bracketed: - start_bracket: ( - column_reference: naked_identifier: everything - data_type: keyword: int - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: arg1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - comma: ',' - definition_parameter: properties_naked_identifier: arg5 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspace - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: quotas - dot: . - naked_identifier: usage - bracketed: start_bracket: ( column_reference: naked_identifier: foo data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: quotas - dot: . 
- naked_identifier: my_table - bracketed: start_bracket: ( column_reference: naked_identifier: usage data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: boolean - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: boolean - column_constraint_segment: keyword: DEFAULT boolean_literal: 'false' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints3 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: - bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' end_bracket: ) - binary_operator: '*' - bracketed: start_bracket: ( expression: - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints33 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints4 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: int - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' - binary_operator: + - numeric_literal: '4' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints5 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: bool - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - keyword: NOT - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '4' - end_bracket: ) end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: with_constraints6 - bracketed: - 
start_bracket: ( - column_reference: naked_identifier: col_1 - data_type: keyword: bool - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT expression: bracketed: start_bracket: ( expression: - numeric_literal: '5' - keyword: NOT - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_with_storage_param - bracketed: start_bracket: ( column_reference: naked_identifier: col_1 data_type: keyword: boolean end_bracket: ) - keyword: WITH - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: autovacuum_enabled comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_with_storage_params - bracketed: start_bracket: ( column_reference: naked_identifier: col_1 data_type: keyword: boolean end_bracket: ) - keyword: WITH - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: autovacuum_enabled comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' - comma: ',' - relation_option: properties_naked_identifier: vacuum_truncate comparison_operator: raw_comparison_operator: '=' boolean_literal: 'false' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tbl - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_char_varying_unlimited - data_type: - keyword: character - keyword: varying - comma: ',' - column_reference: naked_identifier: col_char_varying_limited - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_varchar_unlimited - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: col_varchar_limited - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_character_default - data_type: keyword: character - comma: ',' - column_reference: naked_identifier: col_character_specified - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_char_default - data_type: keyword: char - comma: ',' - column_reference: naked_identifier: col_char_specified - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col_text - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: col_system_char - data_type: quoted_identifier: '"char"' - comma: ',' - column_reference: naked_identifier: col_name - data_type: data_type_identifier: name - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: reservation - bracketed: start_bracket: ( column_reference: naked_identifier: during data_type: keyword: tsrange comma: ',' table_constraint: - 
keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: room_reservation - bracketed: - start_bracket: ( - column_reference: naked_identifier: room - data_type: keyword: text - comma: ',' - column_reference: naked_identifier: during - data_type: keyword: tsrange - comma: ',' - table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: room keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: during keyword: WITH comparison_operator: - ampersand: '&' - ampersand: '&' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: no_using - bracketed: start_bracket: ( column_reference: naked_identifier: field data_type: keyword: text comma: ',' table_constraint: - keyword: EXCLUDE - bracketed: start_bracket: ( exclusion_constraint_element: index_element: column_reference: naked_identifier: field keyword: WITH comparison_operator: raw_comparison_operator: '=' end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - keyword: 'NO' - keyword: INHERIT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: many_options - bracketed: start_bracket: ( column_reference: naked_identifier: field data_type: keyword: text comma: ',' table_constraint: - keyword: EXCLUDE - keyword: USING - index_access_method: naked_identifier: gist - bracketed: - start_bracket: ( - exclusion_constraint_element: index_element: column_reference: naked_identifier: one keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_opclass index_element_options: operator_class_reference: naked_identifier: nulls keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: nulls_last index_element_options: - keyword: NULLS - keyword: LAST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: two index_element_options: - keyword: COLLATE - collation_reference: quoted_identifier: '"en-US"' - operator_class_reference: naked_identifier: opclass - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . 
- properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: ASC - keyword: NULLS - keyword: FIRST keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: bracketed: start_bracket: ( expression: column_reference: naked_identifier: two binary_operator: + numeric_literal: '5' end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: function: function_name: function_name_identifier: myfunc_opclass bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) index_element_options: operator_class_reference: naked_identifier: fop relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - relation_option: properties_naked_identifier: foo comparison_operator: raw_comparison_operator: '=' numeric_literal: '2' - end_bracket: ) keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: only_opclass index_element_options: operator_class_reference: naked_identifier: opclass keyword: WITH comparison_operator: raw_comparison_operator: '=' - comma: ',' - exclusion_constraint_element: index_element: column_reference: naked_identifier: desc_order index_element_options: keyword: DESC keyword: WITH comparison_operator: raw_comparison_operator: '=' - end_bracket: ) - index_parameters: - keyword: INCLUDE - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - end_bracket: ) - keyword: WITH - definition_parameters: bracketed: - start_bracket: ( - definition_parameter: properties_naked_identifier: idx_num comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - definition_parameter: properties_naked_identifier: idx_str comparison_operator: raw_comparison_operator: '=' quoted_literal: "'idx_value'" - comma: ',' - definition_parameter: - properties_naked_identifier: idx_kw - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: DESC - end_bracket: ) - keyword: USING - keyword: INDEX - keyword: TABLESPACE - tablespace_reference: naked_identifier: tblspc - keyword: WHERE - bracketed: start_bracket: ( expression: column_reference: naked_identifier: field comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' quoted_literal: "'def'" end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: example_table - bracketed: start_bracket: ( end_bracket: ) - keyword: INHERITS - bracketed: start_bracket: ( table_reference: naked_identifier: parent_table end_bracket: ) - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_table_as.sql

CREATE TEMP TABLE t1 AS ( SELECT something FROM t2 );
CREATE TEMP TABLE t1 AS SELECT something FROM t2 ;
CREATE TEMPORARY TABLE t1 AS SELECT something FROM t2 ;
CREATE TABLE t1 AS ( SELECT something FROM t2 );
CREATE TABLE t1 AS SELECT something FROM t2 ;
CREATE TABLE IF NOT EXISTS t1 AS SELECT something FROM t2 ;
CREATE TABLE t1 ON COMMIT DELETE ROWS AS SELECT something FROM t2 ;
CREATE TABLE t1 ON COMMIT PRESERVE ROWS AS SELECT something FROM t2 ;
CREATE TABLE t1 ON COMMIT DROP AS SELECT something FROM t2 ;
CREATE TABLE t1 AS ( SELECT something FROM t2 ) WITH NO DATA ;
CREATE TABLE t1 AS SELECT something FROM t2 WITH NO DATA ;
CREATE TABLE t1 AS ( SELECT something FROM t2 ) WITH DATA ;
CREATE TABLE t1 AS SELECT something FROM t2 WITH DATA ;
CREATE UNLOGGED TABLE t1 AS SELECT something FROM t2 ;
CREATE GLOBAL TEMP TABLE t1 AS SELECT something FROM t2 ;
CREATE LOCAL TEMP TABLE t1 AS SELECT something FROM t2 ;
CREATE TABLE t1 USING method AS SELECT something FROM t2 ;
CREATE TABLE t1 WITHOUT OIDS AS SELECT something FROM t2 ;
CREATE TABLE t1 (c1, c2, c3) AS VALUES ('val1', 'val2', 'val3'), ('val4', 'val5', 'val6') ;
CREATE TABLE t1 AS TABLE t2 ;
CREATE TABLE t1 AS EXECUTE func() ;
CREATE TABLE t1 TABLESPACE ts AS SELECT something FROM t2 ;
CREATE TABLE t1 WITH (val=70) AS SELECT something FROM t2 ;
create temp table t1 with (autovacuum_enabled = true, toast_tuple_target = 123, vacuum_index_cleanup = false) as select column_1 , column_2 , column_3 from tablename;
create temp table a_new_table with (appendoptimized = true, compresstype = zstd) as select column_1 , column_2 , column_3 from schema.tablename group by 1, 2, 3;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_table_as.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1ae3c61fd8e9753e50ab692f5949825c30fa47744146dc30d1fa774fca367f3d file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: 'ON' - keyword: COMMIT - keyword: DROP - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: UNLOGGED - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: GLOBAL - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: LOCAL - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: 
something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: USING - parameter: method - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: WITHOUT - keyword: OIDS - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: AS - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'val1'" - comma: ',' - expression: quoted_literal: "'val2'" - comma: ',' - expression: quoted_literal: "'val3'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'val4'" - comma: ',' - expression: quoted_literal: "'val5'" - comma: ',' - expression: quoted_literal: "'val6'" - end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - keyword: TABLE - table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: AS - keyword: EXECUTE - function: function_name: function_name_identifier: func bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: TABLESPACE - tablespace_reference: naked_identifier: ts - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - keyword: WITH - bracketed: start_bracket: ( parameter: val comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: temp - keyword: table - table_reference: naked_identifier: t1 - keyword: with - bracketed: - start_bracket: ( - parameter: autovacuum_enabled - comparison_operator: 
raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: toast_tuple_target - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '123' - comma: ',' - parameter: vacuum_index_cleanup - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tablename - statement_terminator: ; - statement: create_table_as_statement: - keyword: create - keyword: temp - keyword: table - table_reference: naked_identifier: a_new_table - keyword: with - bracketed: - start_bracket: ( - parameter: appendoptimized - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comma: ',' - parameter: compresstype - comparison_operator: raw_comparison_operator: '=' - naked_identifier: zstd - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_2 - comma: ',' - select_clause_element: column_reference: naked_identifier: column_3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema - dot: . - naked_identifier: tablename groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - comma: ',' - numeric_literal: '3' - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_trigger.sql

CREATE TRIGGER foo AFTER INSERT ON bar EXECUTE FUNCTION proc(args);
CREATE TRIGGER foo BEFORE INSERT on bar EXECUTE FUNCTION proc(args);
CREATE TRIGGER foo AFTER UPDATE OF bar, baz ON bar EXECUTE FUNCTION proc(args);
CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY DEFERRED FOR EACH STATEMENT EXECUTE FUNCTION proc(args);
CREATE TRIGGER foo INSTEAD OF DELETE ON bar FROM baz DEFERRABLE INITIALLY DEFERRED FOR EACH STATEMENT EXECUTE FUNCTION schema.proc(args);
CREATE TRIGGER foo BEFORE INSERT ON bar WHEN (a=b) EXECUTE FUNCTION proc(args);
CREATE OR REPLACE CONSTRAINT TRIGGER foo BEFORE INSERT ON bar EXECUTE FUNCTION proc(args);
CREATE TRIGGER foo BEFORE INSERT ON bar REFERENCING OLD TABLE as old_table NEW TABLE AS new_table EXECUTE PROCEDURE proc(args);
CREATE TRIGGER check_update BEFORE INSERT OR UPDATE ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();
CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance, transactions ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();
CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance, transactions OR TRUNCATE ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();
CREATE OR REPLACE TRIGGER check_update BEFORE UPDATE OF balance ON accounts FOR EACH ROW EXECUTE FUNCTION check_account_update();
CREATE TRIGGER check_update BEFORE UPDATE ON accounts FOR EACH ROW WHEN (OLD.balance IS DISTINCT FROM NEW.balance) EXECUTE FUNCTION check_account_update();
--CREATE TRIGGER log_update
-- AFTER UPDATE ON accounts
-- FOR EACH ROW
-- WHEN (OLD.* IS DISTINCT FROM NEW.*)
-- EXECUTE FUNCTION log_account_update();
CREATE TRIGGER view_insert INSTEAD OF INSERT ON my_view FOR EACH ROW EXECUTE FUNCTION view_insert_row();
CREATE TRIGGER transfer_insert AFTER INSERT ON transfer REFERENCING NEW TABLE AS inserted FOR EACH STATEMENT EXECUTE FUNCTION check_transfer_balances_to_zero();
CREATE TRIGGER paired_items_update AFTER UPDATE ON paired_items REFERENCING NEW TABLE AS newtab OLD TABLE AS oldtab FOR EACH ROW EXECUTE FUNCTION check_matching_pairs();
CREATE TRIGGER log_update AFTER UPDATE ON accounts FOR EACH ROW WHEN (OLD.* IS DISTINCT FROM NEW.*) EXECUTE FUNCTION log_account_update();

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_trigger.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 834efd7fb7e37450e536ce2dfe8cd171d0c3f52d1c3749a32167280fdb466071
file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'on' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: AFTER - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - keyword: FOR - keyword: EACH - keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: INSTEAD - keyword: OF - keyword: DELETE - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: FROM - table_reference: naked_identifier: baz - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - keyword: FOR - keyword: EACH -
keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: naked_identifier: schema dot: . function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: CONSTRAINT - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: REFERENCING - keyword: OLD - keyword: TABLE - keyword: as - table_reference: naked_identifier: old_table - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: new_table - keyword: EXECUTE - keyword: PROCEDURE - function: function_name: function_name_identifier: proc bracketed: start_bracket: ( expression: column_reference: naked_identifier: args end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: INSERT - keyword: OR - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - comma: ',' - column_reference: naked_identifier: transactions - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - comma: ',' - column_reference: naked_identifier: transactions - keyword: OR - keyword: TRUNCATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - 
keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: balance - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: check_update - keyword: BEFORE - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: WHEN - bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: OLD - dot: . - naked_identifier: balance - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: balance end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: view_insert - keyword: INSTEAD - keyword: OF - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: my_view - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: view_insert_row bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: transfer_insert - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: transfer - keyword: REFERENCING - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: inserted - keyword: FOR - keyword: EACH - keyword: STATEMENT - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_transfer_balances_to_zero bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: paired_items_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: paired_items - keyword: REFERENCING - keyword: NEW - keyword: TABLE - keyword: AS - table_reference: naked_identifier: newtab - keyword: OLD - keyword: TABLE - keyword: AS - table_reference: naked_identifier: oldtab - keyword: FOR - keyword: EACH - keyword: ROW - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: check_matching_pairs bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: log_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: FOR - keyword: EACH - keyword: ROW - keyword: WHEN - bracketed: start_bracket: ( expression: - naked_identifier: OLD - dot: . 
- star: '*' - keyword: IS - keyword: DISTINCT - keyword: FROM - naked_identifier: NEW - dot: . - star: '*' end_bracket: ) - keyword: EXECUTE - keyword: FUNCTION - function: function_name: function_name_identifier: log_account_update bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_type.sql

-- https://www.postgresql.org/docs/current/sql-createtype.html
CREATE TYPE foo;
CREATE TYPE bar AS ENUM ();
CREATE TYPE bar AS ENUM ('foo', 'bar');
CREATE TYPE foobar AS RANGE (SUBTYPE = FLOAT);
CREATE TYPE barbar AS (INPUT = foo, OUTPUT = bar);
CREATE TYPE foofoo AS (foo varchar collate utf8);

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_type.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: d3954998d4806cba845c8ef75e0800508e4d47788ba7ef64ae82c03aebd82dc0
file: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: foo - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: bar - keyword: AS - keyword: ENUM - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: bar - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - single_quote: "'foo'" - comma: ',' - single_quote: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: foobar - keyword: AS - keyword: RANGE - bracketed: - start_bracket: ( - word: SUBTYPE - equals: '=' - word: FLOAT - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: barbar - keyword: AS - bracketed: - start_bracket: ( - word: INPUT - equals: '=' - word: foo - comma: ',' - word: OUTPUT - equals: '=' - word: bar - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: foofoo - keyword: AS - bracketed: - start_bracket: ( - word: foo - word: varchar - word: collate - word: utf8 - end_bracket: ) - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_user_mapping.sql

CREATE USER MAPPING FOR bob SERVER foo OPTIONS (user 'bob', password 'secret');
CREATE USER MAPPING IF NOT EXISTS FOR PUBLIC SERVER foo;
CREATE USER MAPPING IF NOT EXISTS FOR CURRENT_USER SERVER foo OPTIONS (user 'bob', password 'secret', option 'value');

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_user_mapping.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 76adf061fe695abd6daa2ca9aed64cea78ad3a29153b04b85a6bd6e542d78bea
file: - statement: create_user_mapping_statement: - keyword: CREATE - keyword: USER - keyword: MAPPING - keyword: FOR - naked_identifier: bob - keyword: SERVER - server_reference: naked_identifier: foo - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: user - quoted_literal: "'bob'" - comma: ',' - naked_identifier_all: password - quoted_literal: "'secret'" - end_bracket: ) - statement_terminator: ; - statement: create_user_mapping_statement: - keyword: CREATE - keyword: USER - keyword: MAPPING - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: FOR - naked_identifier: PUBLIC - keyword: SERVER - server_reference: naked_identifier: foo - statement_terminator: ; - statement: create_user_mapping_statement: - keyword: CREATE - keyword: USER - keyword: MAPPING - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: FOR - naked_identifier: CURRENT_USER - keyword: SERVER - server_reference: naked_identifier: foo - keyword: OPTIONS - bracketed: - start_bracket: ( - naked_identifier_all: user - quoted_literal: "'bob'" - comma: ',' - naked_identifier_all: password - quoted_literal: "'secret'" - comma: ',' - naked_identifier_all: option - quoted_literal: "'value'" - end_bracket: ) - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_view.sql

CREATE VIEW vista AS SELECT 'Hello World';
CREATE OR REPLACE VIEW vista AS SELECT 'Hello World';
CREATE VIEW vista AS SELECT text 'Hello World' AS hello;
CREATE TEMP VIEW vista AS SELECT text 'Hello World' AS hello;
CREATE TEMPORARY VIEW vista AS SELECT text 'Hello World' AS hello;
CREATE VIEW comedies AS SELECT * FROM films WHERE kind = 'Comedy';
CREATE VIEW pg_comedies AS VALUES (1, 'one'), (2, 'two'), (3, 'three') WITH LOCAL CHECK OPTION;
CREATE VIEW pg_comedies AS SELECT * FROM comedies WHERE classification = 'PG' WITH CASCADED CHECK OPTION;
create view foo with (security_invoker) as select 1;
create view foo with (security_barrier) as select 1;
create view foo with (security_invoker=BOOLEAN) as select 1;
create view foo with (security_barrier=BOOLEAN) as select 1;
create view foo with (check_option=local) as select * from OTHER_VIEW;
create view foo with (check_option=cascaded) as select * from OTHER_VIEW;
create view foo with (opt1, opt2=5, opt3='str', ns.opt4, ns.opt5=6, ns.opt6='str', opt7=ASC) as select 1;
create view foo as select * from OTHER_VIEW with local check option;
create view foo as select * from OTHER_VIEW with cascaded check option;
CREATE OR REPLACE RECURSIVE VIEW "grouping_node" ( "node_id", "ancestors", "category_id", "path", "path_nodes" ) AS
SELECT "group_id" AS "node_id", ARRAY[]::INTEGER[] AS "ancestors", "category_id", ARRAY["name"]::text[] AS "path", ARRAY["group_id"]::INTEGER[] AS "path_nodes"
FROM "grouping_managementgroup"
WHERE "parent_id" IS NULL
UNION ALL
SELECT "group_id", "ancestors" || "parent_id", "grouping_node"."category_id", "path" || "name"::text, "path_nodes" || "group_id"
FROM "grouping_managementgroup", "grouping_node"
WHERE "parent_id" = "node_id";
-- use of collation as non-reserved keyword
create view foo as select col1 as collation from OTHER_VIEW;

sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_view.yml

# YML test
files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 75ecad5fbe335560df4e2eb042a1f1a6e4ead7fb50e681f3004acf18f06cdd1e file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: vista - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: expression: data_type: keyword: text quoted_literal: "'Hello World'" alias_expression: keyword: AS naked_identifier: hello - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: comedies - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: films where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Comedy'" - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: pg_comedies - keyword: AS - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) - with_check_option: - keyword: WITH - keyword: LOCAL - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: pg_comedies - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: 
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: comedies where_clause: keyword: WHERE expression: column_reference: naked_identifier: classification comparison_operator: raw_comparison_operator: '=' quoted_literal: "'PG'" - with_check_option: - keyword: WITH - keyword: CASCADED - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: security_invoker end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: properties_naked_identifier: security_barrier end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: security_invoker - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: BOOLEAN end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: security_barrier - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: BOOLEAN end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: check_option - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: local end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: with - relation_options: bracketed: start_bracket: ( relation_option: - properties_naked_identifier: check_option - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: cascaded end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo 
- keyword: with - relation_options: bracketed: - start_bracket: ( - relation_option: properties_naked_identifier: opt1 - comma: ',' - relation_option: properties_naked_identifier: opt2 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - comma: ',' - relation_option: properties_naked_identifier: opt3 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt4 - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt5 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '6' - comma: ',' - relation_option: - properties_naked_identifier: ns - dot: . - properties_naked_identifier: opt6 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'str'" - comma: ',' - relation_option: - properties_naked_identifier: opt7 - comparison_operator: raw_comparison_operator: '=' - properties_naked_identifier: ASC - end_bracket: ) - keyword: as - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - with_check_option: - keyword: with - keyword: local - keyword: check - keyword: option - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - with_check_option: - keyword: with - keyword: cascaded - keyword: check - keyword: option - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: RECURSIVE - keyword: VIEW - table_reference: quoted_identifier: '"grouping_node"' - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"node_id"' - comma: ',' - column_reference: quoted_identifier: '"ancestors"' - comma: ',' - column_reference: quoted_identifier: '"category_id"' - comma: ',' - column_reference: quoted_identifier: '"path"' - comma: ',' - column_reference: quoted_identifier: '"path_nodes"' - end_bracket: ) - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '"group_id"' alias_expression: keyword: AS quoted_identifier: '"node_id"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' end_square_bracket: ']' casting_operator: '::' data_type: keyword: INTEGER start_square_bracket: '[' end_square_bracket: ']' alias_expression: keyword: AS quoted_identifier: '"ancestors"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"category_id"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' 
column_reference: quoted_identifier: '"name"' end_square_bracket: ']' casting_operator: '::' data_type: keyword: text start_square_bracket: '[' end_square_bracket: ']' alias_expression: keyword: AS quoted_identifier: '"path"' - comma: ',' - select_clause_element: expression: cast_expression: typed_array_literal: array_type: keyword: ARRAY array_literal: start_square_bracket: '[' column_reference: quoted_identifier: '"group_id"' end_square_bracket: ']' casting_operator: '::' data_type: keyword: INTEGER start_square_bracket: '[' end_square_bracket: ']' alias_expression: keyword: AS quoted_identifier: '"path_nodes"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_managementgroup"' where_clause: keyword: WHERE expression: column_reference: quoted_identifier: '"parent_id"' keyword: IS null_literal: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '"group_id"' - comma: ',' - select_clause_element: expression: - column_reference: quoted_identifier: '"ancestors"' - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"parent_id"' - comma: ',' - select_clause_element: column_reference: - quoted_identifier: '"grouping_node"' - dot: . - quoted_identifier: '"category_id"' - comma: ',' - select_clause_element: expression: column_reference: quoted_identifier: '"path"' binary_operator: - pipe: '|' - pipe: '|' cast_expression: column_reference: quoted_identifier: '"name"' casting_operator: '::' data_type: keyword: text - comma: ',' - select_clause_element: expression: - column_reference: quoted_identifier: '"path_nodes"' - binary_operator: - pipe: '|' - pipe: '|' - column_reference: quoted_identifier: '"group_id"' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_managementgroup"' - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"grouping_node"' where_clause: keyword: WHERE expression: - column_reference: quoted_identifier: '"parent_id"' - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"node_id"' - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: foo - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: col1 alias_expression: keyword: as naked_identifier: collation from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: OTHER_VIEW - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_zero_argument_function.sql000066400000000000000000000001241451700765000304650ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION a() RETURNS integer AS $$ SELECT 1; $$ LANGUAGE SQL; sqlfluff-2.3.5/test/fixtures/dialects/postgres/create_zero_argument_function.yml000066400000000000000000000016731451700765000305010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 16714c14ab92e8d16180df1b9b8947ff24806a84991014788994ceba5835921f file: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: a - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: integer - function_definition: keyword: AS quoted_literal: "$$\n SELECT 1;\n$$" language_clause: keyword: LANGUAGE naked_identifier: SQL statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cte_in_materialized_view.sql000066400000000000000000000010211451700765000273760ustar00rootroot00000000000000CREATE MATERIALIZED VIEW public.mv_sales TABLESPACE pg_default AS WITH regional_sales AS ( SELECT region, SUM(amount) AS total_sales FROM orders GROUP BY region ), top_regions AS ( SELECT region FROM regional_sales WHERE total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales) ) SELECT region, product, SUM(quantity) AS product_units, SUM(amount) AS product_sales FROM orders WHERE region IN (SELECT region FROM top_regions) GROUP BY region, product WITH DATA; sqlfluff-2.3.5/test/fixtures/dialects/postgres/cte_in_materialized_view.yml000066400000000000000000000154271451700765000274170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ed863caa9a7eab7e2c71c52f49b316faee0ebd9cc2f9407ba9fa7d343d2385e file: statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: mv_sales - keyword: TABLESPACE - tablespace_reference: naked_identifier: pg_default - keyword: AS - with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: regional_sales keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) alias_expression: keyword: AS naked_identifier: total_sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: region end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: top_regions keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: region from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: regional_sales where_clause: keyword: WHERE expression: column_reference: naked_identifier: total_sales comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: expression: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: total_sales end_bracket: ) binary_operator: / numeric_literal: '10' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: regional_sales end_bracket: ) end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: product - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: product_units - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) alias_expression: keyword: AS naked_identifier: product_sales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: WHERE expression: column_reference: naked_identifier: region keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: region from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: top_regions end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: region - comma: ',' - column_reference: naked_identifier: product - with_data_clause: - keyword: WITH - keyword: DATA statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/datatypes.sql000066400000000000000000000042101451700765000243520ustar00rootroot00000000000000create table a( a smallint, b integer, ba int2, bb int4, bc int8, bd int, c bigint, d real, e double 
precision, f smallserial, g serial, ga serial2, gb serial4, gc serial8, h bigserial ); create table b( a float, b float(24), c float4, e float8 ); create table c( a numeric, aa decimal, b numeric(7), ba decimal(7), c numeric(7,2), ca decimal(7,2) ); create table d( a money ); create table e( a char, b char(7), c character, d character(5), e character varying, f character varying(8), g varchar(9), h varchar, i text ); create table f( a bytea ); create table g( a date, b interval(4), c time(4), d time(4) with time zone, e time(4) without time zone, f timestamp(4), g timestamp(4) with time zone, h timestamp(4) without time zone, i timetz, j timetz(4), k timestamptz, l timestamptz(4) ); create table h( a boolean, b bool ); create table i( a point, b line, c lseg, d box, e path, f polygon, g circle ); create table j( a cidr, b inet, c macaddr, d macaddr8 ); create table k( a bit, b bit(3), c bit varying, d bit varying(5) ); create table l( a pg_lsn ); create table l( a tsvector, b tsquery ); create table m( a uuid ); create table n( a xml ); create table o( a json, b jsonb ); create table p( a integer[], b float[][], c char[1], d jsonb[3][5], e money ARRAY, f money ARRAY[7] ); -- user defined data types CREATE TYPE bar AS ENUM ('foo', 'bar'); create table q( a bar ); -- data type with schema create type public.c AS ENUM ('foo', 'bar'); create table r( a public.c ); -- DATETIME is a valid datatype, but is not a date_time_identifier; it is only -- potentially a user-defined type (i.e. a data_type_identifier). CREATE TABLE a ( b DATE, c DATETIME ); -- from https://github.com/sqlfluff/sqlfluff/issues/2649 SELECT b::DATETIME FROM a; SELECT b, c::DATE FROM a; create table test ( situation bpchar(1) null default 'A'::bpchar ); sqlfluff-2.3.5/test/fixtures/dialects/postgres/datatypes.yml000066400000000000000000000525231451700765000243660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 12b835637c480381b3232c23c43d461412fd2f32613bdc3a16137e43b515c598 file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: ba - data_type: keyword: int2 - comma: ',' - column_reference: naked_identifier: bb - data_type: keyword: int4 - comma: ',' - column_reference: naked_identifier: bc - data_type: keyword: int8 - comma: ',' - column_reference: naked_identifier: bd - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: bigint - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: real - comma: ',' - column_reference: naked_identifier: e - data_type: - keyword: double - keyword: precision - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: smallserial - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: serial - comma: ',' - column_reference: naked_identifier: ga - data_type: keyword: serial2 - comma: ',' - column_reference: naked_identifier: gb - data_type: keyword: serial4 - comma: ',' - column_reference: naked_identifier: gc - data_type: keyword: serial8 - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: bigserial - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: b - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: float - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: float bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '24' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: float4 - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: float8 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: c - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: numeric - comma: ',' - column_reference: naked_identifier: aa - data_type: keyword: decimal - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: numeric bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: ba - data_type: keyword: decimal bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '7' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: ca - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '7' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: d - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: money end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: e 
- bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: char - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: character - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: character bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: e - data_type: - keyword: character - keyword: varying - comma: ',' - column_reference: naked_identifier: f - data_type: - keyword: character - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '8' end_bracket: ) - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '9' end_bracket: ) - comma: ',' - column_reference: naked_identifier: h - data_type: keyword: varchar - comma: ',' - column_reference: naked_identifier: i - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: f - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: bytea end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: g - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: b - data_type: datetime_type_identifier: keyword: interval bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: datetime_type_identifier: keyword: time bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: d - data_type: datetime_type_identifier: - keyword: time - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: with - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: e - data_type: datetime_type_identifier: - keyword: time - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: without - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: f - data_type: datetime_type_identifier: keyword: timestamp bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: g - data_type: datetime_type_identifier: - keyword: timestamp - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: with - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: h - data_type: datetime_type_identifier: - keyword: timestamp - bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - keyword: without - keyword: time - keyword: zone - comma: ',' - column_reference: naked_identifier: i - data_type: datetime_type_identifier: keyword: timetz - comma: ',' - column_reference: naked_identifier: j - data_type: datetime_type_identifier: keyword: timetz bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - comma: ',' - column_reference: naked_identifier: k - data_type: datetime_type_identifier: keyword: timestamptz - comma: ',' - column_reference: naked_identifier: l - 
data_type: datetime_type_identifier: keyword: timestamptz bracketed: start_bracket: ( numeric_literal: '4' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: h - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: boolean - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: i - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: point - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: line - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: lseg - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: box - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: path - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: polygon - comma: ',' - column_reference: naked_identifier: g - data_type: keyword: circle - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: j - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: cidr - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: inet - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: macaddr - comma: ',' - column_reference: naked_identifier: d - data_type: keyword: macaddr8 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: k - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: bit - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: bit bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) - comma: ',' - column_reference: naked_identifier: c - data_type: - keyword: bit - keyword: varying - comma: ',' - column_reference: naked_identifier: d - data_type: - keyword: bit - keyword: varying - bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: l - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: pg_lsn end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: l - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: tsvector - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: tsquery - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: m - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: uuid end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: n - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: keyword: xml end_bracket: ) - statement_terminator: ; - statement: create_table_statement: 
- keyword: create - keyword: table - table_reference: naked_identifier: o - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: json - comma: ',' - column_reference: naked_identifier: b - data_type: keyword: jsonb - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: p - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - data_type: keyword: integer start_square_bracket: '[' end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: b - data_type: - keyword: float - start_square_bracket: '[' - end_square_bracket: ']' - start_square_bracket: '[' - end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: c - data_type: keyword: char start_square_bracket: '[' expression: numeric_literal: '1' end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: d - data_type: - keyword: jsonb - start_square_bracket: '[' - expression: numeric_literal: '3' - end_square_bracket: ']' - start_square_bracket: '[' - expression: numeric_literal: '5' - end_square_bracket: ']' - comma: ',' - column_reference: naked_identifier: e - data_type: keyword: money array_type: keyword: ARRAY - comma: ',' - column_reference: naked_identifier: f - data_type: keyword: money sized_array_type: array_type: keyword: ARRAY array_accessor: start_square_bracket: '[' numeric_literal: '7' end_square_bracket: ']' - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: bar - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - single_quote: "'foo'" - comma: ',' - single_quote: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: q - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: data_type_identifier: bar end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: create - keyword: type - object_reference: - naked_identifier: public - dot: . - naked_identifier: c - keyword: AS - keyword: ENUM - bracketed: - start_bracket: ( - single_quote: "'foo'" - comma: ',' - single_quote: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: r - bracketed: start_bracket: ( column_reference: naked_identifier: a data_type: naked_identifier: public dot: . 
data_type_identifier: c end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: a - bracketed: - start_bracket: ( - column_reference: naked_identifier: b - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: c - data_type: data_type_identifier: DATETIME - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: data_type_identifier: DATETIME from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: keyword: DATE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: situation - data_type: keyword: bpchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - column_constraint_segment: keyword: 'null' - column_constraint_segment: keyword: default cast_expression: quoted_literal: "'A'" casting_operator: '::' data_type: keyword: bpchar - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/datetime_units.sql000066400000000000000000000017211451700765000253760ustar00rootroot00000000000000SELECT t1.field, EXTRACT(CENTURY FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DECADE FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DOW FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(DOY FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(EPOCH FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ISODOW FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ISOYEAR FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MICROSECONDS FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MILLENNIUM FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(MILLISECONDS FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE_HOUR FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(TIMEZONE_MINUTE FROM t1.sometime) AS a FROM t1; sqlfluff-2.3.5/test/fixtures/dialects/postgres/datetime_units.yml000066400000000000000000000320241451700765000254000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7dc3b38c3430f2a5dde1089f1c4be7b91fee8462f6b59ad25341839f8923ef2b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: CENTURY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DECADE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: DOY keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: EPOCH keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ISODOW keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ISOYEAR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MICROSECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MILLENNIUM keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: MILLISECONDS keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: TIMEZONE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: TIMEZONE_HOUR keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: TIMEZONE_MINUTE keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/delete.sql000066400000000000000000000026521451700765000236260ustar00rootroot00000000000000DELETE FROM films; DELETE FROM ONLY films; DELETE FROM films *; DELETE FROM films AS f; DELETE FROM films f; DELETE FROM films USING producers WHERE producer_id = producers.id AND producers.name = 'foo'; DELETE FROM films AS f USING producers AS p WHERE f.producer_id = p.id AND p.name = 'foo'; DELETE FROM films AS f USING producers AS p, actors AS a WHERE f.producer_id = p.id AND p.name = 'foo' AND f.actor_id = a.id AND a.name = 'joe cool'; DELETE FROM films f USING producers p WHERE f.producer_id = p.id AND p.name = 'foo'; DELETE FROM films f USING producers p, actors a WHERE f.producer_id = p.id AND p.name = 'foo' AND f.actor_id = a.id AND a.name = 'joe cool'; DELETE FROM tasks WHERE CURRENT OF c_tasks; DELETE FROM films WHERE kind <> 'Musical'; DELETE FROM tasks WHERE status = 'DONE' RETURNING *; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id as a_id; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id a_id; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id, producer_id; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id as a_id, producer_id as p_id; DELETE FROM tasks WHERE status = 'DONE' RETURNING actor_id a_id, producer_id p_id; WITH test as (select foo from bar) DELETE FROM films; WITH RECURSIVE t(n) AS ( VALUES (1) UNION ALL SELECT n+1 FROM t WHERE n < 100 ) DELETE FROM films; sqlfluff-2.3.5/test/fixtures/dialects/postgres/delete.yml000066400000000000000000000354141451700765000236320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 807bc630f751ca3385876e9134d7b8d6e4a64f4898fca5dcfe14f3cf656b92b2 file: - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - keyword: ONLY - table_reference: naked_identifier: films - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - star: '*' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: keyword: AS naked_identifier: f - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - keyword: USING - table_expression: table_reference: naked_identifier: producers - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: producers - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: producers - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: keyword: AS naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: keyword: AS naked_identifier: p - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: keyword: AS naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: keyword: AS naked_identifier: p - comma: ',' - table_expression: table_reference: naked_identifier: actors - alias_expression: keyword: AS naked_identifier: a - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - binary_operator: AND - column_reference: - naked_identifier: f - dot: . - naked_identifier: actor_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'joe cool'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: naked_identifier: p - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - alias_expression: naked_identifier: f - keyword: USING - table_expression: table_reference: naked_identifier: producers - alias_expression: naked_identifier: p - comma: ',' - table_expression: table_reference: naked_identifier: actors - alias_expression: naked_identifier: a - where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: f - dot: . - naked_identifier: producer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: p - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - binary_operator: AND - column_reference: - naked_identifier: f - dot: . - naked_identifier: actor_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: a - dot: . 
- naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'joe cool'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - keyword: WHERE - keyword: CURRENT - keyword: OF - object_reference: naked_identifier: c_tasks - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' quoted_literal: "'Musical'" - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - star: '*' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: keyword: as naked_identifier: a_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: naked_identifier: a_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - expression: column_reference: naked_identifier: actor_id - alias_expression: keyword: as naked_identifier: a_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - alias_expression: keyword: as naked_identifier: p_id - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: tasks - where_clause: keyword: WHERE expression: column_reference: naked_identifier: status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DONE'" - keyword: RETURNING - 
expression: column_reference: naked_identifier: actor_id - alias_expression: naked_identifier: a_id - comma: ',' - expression: column_reference: naked_identifier: producer_id - alias_expression: naked_identifier: p_id - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: test keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar end_bracket: ) delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: n end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) set_operator: - keyword: UNION - keyword: ALL select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: n binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: column_reference: naked_identifier: n comparison_operator: raw_comparison_operator: < numeric_literal: '100' end_bracket: ) - delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: films - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/discard.sql000066400000000000000000000001201451700765000237610ustar00rootroot00000000000000DISCARD ALL; DISCARD PLANS; DISCARD SEQUENCES; DISCARD TEMPORARY; DISCARD TEMP; sqlfluff-2.3.5/test/fixtures/dialects/postgres/discard.yml000066400000000000000000000016411451700765000237740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00eaafbc98c8ec708bf86bddd93c306cd4f60e77c39d8b9d01d8e7aa70b90d7f file: - statement: discard_statement: - keyword: DISCARD - keyword: ALL - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: PLANS - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: SEQUENCES - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: TEMPORARY - statement_terminator: ; - statement: discard_statement: - keyword: DISCARD - keyword: TEMP - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/do.sql000066400000000000000000000032311451700765000227600ustar00rootroot00000000000000-- postgres_do.sql /* Postgres DO statements (https://www.postgresql.org/docs/14/sql-do.html). */ -- From Issue #2018 (https://github.com/sqlfluff/sqlfluff/issues/2018) DO $$DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' 
|| quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$; -- can put language before code block DO LANGUAGE plpgsql $$ DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$; -- can put language after code block DO $$ DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = 'VIEW' AND table_schema = 'public' LOOP EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser'; END LOOP; END$$ LANGUAGE plpgsql; -- code block can be any string literal DO E' DECLARE r record; BEGIN FOR r IN SELECT table_schema, table_name FROM information_schema.tables WHERE table_type = \'VIEW\' AND table_schema = \'public\' LOOP EXECUTE \'GRANT ALL ON \' || quote_ident(r.table_schema) || \'.\' || quote_ident(r.table_name) || \' TO webuser\'; END LOOP; END'; DO 'DECLARE r record;'; DO U&'\0441\043B\043E\043D'; DO 'SELECT foo' 'bar'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/do.yml000066400000000000000000000054221451700765000227660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 738c737d70b4d4a8260eaa4d23a5bef445996042f9bac82bbb32bc5ee324e5ad file: - statement: do_statement: keyword: DO quoted_literal: "$$DECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" - statement_terminator: ; - statement: do_statement: keyword: DO language_clause: keyword: LANGUAGE naked_identifier: plpgsql quoted_literal: "$$\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "$$\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = 'VIEW' AND table_schema = 'public'\n LOOP\n EXECUTE 'GRANT ALL\ \ ON ' || quote_ident(r.table_schema) || '.' 
|| quote_ident(r.table_name)\ \ || ' TO webuser';\n END LOOP;\nEND$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "E'\nDECLARE r record;\nBEGIN\n FOR r IN SELECT table_schema,\ \ table_name FROM information_schema.tables\n WHERE table_type\ \ = \\'VIEW\\' AND table_schema = \\'public\\'\n LOOP\n EXECUTE\ \ \\'GRANT ALL ON \\' || quote_ident(r.table_schema) || \\'.\\' || quote_ident(r.table_name)\ \ || \\' TO webuser\\';\n END LOOP;\nEND'" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "'DECLARE r record;'" - statement_terminator: ; - statement: do_statement: keyword: DO quoted_literal: "U&'\\0441\\043B\\043E\\043D'" - statement_terminator: ; - statement: do_statement: - keyword: DO - quoted_literal: "'SELECT foo'" - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_cast.sql000066400000000000000000000005071451700765000243370ustar00rootroot00000000000000-- ANSI SQL: DROP CAST (int AS bool); DROP CAST (int AS bool) RESTRICT; DROP CAST (int AS bool) CASCADE; DROP CAST (udt_1 AS udt_2); DROP CAST (sch.udt_1 AS sch.udt_2); -- Additional PG extensions: DROP CAST IF EXISTS (int AS bool); DROP CAST IF EXISTS (int AS bool) RESTRICT; DROP CAST IF EXISTS (int AS bool) CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_cast.yml000066400000000000000000000056141451700765000243450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 797c59758187f686eabaaa295a9e2b392158e11ab37a934556cf024cf468e083 file: - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: data_type_identifier: udt_1 - keyword: AS - data_type: data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - bracketed: - start_bracket: ( - data_type: naked_identifier: sch dot: . data_type_identifier: udt_1 - keyword: AS - data_type: naked_identifier: sch dot: . 
data_type_identifier: udt_2 - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: RESTRICT - statement_terminator: ; - statement: drop_cast_statement: - keyword: DROP - keyword: CAST - keyword: IF - keyword: EXISTS - bracketed: - start_bracket: ( - data_type: keyword: int - keyword: AS - data_type: keyword: bool - end_bracket: ) - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_database.sql000066400000000000000000000002651451700765000251520ustar00rootroot00000000000000DROP DATABASE db; DROP DATABASE db (FORCE); DROP DATABASE db WITH (FORCE); DROP DATABASE IF EXISTS db; DROP DATABASE IF EXISTS db (FORCE); DROP DATABASE IF EXISTS db WITH (FORCE); sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_database.yml000066400000000000000000000035641451700765000251610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d40f9773665452c0ebe432f0dcd4dac3bb66eb3a31b67101aa130727475cb543 file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: db - keyword: WITH - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: db - keyword: WITH - bracketed: start_bracket: ( keyword: FORCE end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_domain.sql000066400000000000000000000000211451700765000246430ustar00rootroot00000000000000DROP DOMAIN box; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_domain.yml000066400000000000000000000010541451700765000246540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 38570fd1de11ce681838552aac7a502c8163cd496475a4639714b51b0b736a48 file: statement: drop_domain_statement: - keyword: DROP - keyword: DOMAIN - object_reference: naked_identifier: box statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_function.sql000066400000000000000000000004171451700765000252320ustar00rootroot00000000000000DROP FUNCTION sqrt (integer); DROP FUNCTION sqrt (integer), sqrt (bigint); DROP FUNCTION update_employee_salaries; DROP FUNCTION update_employee_salaries (); DROP FUNCTION IF EXISTS foo (IN my_var integer, VARIADIC my_var_2 text); DROP FUNCTION IF EXISTS f_name CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_function.yml000066400000000000000000000047011451700765000252340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c558b7e99c57564a274aa6d3dde08cbc81b55855a124d4df7cc3531f991db2ed file: - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: integer end_bracket: ) - comma: ',' - object_reference: naked_identifier: sqrt - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: bigint end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: update_employee_salaries - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - object_reference: naked_identifier: update_employee_salaries - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: foo - function_parameter_list: bracketed: - start_bracket: ( - keyword: IN - parameter: my_var - data_type: keyword: integer - comma: ',' - keyword: VARIADIC - parameter: my_var_2 - data_type: keyword: text - end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: f_name - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_index.sql000066400000000000000000000011031451700765000245050ustar00rootroot00000000000000DROP INDEX abc; DROP INDEX "abc"; DROP INDEX IF EXISTS abc; DROP INDEX abc, "def", ghi; DROP INDEX IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP INDEX abc CASCADE; DROP INDEX abc, def, ghi CASCADE; DROP INDEX IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP INDEX abc RESTRICT; DROP INDEX abc, def, ghi RESTRICT; -- Test CONCURRENTLY DROP INDEX CONCURRENTLY abc; DROP INDEX CONCURRENTLY IF EXISTS abc; DROP INDEX CONCURRENTLY abc, def; DROP INDEX CONCURRENTLY IF EXISTS abc, def; DROP INDEX CONCURRENTLY abc, def CASCADE; 
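Each of the .sql fixtures above is paired with a hash-locked .yml fixture (such as the drop_index.yml that follows) recording the parse tree SQLFluff is expected to produce. As a minimal sketch of how such a tree can be produced outside the test suite — assuming SQLFluff is installed and that the simple Python API's parse() returns the nested record these YAML files serialize, which should be treated as an assumption about the return shape rather than a guarantee — one might write:

import sqlfluff

# One of the DROP INDEX variants exercised by the fixture above.
sql = "DROP INDEX CONCURRENTLY IF EXISTS abc, def;"

# parse() is expected to raise an APIParsingError on unparsable input, so
# reaching the print() below implies the postgres dialect accepted the
# statement.
tree = sqlfluff.parse(sql, dialect="postgres")
print(tree)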
sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_index.yml000066400000000000000000000101231451700765000245110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a8bfaf3bfd8572b31cd437d25ca9449d13350a5be6765b90320af9bf1047a214 file: - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: quoted_identifier: '"def"' - comma: ',' - index_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - comma: ',' - index_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - keyword: IF - keyword: EXISTS - index_reference: naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - statement_terminator: ; - statement: drop_index_statement: - keyword: DROP - keyword: INDEX - keyword: CONCURRENTLY - index_reference: 
naked_identifier: abc - comma: ',' - index_reference: naked_identifier: def - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_materialized_view.sql000066400000000000000000000007561451700765000271170ustar00rootroot00000000000000DROP MATERIALIZED VIEW bar; DROP MATERIALIZED VIEW foo, bar; DROP MATERIALIZED VIEW bar CASCADE; DROP MATERIALIZED VIEW foo, bar CASCADE; DROP MATERIALIZED VIEW bar RESTRICT; DROP MATERIALIZED VIEW foo, bar RESTRICT; DROP MATERIALIZED VIEW IF EXISTS bar; DROP MATERIALIZED VIEW IF EXISTS foo, bar; DROP MATERIALIZED VIEW IF EXISTS bar CASCADE; DROP MATERIALIZED VIEW IF EXISTS foo, bar CASCADE; DROP MATERIALIZED VIEW IF EXISTS bar RESTRICT; DROP MATERIALIZED VIEW IF EXISTS foo, bar RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_materialized_view.yml000066400000000000000000000070421451700765000271140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 363142dc482ee6645284249f7955af56d4238c12b8fac7bdae1b10e5419e0588 file: - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: 
drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: foo - comma: ',' - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_owned.sql000066400000000000000000000005011451700765000245130ustar00rootroot00000000000000DROP OWNED BY bob; DROP OWNED BY bob, alice; DROP OWNED BY CURRENT_ROLE; DROP OWNED BY CURRENT_USER; DROP OWNED BY SESSION_USER; DROP OWNED BY bob, CURRENT_ROLE, alice, CURRENT_USER, ted; DROP OWNED BY bob CASCADE; DROP OWNED BY bob RESTRICT; DROP OWNED BY bob, alice CASCADE; DROP OWNED BY bob, alice RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_owned.yml000066400000000000000000000050731451700765000245260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f30947e3b468c099a73640d9924d061e48b1c3df0bfbdbef07c4f22810c3ca42 file: - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: CURRENT_ROLE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: CURRENT_USER - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - keyword: SESSION_USER - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - keyword: CURRENT_ROLE - comma: ',' - role_reference: naked_identifier: alice - comma: ',' - keyword: CURRENT_USER - comma: ',' - role_reference: naked_identifier: ted - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: CASCADE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: RESTRICT - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - keyword: CASCADE - statement_terminator: ; - statement: drop_owned_statement: - keyword: DROP - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: alice - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_policy.sql000066400000000000000000000004001451700765000246740ustar00rootroot00000000000000DROP POLICY account_managers ON accounts; DROP POLICY IF EXISTS account_managers ON 
accounts; DROP POLICY account_managers ON accounts CASCADE; DROP POLICY account_managers ON accounts RESTRICT; DROP POLICY IF EXISTS account_managers ON accounts RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_policy.yml000066400000000000000000000034221451700765000247050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1933526cad5db1a8339d06b992d0fb310d39f4bb001a60484304c512585d481b file: - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: CASCADE - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RESTRICT - statement_terminator: ; - statement: drop_policy_statement: - keyword: DROP - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: account_managers - keyword: 'ON' - table_reference: naked_identifier: accounts - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_procedure.sql000066400000000000000000000005671451700765000254030ustar00rootroot00000000000000DROP PROCEDURE do_db_maintenance(); drop procedure insert_actor; drop procedure insert_actor(varchar); drop procedure insert_actor(varchar, varchar); drop procedure delete_actor, update_actor; drop procedure delete_actor, update_actor CASCADE; drop procedure delete_actor(in id varchar); drop procedure insert_actor(varchar, varchar), insert_actor2(varchar, varchar); sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_procedure.yml000066400000000000000000000062671451700765000254100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9412d5773862ec85a0ca1370edef125b7c70bf59d502b73cf9419c88241353e6 file: - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - function_name: function_name_identifier: do_db_maintenance - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: start_bracket: ( data_type: keyword: varchar end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - comma: ',' - function_name: function_name_identifier: update_actor - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - comma: ',' - function_name: function_name_identifier: update_actor - keyword: CASCADE - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: delete_actor - function_parameter_list: bracketed: start_bracket: ( keyword: in parameter: id data_type: keyword: varchar end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: drop - keyword: procedure - function_name: function_name_identifier: insert_actor - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - comma: ',' - function_name: function_name_identifier: insert_actor2 - function_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: varchar - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_publication.sql000066400000000000000000000012431451700765000257140ustar00rootroot00000000000000-- Test no trailing keyword with combinations of: -- * IF EXISTS -- * One publication vs multiple publications. DROP PUBLICATION abc; DROP PUBLICATION "abc"; DROP PUBLICATION IF EXISTS abc; DROP PUBLICATION abc, "def", ghi; DROP PUBLICATION IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP PUBLICATION abc CASCADE; DROP PUBLICATION IF EXISTS abc CASCADE; DROP PUBLICATION abc, def, ghi CASCADE; DROP PUBLICATION IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP PUBLICATION abc RESTRICT; DROP PUBLICATION IF EXISTS abc RESTRICT; DROP PUBLICATION abc, def, ghi RESTRICT; DROP PUBLICATION IF EXISTS abc, def, ghi RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_publication.yml000066400000000000000000000077151451700765000257300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 41225e922471bc8b5fb87ee6f2ac4199f7c7a5f29a52dd5dee1d99fdd1e16485 file: - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: quoted_identifier: '"def"' - comma: ',' - publication_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_publication_statement: - keyword: DROP - keyword: PUBLICATION - keyword: IF - keyword: EXISTS - publication_reference: naked_identifier: abc - comma: ',' - publication_reference: naked_identifier: def - comma: ',' - publication_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_sequence.sql000066400000000000000000000002141451700765000252100ustar00rootroot00000000000000DROP SEQUENCE foo; DROP SEQUENCE foo.foo; DROP SEQUENCE IF EXISTS foo; DROP SEQUENCE IF EXISTS foo CASCADE; DROP SEQUENCE foo RESTRICT; 
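Per the generated-file headers repeated throughout these fixtures, the .yml side must be regenerated with `python test/generate_parse_fixture_yml.py` whenever a .sql fixture changes. Before regenerating, a quick way to check that the dialect grammar actually covers a new statement is the simple lint API; the sketch below assumes lint() returns a list of dicts carrying a "code" key and that unparsable input surfaces as a parsing ("PRS") violation:

import sqlfluff

statements = [
    "DROP SEQUENCE foo;",
    "DROP SEQUENCE IF EXISTS foo CASCADE;",
]
for sql in statements:
    # Any parsing violation here would mean the postgres dialect grammar
    # does not yet cover the statement, so a YAML parse fixture could not
    # be generated for it.
    violations = sqlfluff.lint(sql, dialect="postgres")
    print(sql, "->", [v["code"] for v in violations])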
sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_sequence.yml000066400000000000000000000025761451700765000252270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0f19977ccbaad4b1cac3168bab6bc93b927fc4d2b68926f3abd0f25258bc1c93 file: - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: foo - dot: . - naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - sequence_reference: naked_identifier: foo - keyword: CASCADE - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: naked_identifier: foo - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_trigger.sql000066400000000000000000000002631451700765000250470ustar00rootroot00000000000000DROP TRIGGER foo ON bar; DROP TRIGGER IF EXISTS foo ON bar; DROP TRIGGER foo ON bar CASCADE; DROP TRIGGER IF EXISTS foo ON bar RESTRICT; DROP TRIGGER if_dist_exists ON films; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_trigger.yml000066400000000000000000000032121451700765000250460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: df94df5aa4e31d1c17cd308ad44d4b6c3080f149b07591753693654209d4e6f9 file: - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: CASCADE - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: foo - keyword: 'ON' - table_reference: naked_identifier: bar - keyword: RESTRICT - statement_terminator: ; - statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: if_dist_exists - keyword: 'ON' - table_reference: naked_identifier: films - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_type.sql000066400000000000000000000001571451700765000243670ustar00rootroot00000000000000DROP TYPE foo; DROP TYPE IF EXISTS foo; DROP TYPE foo, bar; DROP TYPE foo CASCADE; DROP TYPE foo RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_type.yml000066400000000000000000000024541451700765000243730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0ad296a4db75013a8f4ca8ff393984f2190041a834f478fc1f9ec2f7ead8c44b file: - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - keyword: IF - keyword: EXISTS - data_type: data_type_identifier: foo - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - comma: ',' - data_type: data_type_identifier: bar - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - keyword: CASCADE - statement_terminator: ; - statement: drop_type_statement: - keyword: DROP - keyword: TYPE - data_type: data_type_identifier: foo - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_view.sql000066400000000000000000000007251451700765000243610ustar00rootroot00000000000000DROP VIEW abc; DROP VIEW "abc"; DROP VIEW IF EXISTS abc; DROP VIEW abc, "def", ghi; DROP VIEW IF EXISTS abc, def, ghi; -- Test CASCADE trailing keyword DROP VIEW abc CASCADE; DROP VIEW IF EXISTS abc CASCADE; DROP VIEW abc, def, ghi CASCADE; DROP VIEW IF EXISTS abc, def, ghi CASCADE; -- Test RESTRICT trailing keyword DROP VIEW abc RESTRICT; DROP VIEW IF EXISTS abc RESTRICT; DROP VIEW abc, def, ghi RESTRICT; DROP VIEW IF EXISTS abc, def, ghi RESTRICT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/drop_view.yml000066400000000000000000000072011451700765000243570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36b7a96e8518c113ef9d614a8b704ce02aa976d1b69949a8e9f68c287ded1420 file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: quoted_identifier: '"abc"' - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: quoted_identifier: '"def"' - comma: ',' - table_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: CASCADE - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: abc - comma: ',' - table_reference: naked_identifier: def - comma: ',' - table_reference: naked_identifier: ghi - keyword: RESTRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/escape.sql000066400000000000000000000001301451700765000236110ustar00rootroot00000000000000SELECT E'\''; SELECT E''''; SELECT E'''\''; SELECT E'\\\''''; SELECT E' \\ '' \\'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/escape.yml000066400000000000000000000023741451700765000236270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ed3fa9d320d68f0ea2d61a06cf688da9d62fc4ebe815dbbea99b6c115f985a17 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'''\\''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\\\\\\''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "E'\n\n\\\\\n''\n\\\\'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/explain.sql000066400000000000000000000014001451700765000240120ustar00rootroot00000000000000explain ( analyze true, analyse true, verbose true, costs true, settings true, buffers true, wal true, timing true, summary true, format xml ) select 1; explain ( analyze false, analyse false, verbose false, costs false, settings false, buffers false, wal false, timing false, summary false, format xml ) select 1; explain ( analyze, analyse, verbose, costs, settings, buffers, wal, timing, summary, format xml ) select 1; explain analyze verbose select 1; explain analyse verbose select 1; explain analyze select 1; explain analyse select 1; explain (format text) select 1; explain (format json) select 1; explain (format yaml) select 1; sqlfluff-2.3.5/test/fixtures/dialects/postgres/explain.yml000066400000000000000000000137201451700765000240240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b43be4e2d4d82bc59868e3fea7954af36498598340cdc0d885be35d2da8ecbf5 file: - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze boolean_literal: 'true' - comma: ',' - explain_option: keyword: analyse boolean_literal: 'true' - comma: ',' - explain_option: keyword: verbose boolean_literal: 'true' - comma: ',' - explain_option: keyword: costs boolean_literal: 'true' - comma: ',' - explain_option: keyword: settings boolean_literal: 'true' - comma: ',' - explain_option: keyword: buffers boolean_literal: 'true' - comma: ',' - explain_option: keyword: wal boolean_literal: 'true' - comma: ',' - explain_option: keyword: timing boolean_literal: 'true' - comma: ',' - explain_option: keyword: summary boolean_literal: 'true' - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze boolean_literal: 'false' - comma: ',' - explain_option: keyword: analyse boolean_literal: 'false' - comma: ',' - explain_option: keyword: verbose boolean_literal: 'false' - comma: ',' - explain_option: keyword: costs boolean_literal: 'false' - comma: ',' - explain_option: keyword: settings boolean_literal: 'false' - comma: ',' - explain_option: keyword: buffers boolean_literal: 'false' - comma: ',' - explain_option: keyword: wal boolean_literal: 'false' - comma: ',' - explain_option: keyword: timing boolean_literal: 'false' - comma: ',' - explain_option: keyword: summary boolean_literal: 'false' - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: - start_bracket: ( - explain_option: keyword: analyze - comma: ',' - explain_option: keyword: analyse - comma: ',' - explain_option: keyword: verbose - comma: ',' - explain_option: keyword: costs - comma: ',' - explain_option: keyword: settings - comma: ',' - explain_option: keyword: buffers - comma: ',' - explain_option: keyword: wal - comma: ',' - explain_option: keyword: timing - comma: ',' - explain_option: keyword: summary - comma: ',' - explain_option: - keyword: format - keyword: xml - end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyze - keyword: verbose - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyse - keyword: verbose - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyze - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: analyse - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: text end_bracket: ) 
select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: json end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain bracketed: start_bracket: ( explain_option: - keyword: format - keyword: yaml end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/filter.sql000066400000000000000000000001321451700765000236400ustar00rootroot00000000000000SELECT COUNT(*) FILTER (WHERE c_expires > CURRENT_TIMESTAMP) AS c_active FROM t_test; sqlfluff-2.3.5/test/fixtures/dialects/postgres/filter.yml000066400000000000000000000025741451700765000236560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 45740a915ed57655d4afae8fe7583bc12d9c0dffd10b7c795413958c02c86a07 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: function_name_identifier: COUNT - bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: c_expires comparison_operator: raw_comparison_operator: '>' bare_function: CURRENT_TIMESTAMP end_bracket: ) alias_expression: keyword: AS naked_identifier: c_active from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_test statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/grant_privileges.sql000066400000000000000000000024151451700765000257250ustar00rootroot00000000000000GRANT USAGE ON SCHEMA api TO web_anon; GRANT EXECUTE ON FUNCTION api.test TO web_anon; GRANT web_anon TO my_user; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO app; GRANT TRIGGER ON ALL TABLES IN SCHEMA my_schema TO app; GRANT USAGE ON DOMAIN my_domain TO my_user; GRANT USAGE ON FOREIGN DATA WRAPPER my_fdw TO my_user; GRANT USAGE ON FOREIGN SERVER fs TO my_user; GRANT EXECUTE ON PROCEDURE fn TO my_user; GRANT EXECUTE ON ROUTINE fn TO my_user; GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA my_schema TO my_user; GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA my_schema TO my_user; GRANT EXECUTE ON ALL ROUTINES IN SCHEMA my_schema TO my_user; GRANT USAGE ON LANGUAGE my_lang TO my_user; GRANT SELECT ON LARGE OBJECT 564182 TO my_user; GRANT ALL ON TABLESPACE my_tblspc TO my_user; GRANT USAGE ON TYPE my_type TO my_role; GRANT my_user TO my_group WITH ADMIN OPTION GRANTED BY CURRENT_USER; GRANT my_user TO my_group GRANTED BY SESSION_USER; GRANT my_user TO my_group WITH ADMIN OPTION GRANTED BY my_new_role; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO xyz; GRANT CONNECT, CREATE, TEMP, TEMPORARY ON DATABASE my_db TO my_user, my_other_user WITH GRANT OPTION; GRANT SELECT ON abc TO xyz, mno; GRANT EXECUTE ON ALL ROUTINES IN SCHEMA my_schema TO my_user, my_other_user; 
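The grant_privileges.sql fixture above (and the grant_privileges.yml that follows) only pins how statements parse; whether they are well formatted is a separate question answered by the fix API. A hedged sketch — the exact output depends on which rules the active configuration enables, so the normalised string in the comment is an assumption, not a guaranteed result:

import sqlfluff

# fix() rewrites the input according to the enabled linting rules; with
# default settings the missing space after the comma should be corrected,
# but the precise result depends on configuration.
fixed = sqlfluff.fix("GRANT SELECT ON abc TO xyz,mno;", dialect="postgres")
print(fixed)  # e.g. "GRANT SELECT ON abc TO xyz, mno;"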
sqlfluff-2.3.5/test/fixtures/dialects/postgres/grant_privileges.yml000066400000000000000000000200111451700765000257170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 18bb8898c12b56df1d5444a075784ef943a500aa96652408f8047750fa26cd2e file: - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: SCHEMA - object_reference: naked_identifier: api - keyword: TO - role_reference: naked_identifier: web_anon - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: FUNCTION - object_reference: - naked_identifier: api - dot: . - naked_identifier: test - keyword: TO - role_reference: naked_identifier: web_anon - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: web_anon - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: app - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: TRIGGER - keyword: 'ON' - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: app - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DOMAIN - object_reference: naked_identifier: my_domain - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FOREIGN - keyword: DATA - keyword: WRAPPER - object_reference: naked_identifier: my_fdw - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: FOREIGN - keyword: SERVER - object_reference: naked_identifier: fs - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: PROCEDURE - object_reference: naked_identifier: fn - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ROUTINE - object_reference: naked_identifier: fn - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: FUNCTIONS - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: PROCEDURES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - 
statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: ROUTINES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: LANGUAGE - object_reference: naked_identifier: my_lang - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: LARGE - keyword: OBJECT - numeric_literal: '564182' - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ALL - keyword: 'ON' - keyword: TABLESPACE - object_reference: naked_identifier: my_tblspc - keyword: TO - role_reference: naked_identifier: my_user - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: TYPE - object_reference: naked_identifier: my_type - keyword: TO - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: WITH - keyword: ADMIN - keyword: OPTION - keyword: GRANTED - keyword: BY - keyword: CURRENT_USER - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: GRANTED - keyword: BY - keyword: SESSION_USER - statement_terminator: ; - statement: access_statement: - keyword: GRANT - object_reference: naked_identifier: my_user - keyword: TO - role_reference: naked_identifier: my_group - keyword: WITH - keyword: ADMIN - keyword: OPTION - keyword: GRANTED - keyword: BY - object_reference: naked_identifier: my_new_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: xyz - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: CONNECT - comma: ',' - keyword: CREATE - comma: ',' - keyword: TEMP - comma: ',' - keyword: TEMPORARY - keyword: 'ON' - keyword: DATABASE - object_reference: naked_identifier: my_db - keyword: TO - role_reference: naked_identifier: my_user - comma: ',' - role_reference: naked_identifier: my_other_user - keyword: WITH - keyword: GRANT - keyword: OPTION - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: naked_identifier: abc - keyword: TO - role_reference: naked_identifier: xyz - comma: ',' - role_reference: naked_identifier: mno - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: ALL - keyword: ROUTINES - keyword: IN - keyword: SCHEMA - object_reference: naked_identifier: my_schema - keyword: TO - role_reference: naked_identifier: my_user - comma: ',' - role_reference: naked_identifier: my_other_user - statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/postgres/group_by.sql000066400000000000000000000015241451700765000242070ustar00rootroot00000000000000SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY GROUPING SETS ( (region), (city), (region, city), () ); SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY ROLLUP ( (region), (city) ); SELECT region, city, grouping(region, city) AS grp_idx, count(DISTINCT id) AS num_total, count(DISTINCT id) FILTER (WHERE is_poi) AS num_poi, count(DISTINCT id) FILTER (WHERE is_gov) AS num_gov FROM location_data GROUP BY CUBE ( (region), (city) ); sqlfluff-2.3.5/test/fixtures/dialects/postgres/group_by.yml000066400000000000000000000271521451700765000242160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 321d6946cde9c7461b59a38eae3899ab5543d06172d5a334c28155723c4cf45b file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: 
naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: region - comma: ',' - column_reference: naked_identifier: city - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: ROLLUP bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: region - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: grouping bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: region - comma: ',' - expression: column_reference: naked_identifier: city - end_bracket: ) alias_expression: keyword: AS naked_identifier: grp_idx - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: keyword: AS naked_identifier: num_total - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: 
start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_poi end_bracket: ) alias_expression: keyword: AS naked_identifier: num_poi - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: count - bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: id end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: is_gov end_bracket: ) alias_expression: keyword: AS naked_identifier: num_gov from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: location_data groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: CUBE bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: region end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/import_foreign_schema.sql000066400000000000000000000003731451700765000267250ustar00rootroot00000000000000IMPORT FOREIGN SCHEMA foreign_films FROM SERVER film_server INTO films; IMPORT FOREIGN SCHEMA "TEST" FROM SERVER test_server INTO test; IMPORT FOREIGN SCHEMA foreign_films LIMIT TO (actors, directors) FROM SERVER film_server INTO films; sqlfluff-2.3.5/test/fixtures/dialects/postgres/import_foreign_schema.yml000066400000000000000000000033261451700765000267300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 07e21c879bb73b81e693b06dd837baeed944a5fb81ca3ea5230e7466117479e9 file: - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: naked_identifier: foreign_films - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: film_server - keyword: INTO - schema_reference: naked_identifier: films - statement_terminator: ; - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: quoted_identifier: '"TEST"' - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: test_server - keyword: INTO - schema_reference: naked_identifier: test - statement_terminator: ; - statement: import_foreign_schema_statement: - keyword: IMPORT - keyword: FOREIGN - keyword: SCHEMA - schema_reference: naked_identifier: foreign_films - keyword: LIMIT - keyword: TO - bracketed: - start_bracket: ( - naked_identifier_all: actors - comma: ',' - naked_identifier_all: directors - end_bracket: ) - keyword: FROM - keyword: SERVER - server_reference: naked_identifier: film_server - keyword: INTO - schema_reference: naked_identifier: films - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/insert.sql000066400000000000000000000036241451700765000236700ustar00rootroot00000000000000INSERT INTO foo (bar) VALUES(current_timestamp); INSERT INTO foo (bar, baz) VALUES(1, 2), (3, 4); INSERT INTO foo (bar, baz) VALUES(1 + 1, 2), (3, 4); INSERT INTO foo (bar) VALUES(DEFAULT); INSERT INTO distributors AS d (did, dname) VALUES (8, 'Anvil Distribution'); INSERT INTO test (id, col1) OVERRIDING SYSTEM VALUE VALUES (1, 'val'); INSERT INTO test (id, col1) OVERRIDING USER VALUE VALUES (1, 'val'); INSERT INTO foo (bar) DEFAULT VALUES; INSERT INTO films SELECT * FROM tmp_films WHERE date_prod < '2004-05-07'; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING *; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING bar; INSERT INTO foo (bar) VALUES(current_timestamp) RETURNING bar AS some_alias; INSERT INTO foo (bar, baz) VALUES(1, 2) RETURNING bar, baz; INSERT INTO foo (bar, baz) VALUES(1, 2) RETURNING bar AS alias1, baz AS alias2; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET baz = EXCLUDED.baz; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO NOTHING; INSERT INTO foo AS f (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET baz = EXCLUDED.baz || ' (formerly ' || f.baz || ')' WHERE f.zipcode != '21201'; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT ON CONSTRAINT foo_pkey DO NOTHING; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) WHERE is_active DO NOTHING; INSERT INTO foo (bar, baz) VALUES (1, 'var') ON CONFLICT (bar) DO UPDATE SET (baz) = (SELECT baz FROM foobar WHERE bar = 1); INSERT INTO megatable (megacolumn) SELECT * FROM ( VALUES ( 'megavalue' ) ) AS tmp (megacolumn) WHERE NOT EXISTS ( SELECT FROM megatable AS mt WHERE mt.megacolumn = tmp.megacolumn ) ON CONFLICT DO NOTHING; INSERT INTO abc (foo, bar) SELECT foo, bar FROM baz RETURNING quux ; INSERT INTO tbl_a ( val1 , val2 ) SELECT val1 , val2 FROM tbl_2 ON CONFLICT ( val1 , COALESCE(val2, '') ) DO NOTHING; sqlfluff-2.3.5/test/fixtures/dialects/postgres/insert.yml000066400000000000000000000537001451700765000236720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 19fd06450d8248e10751d35bf526338cfbc1e0a1160318a915c2acd111f7004c file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: distributors - alias_expression: keyword: AS naked_identifier: d - bracketed: - start_bracket: ( - column_reference: naked_identifier: did - comma: ',' - column_reference: naked_identifier: dname - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '8' - comma: ',' - expression: quoted_literal: "'Anvil Distribution'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - keyword: OVERRIDING - keyword: SYSTEM - keyword: VALUE - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'val'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: test - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: col1 - end_bracket: ) - keyword: OVERRIDING - keyword: USER - keyword: VALUE - values_clause: keyword: VALUES 
bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'val'" - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: films - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_films where_clause: keyword: WHERE expression: column_reference: naked_identifier: date_prod comparison_operator: raw_comparison_operator: < quoted_literal: "'2004-05-07'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - star: '*' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - alias_expression: keyword: AS naked_identifier: some_alias - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - comma: ',' - expression: column_reference: naked_identifier: baz - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - keyword: RETURNING - expression: column_reference: naked_identifier: bar - alias_expression: keyword: AS naked_identifier: alias1 - comma: ',' - expression: column_reference: naked_identifier: baz - alias_expression: keyword: AS naked_identifier: alias2 - statement_terminator: ; - statement: insert_statement: - keyword: 
INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - expression: column_reference: - naked_identifier: EXCLUDED - dot: . - naked_identifier: baz - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - alias_expression: keyword: AS naked_identifier: f - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - expression: - column_reference: - naked_identifier: EXCLUDED - dot: . - naked_identifier: baz - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "' (formerly '" - binary_operator: - pipe: '|' - pipe: '|' - column_reference: - naked_identifier: f - dot: . - naked_identifier: baz - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "')'" - keyword: WHERE - expression: column_reference: - naked_identifier: f - dot: . - naked_identifier: zipcode comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '=' quoted_literal: "'21201'" - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: - keyword: 'ON' - keyword: CONSTRAINT - parameter: foo_pkey - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) keyword: WHERE expression: column_reference: naked_identifier: is_active - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'var'" - end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - conflict_action: - keyword: DO - keyword: UPDATE - keyword: SET - bracketed: start_bracket: ( column_reference: naked_identifier: baz end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foobar where_clause: keyword: WHERE expression: column_reference: naked_identifier: bar comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: megatable - bracketed: start_bracket: ( column_reference: naked_identifier: megacolumn end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'megavalue'" end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: tmp bracketed: start_bracket: ( identifier_list: naked_identifier: megacolumn end_bracket: ) where_clause: keyword: WHERE expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: megatable alias_expression: keyword: AS naked_identifier: mt where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: mt - dot: . - naked_identifier: megacolumn - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tmp - dot: . - naked_identifier: megacolumn end_bracket: ) - keyword: 'ON' - keyword: CONFLICT - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: abc - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comma: ',' - column_reference: naked_identifier: bar - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz - keyword: RETURNING - expression: column_reference: naked_identifier: quux - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: tbl_a - bracketed: - start_bracket: ( - column_reference: naked_identifier: val1 - comma: ',' - column_reference: naked_identifier: val2 - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: val1 - comma: ',' - select_clause_element: column_reference: naked_identifier: val2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_2 - keyword: 'ON' - keyword: CONFLICT - conflict_target: bracketed: start_bracket: ( column_reference: naked_identifier: val1 comma: ',' function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val2 - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) end_bracket: ) - conflict_action: - keyword: DO - keyword: NOTHING - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/join_lateral.sql000066400000000000000000000006071451700765000250250ustar00rootroot00000000000000-- Postgres should work with standard joins select tbl1.id from tbl1 join tbl2 on tbl1.id = tbl2.id; -- ... but also with lateral joins select tbl1.id from tbl1 join lateral tbl2 on tbl1.id = tbl2.id; -- ... and mixed ones as well! select tbl1.id from tbl1 full outer join lateral tbl2 on tbl1.id = tbl2.id cross join tbl3 on tbl1.id = tbl3.id left join lateral tbl4 on tbl1.id = tbl4.id; sqlfluff-2.3.5/test/fixtures/dialects/postgres/join_lateral.yml000066400000000000000000000120251451700765000250240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e6004ee8ed27c696d29099874de40fe8832c7b10dfc5d946e8da9d1d8ef0566 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . 
- naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: tbl2 join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 join_clause: - keyword: join - keyword: lateral - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - join_clause: - keyword: full - keyword: outer - keyword: join - keyword: lateral - from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl2 - dot: . - naked_identifier: id - join_clause: - keyword: cross - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: tbl3 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl3 - dot: . - naked_identifier: id - join_clause: - keyword: left - keyword: join - keyword: lateral - from_expression_element: table_expression: table_reference: naked_identifier: tbl4 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tbl1 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tbl4 - dot: . - naked_identifier: id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/join_no_space.sql000066400000000000000000000002161451700765000251640ustar00rootroot00000000000000-- Not missing space before ON SELECT * FROM "my_table2" INNER JOIN "my_database"."my_schema"."my_table"ON ("my_table2".foo = "my_table".foo) sqlfluff-2.3.5/test/fixtures/dialects/postgres/join_no_space.yml000066400000000000000000000033451451700765000251740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 035e5deaa786c505125b234894eff38e45deeaebc72d8dd2b29c345d09871420 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"my_table2"' join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - quoted_identifier: '"my_database"' - dot: . - quoted_identifier: '"my_schema"' - dot: . - quoted_identifier: '"my_table"' - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: quoted_identifier: '"my_table2"' dot: . naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"my_table"' dot: . naked_identifier: foo end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/postgres/json_operators.sql000066400000000000000000000012301451700765000254220ustar00rootroot00000000000000-- SQL from issue #2033 SELECT COALESCE(doc#>>'{fields}','') AS field FROM mytable WHERE doc ->> 'some_field' = 'some_value'; -- Get JSON array element (indexed from zero, negative integers count from the end) SELECT '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json->2; -- Get JSON object field by key SELECT '{"a": {"b":"foo"}}'::json->'a'; -- Get JSON array element as text SELECT '[1,2,3]'::json->>2; -- Get JSON object field as text SELECT '{"a":1,"b":2}'::json->>'b'; -- Get JSON object at the specified path SELECT '{"a": {"b":{"c": "foo"}}}'::json#>'{a,b}'; -- Get JSON object at the specified path as text SELECT '{"a":[1,2,3],"b":[4,5,6]}'::json#>>'{a,2}'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/json_operators.yml000066400000000000000000000077121451700765000254370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 48a081d4a74ecbf99ac89dadf074297fb904014b8f2b684a8e5b927ccc7cc7a0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: doc binary_operator: '#>>' quoted_literal: "'{fields}'" - comma: ',' - expression: quoted_literal: "''" - end_bracket: ) alias_expression: keyword: AS naked_identifier: field from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable where_clause: keyword: WHERE expression: - column_reference: naked_identifier: doc - binary_operator: ->> - quoted_literal: "'some_field'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[{\"a\":\"foo\"},{\"b\":\"bar\"},{\"c\":\"baz\"}]'" casting_operator: '::' data_type: keyword: json binary_operator: -> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\": {\"b\":\"foo\"}}'" casting_operator: '::' data_type: keyword: json binary_operator: -> quoted_literal: "'a'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'[1,2,3]'" casting_operator: '::' data_type: keyword: json binary_operator: ->> numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":1,\"b\":2}'" casting_operator: '::' data_type: keyword: json binary_operator: ->> quoted_literal: "'b'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\": {\"b\":{\"c\": \"foo\"}}}'" casting_operator: '::' data_type: keyword: json binary_operator: '#>' quoted_literal: "'{a,b}'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'{\"a\":[1,2,3],\"b\":[4,5,6]}'" casting_operator: '::' data_type: keyword: json binary_operator: '#>>' quoted_literal: "'{a,2}'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/limit_clause.sql000066400000000000000000000003211451700765000250250ustar00rootroot00000000000000SELECT col_a FROM test_table LIMIT 2 * 5 * 10 OFFSET (5 + 10); SELECT col_a FROM test_table LIMIT (10 / 10) OFFSET 10 - 5; SELECT col_a FROM test_table LIMIT 100; SELECT col_a FROM test_table LIMIT ALL; sqlfluff-2.3.5/test/fixtures/dialects/postgres/limit_clause.yml000066400000000000000000000056751451700765000250500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c001065984213084c6197e3e7c67ef167e71f952c7aff6a88f0a4be14228046d file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '5' - binary_operator: '*' - numeric_literal: '10' - keyword: OFFSET - expression: bracketed: start_bracket: ( expression: - numeric_literal: '5' - binary_operator: + - numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - bracketed: start_bracket: ( expression: - numeric_literal: '10' - binary_operator: / - numeric_literal: '10' end_bracket: ) - keyword: OFFSET - expression: - numeric_literal: '10' - binary_operator: '-' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: keyword: LIMIT numeric_literal: '100' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col_a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table limit_clause: - keyword: LIMIT - keyword: ALL - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/load.sql000066400000000000000000000000711451700765000232740ustar00rootroot00000000000000LOAD 'funzioniGDB.so'; LOAD '/some/path/funzioniGDB.so'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/load.yml000066400000000000000000000012051451700765000232760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d7765299469a748d088e3edfca77fc480c6e422fe904acf1e0aa2adf6b20b167 file: - statement: load_statement: keyword: LOAD quoted_literal: "'funzioniGDB.so'" - statement_terminator: ; - statement: load_statement: keyword: LOAD quoted_literal: "'/some/path/funzioniGDB.so'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/lock_table.sql000066400000000000000000000003061451700765000244550ustar00rootroot00000000000000LOCK TABLE films IN SHARE MODE; LOCK TABLE films IN SHARE ROW EXCLUSIVE MODE; LOCK TABLE team IN ACCESS EXCLUSIVE MODE; lock table stud1 IN SHARE UPDATE EXCLUSIVE MODE; LOCK TABLE crontable NOWAIT; sqlfluff-2.3.5/test/fixtures/dialects/postgres/lock_table.yml000066400000000000000000000030511451700765000244570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0b634bc3cad1afb1344957fa51eeb1e7115d94e2c30635877c25bbed99daad50 file: - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: films - keyword: IN - keyword: SHARE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: films - keyword: IN - keyword: SHARE - keyword: ROW - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: team - keyword: IN - keyword: ACCESS - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: lock - keyword: table - table_reference: naked_identifier: stud1 - keyword: IN - keyword: SHARE - keyword: UPDATE - keyword: EXCLUSIVE - keyword: MODE - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: naked_identifier: crontable - keyword: NOWAIT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/notifications.sql000066400000000000000000000003701451700765000252300ustar00rootroot00000000000000LISTEN virtual; NOTIFY virtual; UNLISTEN virtual; LISTEN "virtual listener"; NOTIFY "virtual listener"; UNLISTEN "virtual listener"; LISTEN listener_a; LISTEN listener_b; NOTIFY listener_a, 'payload_a'; NOTIFY listener_b, 'payload_b'; UNLISTEN * sqlfluff-2.3.5/test/fixtures/dialects/postgres/notifications.yml000066400000000000000000000033751451700765000252420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1a8724603b31e1a4dde7c44d3582aa88829be16616255d5118659ffbdcb077aa file: - statement: listen_statement: keyword: LISTEN naked_identifier: virtual - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: virtual - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN naked_identifier: virtual - statement_terminator: ; - statement: listen_statement: keyword: LISTEN quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN quoted_identifier: '"virtual listener"' - statement_terminator: ; - statement: listen_statement: keyword: LISTEN naked_identifier: listener_a - statement_terminator: ; - statement: listen_statement: keyword: LISTEN naked_identifier: listener_b - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: listener_a comma: ',' quoted_literal: "'payload_a'" - statement_terminator: ; - statement: notify_statement: keyword: NOTIFY naked_identifier: listener_b comma: ',' quoted_literal: "'payload_b'" - statement_terminator: ; - statement: unlisten_statement: keyword: UNLISTEN star: '*' sqlfluff-2.3.5/test/fixtures/dialects/postgres/null_filters.sql000066400000000000000000000006421451700765000250630ustar00rootroot00000000000000-- Check nullability tests with standard and non-standard syntax SELECT nullable_field IS NULL as standard_is_null, nullable_field ISNULL as non_standard_is_null, nullable_field IS NOT NULL as standard_not_null, nullable_field NOTNULL as non_standard_not_null FROM t_test WHERE nullable_field IS NULL OR nullable_field ISNULL OR nullable_field IS NOT NULL OR nullable_field NOTNULL sqlfluff-2.3.5/test/fixtures/dialects/postgres/null_filters.yml000066400000000000000000000046631451700765000250740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f926ecc8538c230076c465d10eb56e484cf41d078050822760485b810841eb15 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: IS null_literal: 'NULL' alias_expression: keyword: as naked_identifier: standard_is_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: ISNULL alias_expression: keyword: as naked_identifier: non_standard_is_null - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' alias_expression: keyword: as naked_identifier: standard_not_null - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: nullable_field keyword: NOTNULL alias_expression: keyword: as naked_identifier: non_standard_not_null from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_test where_clause: keyword: WHERE expression: - column_reference: naked_identifier: nullable_field - keyword: IS - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: ISNULL - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: OR - column_reference: naked_identifier: nullable_field - keyword: NOTNULL sqlfluff-2.3.5/test/fixtures/dialects/postgres/overlaps.sql000066400000000000000000000004161451700765000242130ustar00rootroot00000000000000-- with DATE select start_date, end_date from test_overlaps where (start_date, end_date) overlaps (DATE '2023-02-15', DATE '2023-03-15'); select start_date, end_date from test_overlaps where (start_date, end_date) overlaps ('2023-02-15', '2023-03-15'); sqlfluff-2.3.5/test/fixtures/dialects/postgres/overlaps.yml000066400000000000000000000053241451700765000242200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b52f94d6189b518815f0ab38c9a623899123c23bf3e6a7a98647d57aecca1640 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: where expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) overlaps_clause: keyword: overlaps bracketed: - start_bracket: ( - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-02-15'" - comma: ',' - datetime_literal: datetime_type_identifier: keyword: DATE quoted_literal: "'2023-03-15'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: start_date - comma: ',' - select_clause_element: column_reference: naked_identifier: end_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_overlaps where_clause: keyword: where expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: start_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) overlaps_clause: keyword: overlaps bracketed: - start_bracket: ( - datetime_literal: quoted_literal: "'2023-02-15'" - comma: ',' - datetime_literal: quoted_literal: "'2023-03-15'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/pattern_match_expressions.sql000066400000000000000000000030231451700765000276500ustar00rootroot00000000000000-- postgres_pattern_match_expressions.sql /* examples of pattern match expressions ( https://www.postgresql.org/docs/14/functions-matching.html ) that are supported in postgres. 
*/ -- LIKE/ILIKE expressions supported SELECT * FROM animals WHERE family LIKE '%ursidae%'; SELECT * FROM animals WHERE family NOT LIKE '%ursidae%'; SELECT * FROM animals WHERE genus ILIKE '%ursus%'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%'; SELECT * FROM animals WHERE family LIKE '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%' ESCAPE '\\'; SELECT COALESCE(family LIKE '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- SIMILAR TO expressions supported SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE family NOT SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE genus SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%' ESCAPE '\\'; SELECT COALESCE(family SIMILAR TO '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- From https://github.com/sqlfluff/sqlfluff/issues/2722 WITH cleaned_bear_financial_branch AS ( SELECT branch_id, TO_NUMBER(CASE WHEN honey_numerical_code SIMILAR TO '[0-9]{0,7}.?[0-9]{0,2}' THEN honey_numerical_code ELSE NULL END, '24601') AS honey_numerical_code FROM bear_financial_branch ) SELECT branch_id FROM cleaned_bear_financial_branch LIMIT 10; sqlfluff-2.3.5/test/fixtures/dialects/postgres/pattern_match_expressions.yml000066400000000000000000000311761451700765000276640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aa70ac109a77bef6e01f8fba80b5c7474d2fd192720d43719d24f668ff025471 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: family keyword: LIKE quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: LIKE - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: genus keyword: ILIKE quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cleaned_bear_financial_branch keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch_id - comma: ',' - 
select_clause_element: function: function_name: function_name_identifier: TO_NUMBER bracketed: - start_bracket: ( - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: honey_numerical_code - keyword: SIMILAR - keyword: TO - quoted_literal: "'[0-9]{0,7}.?[0-9]{0,2}'" - keyword: THEN - expression: column_reference: naked_identifier: honey_numerical_code - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - comma: ',' - expression: quoted_literal: "'24601'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: honey_numerical_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_financial_branch end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: branch_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cleaned_bear_financial_branch limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/position.sql000066400000000000000000000002471451700765000242260ustar00rootroot00000000000000select u.user_id, u.user_email, p.product_id from user_tb as u inner join product_tb as p on u.user_id = p.user_id and position('@domain' in u.user_email) = 0 sqlfluff-2.3.5/test/fixtures/dialects/postgres/position.yml000066400000000000000000000051211451700765000242240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e6d06db599e49ebcc64c788b834d49b40648199852b1a274d69fc41ec08e530f file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: user_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: u - dot: . - naked_identifier: user_email - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: product_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: user_tb alias_expression: keyword: as naked_identifier: u join_clause: - keyword: inner - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: product_tb alias_expression: keyword: as naked_identifier: p - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: u - dot: . - naked_identifier: user_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: user_id - binary_operator: and - function: function_name: function_name_identifier: position bracketed: start_bracket: ( quoted_literal: "'@domain'" keyword: in column_reference: - naked_identifier: u - dot: . 
- naked_identifier: user_email end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' sqlfluff-2.3.5/test/fixtures/dialects/postgres/postgis.sql000066400000000000000000000027321451700765000240530ustar00rootroot00000000000000CREATE TABLE public.foo ( quadkey TEXT, my_geometry_column GEOMETRY (GEOMETRY, 4326), my_point POINT(0 0), my_linestring LINESTRING(0 0, 1 1, 2 1, 2 2), my_simple_polygon POLYGON((0 0, 1 0, 1 1, 0 1, 0 0)), my_complex_polygon POLYGON((0 0, 10 0, 10 10, 0 10, 0 0),(1 1, 1 2, 2 2, 2 1, 1 1)), my_geometry_collection GEOMETRYCOLLECTION(POINT(2 0),POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))), my_3d_linestring LINESTRINGZ (0 0 0,1 0 0,1 1 2), my_geography_column GEOGRAPHY(GEOGRAPHY, 6679), my_4d_point POINTZM(1, 1, 1, 1), my_multicurve MULTICURVE( (0 0, 5 5), CIRCULARSTRING(4 0, 4 4, 8 4) ), my_tin TIN( ((0 0 0, 0 0 1, 0 1 0, 0 0 0)), ((0 0 0, 0 1 0, 1 1 0, 0 0 0)) ), my_triangle TRIANGLE ((0 0, 0 9, 9 0, 0 0)), my_polyhedral_surface POLYHEDRALSURFACE( ((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)), ((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)), ((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)), ((1 1 0, 1 1 1, 1 0 1, 1 0 0, 1 1 0)), ((0 1 0, 0 1 1, 1 1 1, 1 1 0, 0 1 0)), ((0 0 1, 1 0 1, 1 1 1, 0 1 1, 0 0 1)) ), my_3d_geometry_collection GEOMETRYCOLLECTIONM( POINTM(2 3 9), LINESTRINGM(2 3 4, 3 4 5) ), my_curve_polygon CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1)), my_multisurface MULTISURFACE(CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1)),((10 10, 14 12, 11 10, 10 10),(11 11, 11.5 11, 11 11.5, 11 11))), my_circularstring CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0), PRIMARY KEY (quadkey) ); sqlfluff-2.3.5/test/fixtures/dialects/postgres/postgis.yml000066400000000000000000000536211451700765000240600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c10308e95f79279d0b35450b6a45cea25fea6d3f47481659ccb09343bc63dca2 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: quadkey - data_type: keyword: TEXT - comma: ',' - column_reference: naked_identifier: my_geometry_column - data_type: wkt_geometry_type: keyword: GEOMETRY bracketed: start_bracket: ( keyword: GEOMETRY comma: ',' numeric_literal: '4326' end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_point - data_type: wkt_geometry_type: keyword: POINT bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_linestring - data_type: wkt_geometry_type: keyword: LINESTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_simple_polygon - data_type: wkt_geometry_type: keyword: POLYGON bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_complex_polygon - data_type: wkt_geometry_type: keyword: POLYGON bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '10' - numeric_literal: '0' - comma: ',' - numeric_literal: '10' - numeric_literal: '10' - comma: ',' - numeric_literal: '0' - numeric_literal: '10' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '2' - comma: ',' - numeric_literal: '2' - numeric_literal: '2' - comma: ',' - numeric_literal: '2' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_geometry_collection - data_type: wkt_geometry_type: keyword: GEOMETRYCOLLECTION bracketed: - start_bracket: ( - wkt_geometry_type: keyword: POINT bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '0' - end_bracket: ) - comma: ',' - wkt_geometry_type: keyword: POLYGON bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_3d_linestring - data_type: wkt_geometry_type: keyword: LINESTRINGZ bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_geography_column - data_type: wkt_geometry_type: keyword: GEOGRAPHY bracketed: start_bracket: ( 
keyword: GEOGRAPHY comma: ',' numeric_literal: '6679' end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_4d_point - data_type: wkt_geometry_type: keyword: POINTZM bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_multicurve - data_type: wkt_geometry_type: keyword: MULTICURVE bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '5' - numeric_literal: '5' - end_bracket: ) comma: ',' wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '8' - numeric_literal: '4' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_tin - data_type: wkt_geometry_type: keyword: TIN bracketed: - start_bracket: ( - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_triangle - data_type: wkt_geometry_type: keyword: TRIANGLE bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '9' - comma: ',' - numeric_literal: '9' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_polyhedral_surface - data_type: wkt_geometry_type: keyword: POLYHEDRALSURFACE bracketed: - start_bracket: ( - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - 
start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '0' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - comma: ',' - bracketed: start_bracket: ( bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '0' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_3d_geometry_collection - data_type: wkt_geometry_type: keyword: GEOMETRYCOLLECTIONM bracketed: - start_bracket: ( - wkt_geometry_type: keyword: POINTM bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '3' - numeric_literal: '9' - end_bracket: ) - comma: ',' - wkt_geometry_type: keyword: LINESTRINGM bracketed: - start_bracket: ( - numeric_literal: '2' - numeric_literal: '3' - numeric_literal: '4' - comma: ',' - numeric_literal: '3' - numeric_literal: '4' - numeric_literal: '5' - end_bracket: ) - end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_curve_polygon - data_type: wkt_geometry_type: keyword: CURVEPOLYGON bracketed: start_bracket: ( wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) comma: ',' bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_multisurface - data_type: wkt_geometry_type: keyword: MULTISURFACE bracketed: start_bracket: ( wkt_geometry_type: keyword: 
CURVEPOLYGON bracketed: start_bracket: ( wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) comma: ',' bracketed: - start_bracket: ( - numeric_literal: '1' - numeric_literal: '1' - comma: ',' - numeric_literal: '3' - numeric_literal: '3' - comma: ',' - numeric_literal: '3' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - numeric_literal: '1' - end_bracket: ) end_bracket: ) comma: ',' bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - numeric_literal: '10' - numeric_literal: '10' - comma: ',' - numeric_literal: '14' - numeric_literal: '12' - comma: ',' - numeric_literal: '11' - numeric_literal: '10' - comma: ',' - numeric_literal: '10' - numeric_literal: '10' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - numeric_literal: '11' - numeric_literal: '11' - comma: ',' - numeric_literal: '11.5' - numeric_literal: '11' - comma: ',' - numeric_literal: '11' - numeric_literal: '11.5' - comma: ',' - numeric_literal: '11' - numeric_literal: '11' - end_bracket: ) - end_bracket: ) end_bracket: ) - comma: ',' - column_reference: naked_identifier: my_circularstring - data_type: wkt_geometry_type: keyword: CIRCULARSTRING bracketed: - start_bracket: ( - numeric_literal: '0' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '0' - comma: ',' - numeric_literal: '4' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '4' - comma: ',' - numeric_literal: '0' - numeric_literal: '0' - end_bracket: ) - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: quadkey end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/psql_meta_command.sql000066400000000000000000000003371451700765000260450ustar00rootroot00000000000000\echo "thing" \echo "thing" \x \\ \echo "thing" SELECT 1; SELECT 2; SELECT 1 + 3; SELECT 1; \echo "thing" \\ SELECT 1; \echo "thing" \echo "thing2" \prompt 'Region (1 - quebec, 2 - east, 3 - west): ' region_number sqlfluff-2.3.5/test/fixtures/dialects/postgres/psql_meta_command.yml000066400000000000000000000024521451700765000260470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 885466d038482d3e004642c24f43593a6aeedf01dca7978598694841001be7ec file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/psql_variable.sql000066400000000000000000000016271451700765000252110ustar00rootroot00000000000000\prompt 'From Member #: ' m1 \prompt 'To Member #: ' m2 \prompt 'Charge Account #: ' a SELECT 'from' AS direction, users.email, rona_mms_charge_accounts.account_number FROM memberships JOIN users ON users.id = memberships.user_id LEFT OUTER JOIN rona_mms_charge_accounts ON users.id = rona_mms_charge_accounts.customer_id WHERE memberships.code = (:m1)::text AND rona_mms_charge_accounts.account_number = lpad((:a)::text, 10, '0'); \prompt 'From Member #: ' m1 \prompt 'To Member #: ' m2 \prompt 'Charge Account #: ' a SELECT 'from' AS direction, users.email, rona_mms_charge_accounts.account_number FROM memberships JOIN users ON users.id = memberships.user_id LEFT OUTER JOIN rona_mms_charge_accounts ON users.id = rona_mms_charge_accounts.customer_id WHERE memberships.code = :'m1' AND rona_mms_charge_accounts.account_number = lpad(:'a', 10, '0'); sqlfluff-2.3.5/test/fixtures/dialects/postgres/psql_variable.yml000066400000000000000000000165301451700765000252120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 24020179e2b89bfd88feaad436dce27f0d6cc8af2fc76b3b7b55019bc9900d1c file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'from'" alias_expression: keyword: AS naked_identifier: direction - comma: ',' - select_clause_element: column_reference: - naked_identifier: users - dot: . - naked_identifier: email - comma: ',' - select_clause_element: column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: memberships - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: users join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: user_id - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: rona_mms_charge_accounts - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . 
- naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: code - comparison_operator: raw_comparison_operator: '=' - cast_expression: bracketed: start_bracket: ( expression: psql_variable: colon: ':' parameter: m1 end_bracket: ) casting_operator: '::' data_type: keyword: text - binary_operator: AND - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: lpad bracketed: - start_bracket: ( - expression: cast_expression: bracketed: start_bracket: ( expression: psql_variable: colon: ':' parameter: a end_bracket: ) casting_operator: '::' data_type: keyword: text - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'0'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'from'" alias_expression: keyword: AS naked_identifier: direction - comma: ',' - select_clause_element: column_reference: - naked_identifier: users - dot: . - naked_identifier: email - comma: ',' - select_clause_element: column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: account_number from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: memberships - join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: users join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: user_id - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: rona_mms_charge_accounts - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: users - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: memberships - dot: . - naked_identifier: code - comparison_operator: raw_comparison_operator: '=' - psql_variable: colon: ':' quoted_literal: "'m1'" - binary_operator: AND - column_reference: - naked_identifier: rona_mms_charge_accounts - dot: . 
- naked_identifier: account_number - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: lpad bracketed: - start_bracket: ( - expression: psql_variable: colon: ':' quoted_literal: "'a'" - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'0'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/range_operators.sql000066400000000000000000000013331451700765000255510ustar00rootroot00000000000000SELECT word.*, paragraph.id AS paragraph_id FROM word INNER JOIN paragraph ON paragraph.page_id = word.page_id WHERE word.character_range @> paragraph.character_range AND word.character_range <@ paragraph.character_range AND word.character_range && paragraph.character_range AND word.character_range << paragraph.character_range AND word.character_range >> paragraph.character_range AND word.character_range &> paragraph.character_range AND word.character_range &< paragraph.character_range AND word.character_range -|- paragraph.character_range AND word.character_range + paragraph.character_range AND word.character_range * paragraph.character_range AND word.character_range - paragraph.character_range sqlfluff-2.3.5/test/fixtures/dialects/postgres/range_operators.yml000066400000000000000000000132661451700765000255630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c7f82e72dc37ec47d9013ba40c43b4e16f1722417eb887b0763fb164e4f3045e file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: word dot: . star: '*' - comma: ',' - select_clause_element: column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: id alias_expression: keyword: AS naked_identifier: paragraph_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: word join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: paragraph - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: page_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: word - dot: . - naked_identifier: page_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '@>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: <@ - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: - ampersand: '&' - ampersand: '&' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . 
- naked_identifier: character_range - binary_operator: - raw_comparison_operator: < - raw_comparison_operator: < - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: ampersand: '&' raw_comparison_operator: '>' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: ampersand: '&' raw_comparison_operator: < - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - comparison_operator: - binary_operator: '-' - pipe: '|' - binary_operator: '-' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: + - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '*' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range - binary_operator: AND - column_reference: - naked_identifier: word - dot: . - naked_identifier: character_range - binary_operator: '-' - column_reference: - naked_identifier: paragraph - dot: . - naked_identifier: character_range sqlfluff-2.3.5/test/fixtures/dialects/postgres/reassign_owned.sql000066400000000000000000000004231451700765000253650ustar00rootroot00000000000000REASSIGN OWNED BY bob TO alice; REASSIGN OWNED BY bob, ted TO alice; REASSIGN OWNED BY bob, CURRENT_ROLE, ted, CURRENT_USER, sam, SESSION_USER TO alice; REASSIGN OWNED BY bob TO CURRENT_ROLE; REASSIGN OWNED BY bob TO CURRENT_USER; REASSIGN OWNED BY bob TO SESSION_USER; sqlfluff-2.3.5/test/fixtures/dialects/postgres/reassign_owned.yml000066400000000000000000000042221451700765000253700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7924caa50d4473f8535da52f0a3b8091a85c2cebb11fe03765ba868e31d034c6 file: - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - role_reference: naked_identifier: ted - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - comma: ',' - keyword: CURRENT_ROLE - comma: ',' - role_reference: naked_identifier: ted - comma: ',' - keyword: CURRENT_USER - comma: ',' - role_reference: naked_identifier: sam - comma: ',' - keyword: SESSION_USER - keyword: TO - role_reference: naked_identifier: alice - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: CURRENT_ROLE - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: CURRENT_USER - statement_terminator: ; - statement: reassign_owned_statement: - keyword: REASSIGN - keyword: OWNED - keyword: BY - role_reference: naked_identifier: bob - keyword: TO - keyword: SESSION_USER - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/refresh_materialized_view.sql000066400000000000000000000002401451700765000275750ustar00rootroot00000000000000REFRESH MATERIALIZED VIEW bar; REFRESH MATERIALIZED VIEW CONCURRENTLY bar; REFRESH MATERIALIZED VIEW bar WITH DATA; REFRESH MATERIALIZED VIEW bar WITH NO DATA; sqlfluff-2.3.5/test/fixtures/dialects/postgres/refresh_materialized_view.yml000066400000000000000000000025641451700765000276120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 011617b04ac29d2c81f9dde19306c07200d628114014dbb490659d79ed70013c file: - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - keyword: CONCURRENTLY - table_reference: naked_identifier: bar - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - with_data_clause: - keyword: WITH - keyword: DATA - statement_terminator: ; - statement: refresh_materialized_view_statement: - keyword: REFRESH - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: bar - with_data_clause: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/reindex.sql000066400000000000000000000003521451700765000240150ustar00rootroot00000000000000REINDEX INDEX my_index; REINDEX TABLE my_table; REINDEX DATABASE broken_db; REINDEX TABLE CONCURRENTLY my_broken_table; REINDEX (CONCURRENTLY FALSE) SYSTEM mydb; REINDEX (TABLESPACE my_tablespace, VERBOSE TRUE) SCHEMA my_schema; sqlfluff-2.3.5/test/fixtures/dialects/postgres/reindex.yml000066400000000000000000000034601451700765000240220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 37b97d6e332473f78947f58784c940e24a02c75218973d039b289b1c103668e5 file: - statement: reindex_statement_segment: - keyword: REINDEX - keyword: INDEX - index_reference: naked_identifier: my_index - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: DATABASE - database_reference: naked_identifier: broken_db - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - keyword: TABLE - keyword: CONCURRENTLY - table_reference: naked_identifier: my_broken_table - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - bracketed: start_bracket: ( keyword: CONCURRENTLY boolean_literal: 'FALSE' end_bracket: ) - keyword: SYSTEM - database_reference: naked_identifier: mydb - statement_terminator: ; - statement: reindex_statement_segment: - keyword: REINDEX - bracketed: - start_bracket: ( - keyword: TABLESPACE - tablespace_reference: naked_identifier: my_tablespace - comma: ',' - keyword: VERBOSE - boolean_literal: 'TRUE' - end_bracket: ) - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/reset.sql000066400000000000000000000000471451700765000235020ustar00rootroot00000000000000RESET timezone; RESET ALL; RESET ROLE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/reset.yml000066400000000000000000000012761451700765000235110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f0addcc750a516055a2f60b81c5fb1476adfddf3f5c7ddbcf08ed68ea56d9872 file: - statement: reset_statement: keyword: RESET parameter: timezone - statement_terminator: ; - statement: reset_statement: - keyword: RESET - keyword: ALL - statement_terminator: ; - statement: reset_statement: - keyword: RESET - keyword: ROLE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/revoke.sql000066400000000000000000000000411451700765000236450ustar00rootroot00000000000000REVOKE lc_anonymous FROM lc_api; sqlfluff-2.3.5/test/fixtures/dialects/postgres/revoke.yml000066400000000000000000000011511451700765000236520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4e1d6e503ec62e4d3225c85fac6b6450391572150b70aa3cad19f8f64ab2a795 file: statement: access_statement: - keyword: REVOKE - object_reference: naked_identifier: lc_anonymous - keyword: FROM - object_reference: naked_identifier: lc_api statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select.sql000066400000000000000000000052321451700765000236400ustar00rootroot00000000000000SELECT timestamp with time zone '2005-04-02 12:00:00-07' + interval '1 day'; -- DATEADD is not a function in postgres so this should parse day as column name SELECT DATEADD(day, -2, current_date); SELECT timestamptz '2013-07-01 12:00:00' - timestamptz '2013-03-01 12:00:00'; SELECT 1.0::int; SELECT '2015-10-24 16:38:46'::TIMESTAMP; SELECT '2015-10-24 16:38:46'::TIMESTAMP AT TIME ZONE 'UTC'; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITH TIME ZONE; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITH TIME ZONE AT TIME ZONE 'UTC'; SELECT '2015-10-24 16:38:46'::TIMESTAMP WITHOUT TIME ZONE; SELECT '2015-10-24 16:38:46'::TIMESTAMPTZ; SELECT '2015-10-24 16:38:46'::TIMESTAMPTZ AT TIME ZONE 'UTC'; -- Some more example from https://database.guide/how-at-time-zone-works-in-postgresql/ SELECT timestamp with time zone '2025-11-20 00:00:00+00' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp with time zone '2025-11-20 00:00:00'; SELECT timestamp without time zone '2025-11-20 00:00:00' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp without time zone '2025-11-20 00:00:00+12' AT TIME ZONE 'Africa/Cairo'; SELECT timestamp without time zone '2025-11-20 00:00:00+12'; SELECT time with time zone '00:00:00+00' AT TIME ZONE 'Africa/Cairo'; SELECT time without time zone '00:00:00' AT TIME ZONE 'Africa/Cairo'; SELECT c_timestamp AT TIME ZONE 'Africa/Cairo' FROM t_table; SELECT (c_timestamp AT TIME ZONE 'Africa/Cairo')::time FROM t_table; SELECT a::double precision FROM my_table; SELECT schema1.table1.columna, t.col2 FROM schema1.table1 CROSS JOIN LATERAL somefunc(tb.columnb) as t(col1 text, col2 bool); SELECT a COLLATE "de_DE" < b FROM test1; SELECT a < ('foo' COLLATE "fr_FR") FROM test1; SELECT a < b COLLATE "de_DE" FROM test1; SELECT a COLLATE "de_DE" < b FROM test1; SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; -- Select elements are optional in Postgres SELECT FROM test1; -- keywords can be used as column names without quotes if qualified select id, start, periods.end from periods; SELECT concat_lower_or_upper('Hello', 'World', true); SELECT concat_lower_or_upper(a => 
'Hello', b => 'World'); SELECT concat_lower_or_upper('Hello', 'World', uppercase => true); -- row-level locks can be used in Selects SELECT * FROM mytable FOR UPDATE; SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = 5; SELECT col1, col2 FROM mytable1 JOIN mytable2 ON col1 = col2 ORDER BY sync_time ASC FOR SHARE OF mytable1, mytable2 SKIP LOCKED LIMIT 1; Select * from foo TABLESAMPLE SYSTEM (10); Select * from foo TABLESAMPLE BERNOULLI (10); -- use of dollar quote in query SELECT * FROM (SELECT * FROM mytable FOR UPDATE) ss WHERE col1 = $1; SELECT i + $1 INTO j from foo; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select.yml000066400000000000000000000644101451700765000236450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 348309b797f6429b65e4a5f06569e534d325fefae4c392fe07f6d38a3f7c7a16 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2005-04-02 12:00:00-07'" - binary_operator: + - datetime_literal: datetime_type_identifier: keyword: interval quoted_literal: "'1 day'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: day - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2' - comma: ',' - expression: bare_function: current_date - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - datetime_literal: datetime_type_identifier: keyword: timestamptz quoted_literal: "'2013-07-01 12:00:00'" - binary_operator: '-' - datetime_literal: datetime_type_identifier: keyword: timestamptz quoted_literal: "'2013-03-01 12:00:00'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: numeric_literal: '1.0' casting_operator: '::' data_type: keyword: int - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: 
quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: quoted_literal: "'2015-10-24 16:38:46'" casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: with - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+12'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: datetime_literal: datetime_type_identifier: - keyword: timestamp - keyword: without - keyword: time - keyword: zone quoted_literal: "'2025-11-20 00:00:00+12'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: time - keyword: with - keyword: time - keyword: zone quoted_literal: "'00:00:00+00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: datetime_literal: datetime_type_identifier: - keyword: time - keyword: without - keyword: time - keyword: zone quoted_literal: 
"'00:00:00'" time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: c_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: c_timestamp time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'Africa/Cairo'" end_bracket: ) casting_operator: '::' data_type: datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: a casting_operator: '::' data_type: - keyword: double - keyword: precision from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - dot: . - naked_identifier: columna - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 join_clause: - keyword: CROSS - keyword: JOIN - keyword: LATERAL - from_expression_element: table_expression: function: function_name: function_name_identifier: somefunc bracketed: start_bracket: ( expression: column_reference: - naked_identifier: tb - dot: . 
- naked_identifier: columnb end_bracket: ) alias_expression: keyword: as naked_identifier: t bracketed: - start_bracket: ( - parameter: col1 - data_type: keyword: text - comma: ',' - parameter: col2 - data_type: keyword: bool - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: quoted_literal: "'foo'" keyword: COLLATE column_reference: quoted_identifier: '"fr_FR"' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - column_reference: naked_identifier: a - keyword: COLLATE - column_reference: quoted_identifier: '"de_DE"' - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 orderby_clause: - keyword: ORDER - keyword: BY - expression: - column_reference: naked_identifier: a - binary_operator: - pipe: '|' - pipe: '|' - column_reference: naked_identifier: b - keyword: COLLATE - column_reference: quoted_identifier: '"fr_FR"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: start - comma: ',' - select_clause_element: column_reference: naked_identifier: periods dot: . 
naked_identifier_all: end from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: periods - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper bracketed: - start_bracket: ( - expression: quoted_literal: "'Hello'" - comma: ',' - expression: quoted_literal: "'World'" - comma: ',' - expression: boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper bracketed: - start_bracket: ( - named_argument: naked_identifier: a right_arrow: => expression: quoted_literal: "'Hello'" - comma: ',' - named_argument: naked_identifier: b right_arrow: => expression: quoted_literal: "'World'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: concat_lower_or_upper bracketed: - start_bracket: ( - expression: quoted_literal: "'Hello'" - comma: ',' - expression: quoted_literal: "'World'" - comma: ',' - named_argument: naked_identifier: uppercase right_arrow: => expression: boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE end_bracket: ) alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: mytable2 join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: sync_time - keyword: ASC for_clause: - keyword: FOR - keyword: SHARE - keyword: OF - table_reference: naked_identifier: mytable1 - comma: ',' - table_reference: naked_identifier: mytable2 - keyword: SKIP - keyword: LOCKED limit_clause: keyword: LIMIT numeric_literal: '1' - 
statement_terminator: ; - statement: select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: Select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sample_expression: - keyword: TABLESAMPLE - keyword: BERNOULLI - bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable for_clause: - keyword: FOR - keyword: UPDATE end_bracket: ) alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' dollar_numeric_literal: $1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: i binary_operator: + dollar_numeric_literal: $1 into_clause: keyword: INTO table_reference: naked_identifier: j from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_case_cast.sql000066400000000000000000000005541451700765000256470ustar00rootroot00000000000000select col0, case when col1 then col2 else col3 end::text as mycol from table1; select col0, case when col1 then col2 else col3 end::int::float as mycol from table1; select col0, cast(case when col1 then col2 else col3 end as text) as mycol from table1; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_case_cast.yml000066400000000000000000000105541451700765000256520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 62ccc35af08b6bf76cd70ea215f1624b9b96478daeec28618b4dca5510747a59 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end casting_operator: '::' data_type: keyword: text alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: expression: cast_expression: - case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end - casting_operator: '::' - data_type: keyword: int - casting_operator: '::' - data_type: keyword: float alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col0 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: col1 - keyword: then - expression: column_reference: naked_identifier: col2 - else_clause: keyword: else expression: column_reference: naked_identifier: col3 - keyword: end keyword: as data_type: keyword: text end_bracket: ) alias_expression: keyword: as naked_identifier: mycol from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_frame_clause.sql000066400000000000000000000011301451700765000263370ustar00rootroot00000000000000select venuestate, venueseats, venuename, first_value(venuename ignore nulls) over(partition by venuestate order by venueseats desc rows between unbounded preceding and unbounded following) as col_name from table_name; SELECT rank () OVER (ORDER BY my_column RANGE BETWEEN 12 FOLLOWING AND CURRENT ROW EXCLUDE NO OTHERS); SELECT rank () OVER (ORDER BY my_column GROUPS UNBOUNDED PRECEDING EXCLUDE GROUP); SELECT rank () OVER (ORDER BY my_column RANGE BETWEEN INTERVAL '1 YEAR - 1 DAYS' PRECEDING AND INTERVAL '15 DAYS' PRECEDING); sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_frame_clause.yml000066400000000000000000000126111451700765000263470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a2048f3f99a5a611fca4057c716cca3368f283650e74ae2d59125235ee04634e file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: venuestate - comma: ',' - select_clause_element: column_reference: naked_identifier: venueseats - comma: ',' - select_clause_element: column_reference: naked_identifier: venuename - comma: ',' - select_clause_element: function: function_name: function_name_identifier: first_value bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: venuename - keyword: ignore - keyword: nulls - end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: venuestate orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: venueseats - keyword: desc frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) alias_expression: keyword: as naked_identifier: col_name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: RANGE - keyword: BETWEEN - numeric_literal: '12' - keyword: FOLLOWING - keyword: AND - keyword: CURRENT - keyword: ROW - keyword: EXCLUDE - keyword: 'NO' - keyword: OTHERS end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: GROUPS - keyword: UNBOUNDED - keyword: PRECEDING - keyword: EXCLUDE - keyword: GROUP end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: rank bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: my_column frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: INTERVAL - quoted_literal: "'1 YEAR - 1 DAYS'" - keyword: PRECEDING - keyword: AND - keyword: INTERVAL - quoted_literal: "'15 DAYS'" - keyword: PRECEDING end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_into.sql000066400000000000000000000002051451700765000246640ustar00rootroot00000000000000SELECT foo, bar INTO baz FROM qux; SELECT * INTO TEMP TABLE baz; SELECT * INTO TEMPORARY baz; SELECT * INTO UNLOGGED baz; sqlfluff-2.3.5/test/fixtures/dialects/postgres/select_into.yml000066400000000000000000000040261451700765000246730ustar00rootroot00000000000000# YML test 
files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 39721c9c2ae192671c8136c9efac4feeb59675570b953e2893832ed3be408259 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar into_clause: keyword: INTO table_reference: naked_identifier: baz from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: qux - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: baz - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: TEMPORARY - table_reference: naked_identifier: baz - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: - keyword: INTO - keyword: UNLOGGED - table_reference: naked_identifier: baz - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/set.sql000066400000000000000000000006131451700765000231520ustar00rootroot00000000000000SET LOCAL search_path = DEFAULT; SET search_path TO my_schema, public; SET datestyle TO postgres, dmy; SET SESSION datestyle TO postgres, 'dmy'; SET value = on, off, auto; SET value = TRUE, FALSE; SET TIME ZONE 'PST8PDT'; SET TIME ZONE 'Europe/Rome'; SET TIME ZONE LOCAL; SET TIME ZONE DEFAULT; SET SCHEMA 'my_schema'; SET SCHEMA 'public'; SET ROLE my_role; SET ROLE "my role"; SET ROLE NONE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/set.yml000066400000000000000000000057461451700765000231700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
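The `_hash` guard described in the comment above is what keeps these fixtures honest: the test suite recomputes a digest of the fixture content and fails if it differs from the stored value, so hand edits are caught. The exact routine lives in SQLFluff's test helpers; the sketch below is only a plausible reconstruction, assuming a SHA-256 digest over the serialised fixture body (the 64-character hex values in these files are at least consistent with SHA-256).

import hashlib


def fixture_hash(yaml_payload: str) -> str:
    """Digest a fixture body; a sketch, not SQLFluff's actual helper."""
    # Assumption: the digest covers the UTF-8 encoded YAML payload as-is.
    # The real implementation may normalise whitespace or exclude keys.
    return hashlib.sha256(yaml_payload.encode("utf-8")).hexdigest()

A test would compare `fixture_hash(...)` against the stored `_hash` and, on mismatch, direct the contributor back to `python test/generate_parse_fixture_yml.py` rather than accepting a manual edit.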
_hash: 1f838a5987921dc5b619c4bd6293baa5f135cd12fa558d2f18a6198dc6578d3d file: - statement: set_statement: - keyword: SET - keyword: LOCAL - parameter: search_path - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: search_path - keyword: TO - naked_identifier: my_schema - comma: ',' - naked_identifier: public - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: datestyle - keyword: TO - naked_identifier: postgres - comma: ',' - naked_identifier: dmy - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SESSION - parameter: datestyle - keyword: TO - naked_identifier: postgres - comma: ',' - quoted_literal: "'dmy'" - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: value - comparison_operator: raw_comparison_operator: '=' - naked_identifier: 'on' - comma: ',' - naked_identifier: 'off' - comma: ',' - naked_identifier: auto - statement_terminator: ; - statement: set_statement: - keyword: SET - parameter: value - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - boolean_literal: 'FALSE' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'PST8PDT'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - quoted_literal: "'Europe/Rome'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: LOCAL - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: TIME - keyword: ZONE - keyword: DEFAULT - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SCHEMA - quoted_literal: "'my_schema'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: SCHEMA - quoted_literal: "'public'" - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - role_reference: quoted_identifier: '"my role"' - statement_terminator: ; - statement: set_statement: - keyword: SET - keyword: ROLE - keyword: NONE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/single_quote.sql000066400000000000000000000003041451700765000250520ustar00rootroot00000000000000SELECT ''; SELECT ''''; SELECT ' '; SELECT '''aaa'''; SELECT ' '' '; SELECT '\'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' -- some comment 'bar'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/single_quote.yml000066400000000000000000000043031451700765000250570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
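The `single_quote.sql` fixture above exercises a PostgreSQL quirk: string constants separated only by whitespace that includes a newline are concatenated into a single value, which is why the tree below records `'foo'` and `'bar'` as sibling `quoted_literal` segments. SQLFluff's simple Python API can produce such trees directly; a minimal sketch, assuming `sqlfluff.parse` returns a nested record shaped like the YAML bodies in these fixtures (the exact return type varies between releases):

import sqlfluff

# Two literals split across lines: one concatenated string in PostgreSQL.
record = sqlfluff.parse("SELECT 'foo'\n'bar';", dialect="postgres")
# The record mirrors the fixture body, e.g. a select_clause_element
# containing two quoted_literal entries.
print(record)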
_hash: 4308147bbc98560af6bb3876254e2a121b14a42b4cbec69f86006b63c8dd00f5 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/subquery.sql000066400000000000000000000002731451700765000242400ustar00rootroot00000000000000INSERT INTO target_table (target_column) SELECT table1.column1 FROM table1 INNER JOIN ( SELECT table2.join_column FROM table2 ) AS temp3 ON table1.join_column = temp3.join_column sqlfluff-2.3.5/test/fixtures/dialects/postgres/subquery.yml000066400000000000000000000051221451700765000242400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2f1520e8812dc6b95c25f73d1700df0e41e2db8b0352863f4e26851e591ecc08 file: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: target_table - bracketed: start_bracket: ( column_reference: naked_identifier: target_column end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: column1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table2 - dot: . - naked_identifier: join_column from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table2 end_bracket: ) alias_expression: keyword: AS naked_identifier: temp3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: join_column - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: temp3 - dot: . - naked_identifier: join_column sqlfluff-2.3.5/test/fixtures/dialects/postgres/table_functions.sql000066400000000000000000000003651451700765000255420ustar00rootroot00000000000000select * from unnest(array['123', '456']); select * from unnest(array['123', '456']) as a(val, row_num); select * from unnest(array['123', '456']) with ordinality; select * from unnest(array['123', '456']) with ordinality as a(val, row_num); sqlfluff-2.3.5/test/fixtures/dialects/postgres/table_functions.yml000066400000000000000000000116541451700765000255470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 87790be6cd29e35d6b49f004079316a4d1a8ce524ff8545019db85b2247c4b25 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: unnest bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: unnest bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) alias_expression: keyword: as naked_identifier: a bracketed: start_bracket: ( identifier_list: - naked_identifier: val - comma: ',' - naked_identifier: row_num end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: - function: function_name: function_name_identifier: unnest bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - keyword: with - keyword: ordinality - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: - function: function_name: function_name_identifier: unnest bracketed: start_bracket: ( expression: typed_array_literal: array_type: keyword: array array_literal: - start_square_bracket: '[' - quoted_literal: "'123'" - comma: ',' - quoted_literal: "'456'" - end_square_bracket: ']' end_bracket: ) - keyword: with - keyword: ordinality alias_expression: keyword: as naked_identifier: 
a bracketed: start_bracket: ( identifier_list: - naked_identifier: val - comma: ',' - naked_identifier: row_num end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/truncate.sql000066400000000000000000000007651451700765000242140ustar00rootroot00000000000000TRUNCATE bigtable; TRUNCATE some_schema.bigtable; TRUNCATE TABLE bigtable; TRUNCATE ONLY bigtable; TRUNCATE TABLE ONLY bigtable; TRUNCATE bigtable *; TRUNCATE TABLE bigtable *; TRUNCATE bigtable, fattable; TRUNCATE TABLE bigtable, fattable; TRUNCATE ONLY bigtable, fattable *; TRUNCATE bigtable RESTART IDENTITY; TRUNCATE bigtable CONTINUE IDENTITY; TRUNCATE bigtable CASCADE; TRUNCATE bigtable RESTRICT; TRUNCATE TABLE ONLY bigtable, fattable *, ONLY slimtable CONTINUE IDENTITY CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/postgres/truncate.yml000066400000000000000000000065341451700765000242160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5b3af45b738a10f8b39d63c9d068e8931c6b259b24bda06e49e1797314c99d7f file: - statement: truncate_table: keyword: TRUNCATE table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: keyword: TRUNCATE table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: ONLY - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - keyword: ONLY - table_reference: naked_identifier: bigtable - statement_terminator: ; - statement: truncate_table: keyword: TRUNCATE table_reference: naked_identifier: bigtable star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: ONLY - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - star: '*' - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: RESTART - keyword: IDENTITY - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: CONTINUE - keyword: IDENTITY - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: CASCADE - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - table_reference: naked_identifier: bigtable - keyword: RESTRICT - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - 
keyword: ONLY - table_reference: naked_identifier: bigtable - comma: ',' - table_reference: naked_identifier: fattable - star: '*' - comma: ',' - keyword: ONLY - table_reference: naked_identifier: slimtable - keyword: CONTINUE - keyword: IDENTITY - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/unicode_double_quote.sql000066400000000000000000000001151451700765000265510ustar00rootroot00000000000000SELECT U&"a"; SELECT U&"aaaa" UESCAPE '!'; SELECT U&"aaaa" UESCAPE '!'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/unicode_double_quote.yml000066400000000000000000000020131451700765000265520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d00383d8995415732be5182002939807cff91562ee2d226861396d5a73b65210 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_literal: U&"a" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_literal: "U&\"aaaa\" UESCAPE '!'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_literal: "U&\"aaaa\"\n\n UESCAPE\n '!'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/unicode_single_quote.sql000066400000000000000000000002601451700765000265610ustar00rootroot00000000000000SELECT U&''; SELECT U&' '; SELECT U&''''; SELECT U&'aaa'''; SELECT U&' '' '; SELECT U&'' UESCAPE '!'; SELECT U&'asdf' UESCAPE 'P'; SELECT U&' somestuff ' UESCAPE '?'; sqlfluff-2.3.5/test/fixtures/dialects/postgres/unicode_single_quote.yml000066400000000000000000000034441451700765000265720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
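The unicode-escape fixtures above (`U&'...' UESCAPE '!'`) only lex correctly when the PostgreSQL dialect is selected, which is what these per-dialect directories are for. Linting through the simple API makes the dialect choice explicit; a small sketch (the example literal is the one from the PostgreSQL documentation, and the precise keys of each violation dict are version-dependent):

import sqlfluff

# U&'d!0061t!+000061' with UESCAPE '!' spells 'data' via unicode escapes.
violations = sqlfluff.lint(
    "SELECT U&'d!0061t!+000061' UESCAPE '!';",
    dialect="postgres",
)
for violation in violations:
    # Each entry is a dict carrying a rule code, description and position;
    # print it whole rather than assuming particular key names.
    print(violation)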
_hash: 09f1c96dbf54c8f69b60a676d2bc40e9fc0b0ec9a0194347d3190d82d047ca1e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'' UESCAPE '!'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'asdf' UESCAPE 'P'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "U&'\nsomestuff\n'\nUESCAPE\n\n\n'?'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/update_table.sql000066400000000000000000000027011451700765000250100ustar00rootroot00000000000000UPDATE films SET kind = 'Dramatic' WHERE kind = 'Drama'; UPDATE weather SET temp_lo = temp_lo+1, temp_hi = temp_lo+15, prcp = DEFAULT WHERE city = 'San Francisco' AND date = '2003-07-03'; UPDATE weather SET temp_lo = temp_lo+1, temp_hi = temp_lo+15, prcp = DEFAULT WHERE city = 'San Francisco' AND date = '2003-07-03' RETURNING temp_lo, temp_hi, prcp; UPDATE weather SET (temp_lo, temp_hi, prcp) = (temp_lo+1, temp_lo+15, DEFAULT) WHERE city = 'San Francisco' AND date = '2003-07-03'; UPDATE employees SET sales_count = sales_count + 1 FROM accounts WHERE accounts.name = 'Acme Corporation' AND employees.id = accounts.sales_person; UPDATE employees SET sales_count = sales_count + 1 WHERE id = (SELECT sales_person FROM accounts WHERE name = 'Acme Corporation'); UPDATE accounts SET (contact_first_name, contact_last_name) = (SELECT first_name, last_name FROM salesmen WHERE salesmen.id = accounts.sales_id); UPDATE accounts SET contact_first_name = first_name, contact_last_name = last_name FROM salesmen WHERE salesmen.id = accounts.sales_id; UPDATE summary s SET (sum_x, sum_y, avg_x, avg_y) = (SELECT sum(x), sum(y), avg(x), avg(y) FROM data d WHERE d.group_id = s.group_id); UPDATE films SET kind = 'Dramatic' WHERE CURRENT OF c_films; UPDATE my_table SET my_column = "SQLFluff rules!" RETURNING my_column; UPDATE employees SET deleted_at = NOW() WHERE uuid = $1 RETURNING short_name AS employee_name; sqlfluff-2.3.5/test/fixtures/dialects/postgres/update_table.yml000066400000000000000000000413221451700765000250140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
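The `UPDATE ... RETURNING` statements above parse cleanly, but the same grammar also powers SQLFluff's auto-fix path. A minimal sketch using the simple API's `fix` entry point, feeding it an inconsistently cased variant of one fixture statement; which rules actually rewrite the input depends on the active configuration, so the behaviour noted in the comment is illustrative rather than guaranteed:

import sqlfluff

fixed = sqlfluff.fix(
    "UPDATE employees SET deleted_at = NOW() where uuid = $1 RETURNING short_name;",
    dialect="postgres",
)
# Under the default capitalisation rules the lone lowercase 'where' is
# made consistent with the surrounding uppercase keywords; other rules
# may apply further edits.
print(fixed)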
_hash: 26b055849f4a57835725cace78c91ced7d84f04fea5b9cb0fcf57778b25602bd file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: films set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Dramatic'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Drama'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: weather set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: temp_lo comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: temp_hi comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - set_clause: column_reference: naked_identifier: prcp comparison_operator: raw_comparison_operator: '=' keyword: DEFAULT where_clause: keyword: WHERE expression: - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: weather - set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: temp_lo comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - set_clause: column_reference: naked_identifier: temp_hi comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - set_clause: column_reference: naked_identifier: prcp comparison_operator: raw_comparison_operator: '=' keyword: DEFAULT - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - keyword: RETURNING - expression: column_reference: naked_identifier: temp_lo - comma: ',' - expression: column_reference: naked_identifier: temp_hi - comma: ',' - expression: column_reference: naked_identifier: prcp - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: weather set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: temp_lo - comma: ',' - column_reference: naked_identifier: temp_hi - comma: ',' - column_reference: naked_identifier: prcp - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: temp_lo binary_operator: + numeric_literal: '15' - comma: ',' - keyword: DEFAULT - end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: 
naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'San Francisco'" - binary_operator: AND - column_reference: naked_identifier: date - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2003-07-03'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: sales_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: sales_count binary_operator: + numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: accounts where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: accounts - dot: . - naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Acme Corporation'" - binary_operator: AND - column_reference: - naked_identifier: employees - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . - naked_identifier: sales_person - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: employees set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: sales_count comparison_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: sales_count binary_operator: + numeric_literal: '1' where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: sales_person from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: accounts where_clause: keyword: WHERE expression: column_reference: naked_identifier: name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Acme Corporation'" end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: accounts set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: contact_first_name - comma: ',' - column_reference: naked_identifier: contact_last_name - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: first_name - comma: ',' - select_clause_element: column_reference: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: salesmen where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: salesmen - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . 
- naked_identifier: sales_id end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: accounts set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: contact_first_name - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: first_name - comma: ',' - set_clause: - column_reference: naked_identifier: contact_last_name - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: last_name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: salesmen where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: salesmen - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: accounts - dot: . - naked_identifier: sales_id - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: summary alias_expression: naked_identifier: s set_clause_list: keyword: SET set_clause: - bracketed: - start_bracket: ( - column_reference: naked_identifier: sum_x - comma: ',' - column_reference: naked_identifier: sum_y - comma: ',' - column_reference: naked_identifier: avg_x - comma: ',' - column_reference: naked_identifier: avg_y - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: x end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: y end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: data alias_expression: naked_identifier: d where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: group_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . 
- naked_identifier: group_id end_bracket: ) - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: films - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: kind comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Dramatic'" - keyword: WHERE - keyword: CURRENT - keyword: OF - object_reference: naked_identifier: c_films - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: my_table - set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: my_column - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '"SQLFluff rules!"' - keyword: RETURNING - expression: column_reference: naked_identifier: my_column - statement_terminator: ; - statement: update_statement: - keyword: UPDATE - table_reference: naked_identifier: employees - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: deleted_at comparison_operator: raw_comparison_operator: '=' function: function_name: function_name_identifier: NOW bracketed: start_bracket: ( end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: naked_identifier: uuid comparison_operator: raw_comparison_operator: '=' dollar_numeric_literal: $1 - keyword: RETURNING - expression: column_reference: naked_identifier: short_name - alias_expression: keyword: AS naked_identifier: employee_name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/vacuum.sql000066400000000000000000000013221451700765000236550ustar00rootroot00000000000000-- Old-style vacuum commands VACUUM; VACUUM FULL; VACUUM FREEZE; VACUUM VERBOSE; VACUUM ANALYZE; VACUUM ANALYSE; VACUUM FULL FREEZE VERBOSE ANALYSE; VACUUM tbl; VACUUM tbl1, tbl2; VACUUM FULL FREEZE VERBOSE ANALYSE tbl1, tbl2; VACUUM FULL tbl1 (col1, col2), tbl2; VACUUM FULL tbl1 (col1), tbl2 (col1, col2); -- New-style vacuum commands VACUUM (FULL); VACUUM (FULL, FREEZE) tbl1; VACUUM (FULL, FREEZE) tbl1 (col1, col2), tbl2 (col3); VACUUM (FULL TRUE, FREEZE); VACUUM ( FULL TRUE, FREEZE FALSE, VERBOSE, ANALYZE, ANALYSE, DISABLE_PAGE_SKIPPING, SKIP_LOCKED, INDEX_CLEANUP on, PROCESS_TOAST, TRUNCATE, PARALLEL 70 ); VACUUM (INDEX_CLEANUP off); VACUUM (INDEX_CLEANUP auto); sqlfluff-2.3.5/test/fixtures/dialects/postgres/vacuum.yml000066400000000000000000000123241451700765000236630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
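Each `.sql` file above and its companion `.yml` (like the vacuum pair here) define a round-trip contract: parsing the SQL must yield exactly the stored tree. The project's real harness lives in its test suite; the sketch below is only an illustration of the idea, and it assumes both that PyYAML is available and that `sqlfluff.parse` returns a record matching the fixture's structure once the bookkeeping `_hash` key is dropped.

import yaml

import sqlfluff


def test_vacuum_fixture_roundtrip():
    # Paths mirror the layout of this repository.
    with open("test/fixtures/dialects/postgres/vacuum.sql") as f:
        sql = f.read()
    with open("test/fixtures/dialects/postgres/vacuum.yml") as f:
        expected = yaml.safe_load(f)
    expected.pop("_hash", None)  # drop the tamper-check key before comparing
    assert sqlfluff.parse(sql, dialect="postgres") == expected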
_hash: 617238e8c1decb78501479b84b05ee00fae0aca517bd0d5d3d0b112a6dcd7f29 file: - statement: vacuum_statement: keyword: VACUUM - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FREEZE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: VERBOSE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: ANALYZE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: ANALYSE - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - keyword: FREEZE - keyword: VERBOSE - keyword: ANALYSE - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM table_reference: naked_identifier: tbl - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - table_reference: naked_identifier: tbl1 - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - keyword: FREEZE - keyword: VERBOSE - keyword: ANALYSE - table_reference: naked_identifier: tbl1 - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - keyword: FULL - table_reference: naked_identifier: tbl1 - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: FULL end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - comma: ',' - keyword: FREEZE - end_bracket: ) table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - bracketed: - start_bracket: ( - keyword: FULL - comma: ',' - keyword: FREEZE - end_bracket: ) - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - comma: ',' - table_reference: naked_identifier: tbl2 - bracketed: start_bracket: ( column_reference: naked_identifier: col3 end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - boolean_literal: 'TRUE' - comma: ',' - keyword: FREEZE - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: - start_bracket: ( - keyword: FULL - boolean_literal: 'TRUE' - comma: ',' - keyword: FREEZE - boolean_literal: 'FALSE' - comma: ',' - keyword: VERBOSE - comma: ',' - keyword: ANALYZE - comma: ',' - keyword: ANALYSE - comma: ',' - keyword: DISABLE_PAGE_SKIPPING - comma: ',' - keyword: SKIP_LOCKED - comma: ',' - keyword: INDEX_CLEANUP - naked_identifier: 'on' - comma: ',' - keyword: PROCESS_TOAST - comma: ',' - 
keyword: TRUNCATE - comma: ',' - keyword: PARALLEL - numeric_literal: '70' - end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: INDEX_CLEANUP naked_identifier: 'off' end_bracket: ) - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM bracketed: start_bracket: ( keyword: INDEX_CLEANUP naked_identifier: auto end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/values.sql000066400000000000000000000004511451700765000236560ustar00rootroot00000000000000values (1, 2); VALUES (1+1, 2); values (1+1, 2::TEXT); values (1, 2), (3, 4); values (1, 2), (3, 4), (greatest(5, 6), least(7, 8)); values (1, 2), (3, 4) limit 1; values (1, 2), (3, 4) limit 1 offset 1; values (1, 2), (3, 4) order by 1 desc; values (1, 2), (3, 4) order by 1 desc limit 1; sqlfluff-2.3.5/test/fixtures/dialects/postgres/values.yml000066400000000000000000000126441451700765000236670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db76603e2a397e9b7c520ed4f0e09342feb06f377aa28ba15e1fb91c31a2bf73 file: - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '1' - comma: ',' - expression: cast_expression: numeric_literal: '2' casting_operator: '::' data_type: keyword: TEXT - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: greatest bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: least bracketed: - start_bracket: ( - expression: numeric_literal: '7' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: 
( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - limit_clause: keyword: limit numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - limit_clause: - keyword: limit - numeric_literal: '1' - keyword: offset - numeric_literal: '1' - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: desc - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - keyword: desc - limit_clause: keyword: limit numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/values_alias.sql000066400000000000000000000000721451700765000250260ustar00rootroot00000000000000select * from ( values (1, 2), (3, 4) ) as t(c1, c2); sqlfluff-2.3.5/test/fixtures/dialects/postgres/values_alias.yml000066400000000000000000000034241451700765000250340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
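# For ad-hoc inspection of a tree like the one below, the project's simple
# Python API can be used directly -- a minimal sketch, assuming the 2.x
# behaviour where parse() returns a JSON-style dict mirroring the `file:`
# mapping in these fixtures:
#
#   import sqlfluff
#
#   # Parse the statement from values_alias.sql with the postgres dialect;
#   # an unparsable statement raises an error instead of returning a tree.
#   tree = sqlfluff.parse(
#       "select * from ( values (1, 2), (3, 4) ) as t(c1, c2);",
#       dialect="postgres",
#   )
#   print(tree)
#
# The CLI equivalent is `sqlfluff parse values_alias.sql --dialect postgres`.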
_hash: 524022b1cbeea95a98dd9051cc80866af2297b19a850252bad74252b11173be8 file: statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/values_in_subquery.sql000066400000000000000000000003061451700765000263020ustar00rootroot00000000000000WITH t (col_1, col_2) AS ( VALUES ('08RIX0', 0.435::NUMERIC(4, 3)) ) SELECT * FROM t; SELECT * FROM ( VALUES (1) ) AS t(c1); SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2); sqlfluff-2.3.5/test/fixtures/dialects/postgres/values_in_subquery.yml000066400000000000000000000103711451700765000263070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 933cfd65728859ad86b8bd1b29e76c524ff542d39de19ac6cd7ec3587b4814a6 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: col_1 - comma: ',' - naked_identifier: col_2 end_bracket: ) keyword: AS bracketed: start_bracket: ( values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'08RIX0'" - comma: ',' - expression: cast_expression: numeric_literal: '0.435' casting_operator: '::' data_type: keyword: NUMERIC bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '4' - comma: ',' - numeric_literal: '3' - end_bracket: ) - end_bracket: ) end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - 
expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: c1 - comma: ',' - naked_identifier: c2 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/with.sql000066400000000000000000000025451451700765000233400ustar00rootroot00000000000000WITH w AS MATERIALIZED ( SELECT * FROM other_table ) SELECT * FROM w AS w1 JOIN w AS w2 ON w1.key = w2.ref WHERE w2.key = 123; WITH w AS NOT MATERIALIZED ( SELECT * FROM big_table ) SELECT * FROM w AS w1 JOIN w AS w2 ON w1.key = w2.ref WHERE w2.key = 123; WITH RECURSIVE search_tree(id, link, data) AS ( SELECT t.id, t.link, t.data FROM tree t UNION ALL SELECT t.id, t.link, t.data FROM tree t, search_tree st WHERE t.id = st.link ) SEARCH DEPTH FIRST BY id SET ordercol SELECT * FROM search_tree ORDER BY ordercol; WITH RECURSIVE search_tree(id, link, data) AS ( SELECT t.id, t.link, t.data FROM tree t UNION ALL SELECT t.id, t.link, t.data FROM tree t, search_tree st WHERE t.id = st.link ) SEARCH BREADTH FIRST BY id SET ordercol SELECT * FROM search_tree ORDER BY ordercol; WITH RECURSIVE search_graph(id, link, data, depth) AS ( SELECT g.id, g.link, g.data, 1 FROM graph g UNION ALL SELECT g.id, g.link, g.data, sg.depth + 1 FROM graph g, search_graph sg WHERE g.id = sg.link ) CYCLE id SET is_cycle USING path SELECT * FROM search_graph; -- test that DML queries are also selectable WITH tbl AS ( INSERT INTO a VALUES (5) RETURNING * ) SELECT * FROM tbl; WITH tbl AS ( UPDATE a SET b = 5 RETURNING * ) SELECT * FROM tbl; WITH tbl AS ( DELETE FROM a RETURNING * ) SELECT * FROM tbl; sqlfluff-2.3.5/test/fixtures/dialects/postgres/with.yml000066400000000000000000000516521451700765000233450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7dd7fa95cac9b0cf5224a8f2a0d007882f19ee2e7424f1339a6b0611e55df7b9 file: - statement: with_compound_statement: keyword: WITH common_table_expression: - naked_identifier: w - keyword: AS - keyword: MATERIALIZED - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: keyword: AS naked_identifier: w1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: keyword: AS naked_identifier: w2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: w1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: w2 - dot: . 
- naked_identifier: ref where_clause: keyword: WHERE expression: column_reference: - naked_identifier: w2 - dot: . - naked_identifier: key comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: - naked_identifier: w - keyword: AS - keyword: NOT - keyword: MATERIALIZED - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: big_table end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: keyword: AS naked_identifier: w1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: w alias_expression: keyword: AS naked_identifier: w2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: w1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: w2 - dot: . - naked_identifier: ref where_clause: keyword: WHERE expression: column_reference: - naked_identifier: w2 - dot: . - naked_identifier: key comparison_operator: raw_comparison_operator: '=' numeric_literal: '123' - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_tree - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree alias_expression: naked_identifier: st where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: st - dot: . 
- naked_identifier: link end_bracket: ) - keyword: SEARCH - keyword: DEPTH - keyword: FIRST - keyword: BY - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: ordercol - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ordercol - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_tree - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: data from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tree alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree alias_expression: naked_identifier: st where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: st - dot: . 
- naked_identifier: link end_bracket: ) - keyword: SEARCH - keyword: BREADTH - keyword: FIRST - keyword: BY - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: ordercol - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_tree orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ordercol - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - keyword: RECURSIVE - common_table_expression: - naked_identifier: search_graph - cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: link - comma: ',' - naked_identifier: data - comma: ',' - naked_identifier: depth end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: data - comma: ',' - select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: graph alias_expression: naked_identifier: g - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: link - comma: ',' - select_clause_element: column_reference: - naked_identifier: g - dot: . - naked_identifier: data - comma: ',' - select_clause_element: expression: column_reference: - naked_identifier: sg - dot: . - naked_identifier: depth binary_operator: + numeric_literal: '1' from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: graph alias_expression: naked_identifier: g - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_graph alias_expression: naked_identifier: sg where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: g - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sg - dot: . 
- naked_identifier: link end_bracket: ) - keyword: CYCLE - column_reference: naked_identifier: id - keyword: SET - column_reference: naked_identifier: is_cycle - keyword: USING - column_reference: naked_identifier: path - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: search_graph - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: a - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( update_statement: - keyword: UPDATE - table_reference: naked_identifier: a - set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: tbl keyword: AS bracketed: start_bracket: ( delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: a - keyword: RETURNING - star: '*' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/postgres/within_group.sql000066400000000000000000000002001451700765000250650ustar00rootroot00000000000000-- Postgres style WITHIN GROUP window functions SELECT ARRAY_AGG(o_orderkey) WITHIN GROUP (ORDER BY o_orderkey ASC) FROM orders sqlfluff-2.3.5/test/fixtures/dialects/postgres/within_group.yml000066400000000000000000000025521451700765000251030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
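# Note on the fixture below: WITHIN GROUP attaches to the aggregate call as a
# withingroup_clause wrapping an orderby_clause, rather than being parsed as
# a window (OVER) specification.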
_hash: f6e928977dc64f6d50b765793b36530e5619c76dd380c3a033dc340676fac3e1 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o_orderkey - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders sqlfluff-2.3.5/test/fixtures/dialects/redshift/000077500000000000000000000000001451700765000216005ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/redshift/.sqlfluff000066400000000000000000000000361451700765000234220ustar00rootroot00000000000000[sqlfluff] dialect = redshift sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_datashare.sql000066400000000000000000000007221451700765000254450ustar00rootroot00000000000000ALTER DATASHARE salesshare SET PUBLICACCESSIBLE FALSE; ALTER DATASHARE salesshare SET INCLUDENEW = TRUE FOR SCHEMA public; ALTER DATASHARE salesshare ADD TABLE public.tbl1; ALTER DATASHARE salesshare ADD TABLE public.tbl1, public.tbl2; ALTER DATASHARE salesshare ADD SCHEMA public; ALTER DATASHARE salesshare ADD FUNCTION public.fn1, public.fn2; ALTER DATASHARE salesshare ADD ALL TABLES IN SCHEMA public; ALTER DATASHARE salesshare REMOVE TABLE public.tbl1; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_datashare.yml000066400000000000000000000062161451700765000254530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2b1acf41db31b9f7890e2887ba06c4c546817087234a30c10f9474eb2df487d0 file: - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: SET - keyword: PUBLICACCESSIBLE - boolean_literal: 'FALSE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: SET - keyword: INCLUDENEW - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: FOR - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - comma: ',' - table_reference: - naked_identifier: public - dot: . 
- naked_identifier: tbl2 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: FUNCTION - function_name: naked_identifier: public dot: . function_name_identifier: fn1 - comma: ',' - function_name: naked_identifier: public dot: . function_name_identifier: fn2 - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: ADD - keyword: ALL - keyword: TABLES - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: public - statement_terminator: ; - statement: create_datashare_statement: - keyword: ALTER - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: REMOVE - keyword: TABLE - table_reference: - naked_identifier: public - dot: . - naked_identifier: tbl1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_default_privileges.sql000066400000000000000000000003341451700765000273650ustar00rootroot00000000000000ALTER DEFAULT PRIVILEGES FOR USER the_user IN SCHEMA the_schema GRANT EXECUTE ON PROCEDURES TO the_other_user; ALTER DEFAULT PRIVILEGES FOR USER the_user IN SCHEMA the_schema GRANT EXECUTE ON ROUTINES TO the_other_user; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_default_privileges.yml000066400000000000000000000034641451700765000273760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f4eba93c6a11b233fad3114a515737c0e48856a894636163fe46be9f6c82c520 file: - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: the_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: the_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: PROCEDURES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: the_other_user - statement_terminator: ; - statement: alter_default_privileges_statement: - keyword: ALTER - keyword: DEFAULT - keyword: PRIVILEGES - keyword: FOR - keyword: USER - object_reference: naked_identifier: the_user - keyword: IN - keyword: SCHEMA - schema_reference: naked_identifier: the_schema - alter_default_privileges_grant: - keyword: GRANT - alter_default_privileges_object_privilege: keyword: EXECUTE - keyword: 'ON' - alter_default_privileges_schema_object: keyword: ROUTINES - keyword: TO - alter_default_privileges_to_from_roles: role_reference: naked_identifier: the_other_user - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_group.sql000066400000000000000000000005161451700765000246460ustar00rootroot00000000000000alter group admin_group add user dwuser; alter group admin_group add user dwuser1, dwuser2; alter group admin_group drop user dwuser; alter group admin_group drop user dwuser1, dwuser2; alter group admin_group rename to administrators; alter group admin_group add user "test.user"; alter group "admin_group" add user "test.user"; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_group.yml000066400000000000000000000045021451700765000246470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: dea9d3bfed3f28f59a598e1495b52b9a2051aa4609ca86239a161077c46b3f33 file: - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: naked_identifier: dwuser - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: naked_identifier: dwuser1 - comma: ',' - object_reference: naked_identifier: dwuser2 - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: drop - keyword: user - object_reference: naked_identifier: dwuser - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: drop - keyword: user - object_reference: naked_identifier: dwuser1 - comma: ',' - object_reference: naked_identifier: dwuser2 - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: rename - keyword: to - object_reference: naked_identifier: administrators - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: naked_identifier: admin_group - keyword: add - keyword: user - object_reference: quoted_identifier: '"test.user"' - statement_terminator: ; - statement: alter_group: - keyword: alter - keyword: group - object_reference: quoted_identifier: '"admin_group"' - keyword: add - keyword: user - object_reference: quoted_identifier: '"test.user"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_procedure.sql000066400000000000000000000004511451700765000255000ustar00rootroot00000000000000ALTER PROCEDURE first_quarter_revenue(volume INOUT bigint, at_price IN numeric, result OUT int) RENAME TO quarterly_revenue; ALTER PROCEDURE first_quarter_revenue(bigint, numeric) RENAME TO quarterly_revenue; ALTER PROCEDURE quarterly_revenue(volume bigint, at_price numeric) OWNER TO etl_user; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_procedure.yml000066400000000000000000000042421451700765000255040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ca8018c5a2ffeed0b69802d6765871e436128425ebe00598e3ad114e71b9e227 file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: first_quarter_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: volume - keyword: INOUT - data_type: keyword: bigint - comma: ',' - parameter: at_price - keyword: IN - data_type: keyword: numeric - comma: ',' - parameter: result - keyword: OUT - data_type: keyword: int - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: quarterly_revenue - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: first_quarter_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - data_type: keyword: bigint - comma: ',' - data_type: keyword: numeric - end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: quarterly_revenue - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: quarterly_revenue - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: volume - data_type: keyword: bigint - comma: ',' - parameter: at_price - data_type: keyword: numeric - end_bracket: ) - keyword: OWNER - keyword: TO - parameter: etl_user - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_schema.sql000066400000000000000000000002311451700765000247440ustar00rootroot00000000000000ALTER SCHEMA schema1 RENAME TO schema2; ALTER SCHEMA schema1 OWNER TO new_owner; ALTER SCHEMA schema1 QUOTA 50 GB; ALTER SCHEMA schema1 QUOTA UNLIMITED; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_schema.yml000066400000000000000000000025361451700765000247600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fd0a3f56eabf13406394006718db8bdeb0d09fe9ad3bfdbb476619984fe809b8 file: - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: RENAME - keyword: TO - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: OWNER - keyword: TO - role_reference: naked_identifier: new_owner - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: QUOTA - numeric_literal: '50' - keyword: GB - statement_terminator: ; - statement: alter_schema_statement: - keyword: ALTER - keyword: SCHEMA - schema_reference: naked_identifier: schema1 - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_table.sql000066400000000000000000000024771451700765000246110ustar00rootroot00000000000000ALTER TABLE example_table ADD CONSTRAINT example_name PRIMARY KEY (example_sk); alter table users rename to users_bkup; alter table venue owner to dwuser; alter table vdate owner to vuser; alter table venue rename column venueseats to venuesize; alter table category drop constraint category_pkey; alter table event alter column eventname type varchar(300); create table t1(c0 int encode lzo, c1 bigint encode zstd, c2 varchar(16) encode lzo, c3 varchar(32) encode zstd); alter table t1 alter column c0 encode az64; alter table t1 alter column c1 encode az64; alter table t1 alter column c2 encode bytedict; alter table t1 alter column c3 encode runlength; alter table inventory alter diststyle key distkey inv_warehouse_sk; alter table inventory alter distkey inv_item_sk; alter table inventory alter diststyle all; alter table t1 alter sortkey(c0, c1); alter table t1 alter sortkey none; alter table t1 alter sortkey(c0, c1); alter table t1 alter encode auto; alter table t2 alter column c0 encode lzo; ALTER TABLE the_schema.the_table ADD COLUMN the_timestamp TIMESTAMP; ALTER TABLE the_schema.the_table ADD COLUMN the_boolean BOOLEAN DEFAULT FALSE; alter table users add column feedback_score int default NULL; alter table users drop column feedback_score; alter table users drop column feedback_score cascade; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_table.yml000066400000000000000000000242041451700765000246030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
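# Typical workflow when extending coverage: append the new statement to
# alter_table.sql, then rerun `python test/generate_parse_fixture_yml.py`
# (usually from the repository root) so that this file and its _hash field
# are regenerated together; editing the YAML by hand leaves the hash stale
# and the enforcement check failing.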
_hash: b358e5c9e83a669ac2d4bdf4ac461d1798190d2ff1004402370fd07685404904 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: example_table - alter_table_action_segment: keyword: ADD table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: example_name - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: example_sk end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: rename - keyword: to - parameter: users_bkup - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: venue - alter_table_action_segment: - keyword: owner - keyword: to - parameter: dwuser - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: vdate - alter_table_action_segment: - keyword: owner - keyword: to - parameter: vuser - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: venue - keyword: rename - keyword: column - column_reference: naked_identifier: venueseats - keyword: to - column_reference: naked_identifier: venuesize - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: category - alter_table_action_segment: - keyword: drop - keyword: constraint - parameter: category_pkey - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: event - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: eventname - keyword: type - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '300' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - data_type: keyword: int - column_attribute_segment: - keyword: encode - keyword: lzo - comma: ',' - column_reference: naked_identifier: c1 - data_type: keyword: bigint - column_attribute_segment: - keyword: encode - keyword: zstd - comma: ',' - column_reference: naked_identifier: c2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '16' end_bracket: ) - column_attribute_segment: - keyword: encode - keyword: lzo - comma: ',' - column_reference: naked_identifier: c3 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '32' end_bracket: ) - column_attribute_segment: - keyword: encode - keyword: zstd - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c0 - keyword: encode - keyword: az64 - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c1 - keyword: encode - keyword: az64 - statement_terminator: ; - statement: 
alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c2 - keyword: encode - keyword: bytedict - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c3 - keyword: encode - keyword: runlength - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: diststyle - keyword: key - keyword: distkey - column_reference: naked_identifier: inv_warehouse_sk - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: distkey - column_reference: naked_identifier: inv_item_sk - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: inventory - alter_table_action_segment: - keyword: alter - keyword: diststyle - keyword: all - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - comma: ',' - column_reference: naked_identifier: c1 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - keyword: none - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: sortkey - bracketed: - start_bracket: ( - column_reference: naked_identifier: c0 - comma: ',' - column_reference: naked_identifier: c1 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_action_segment: - keyword: alter - keyword: encode - keyword: auto - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t2 - alter_table_action_segment: - keyword: alter - keyword: column - column_reference: naked_identifier: c0 - keyword: encode - keyword: lzo - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . - naked_identifier: the_table - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: the_timestamp - data_type: datetime_type_identifier: keyword: TIMESTAMP - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: the_schema - dot: . 
- naked_identifier: the_table - alter_table_action_segment: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: the_boolean - data_type: keyword: BOOLEAN - keyword: DEFAULT - expression: boolean_literal: 'FALSE' - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: add - keyword: column - column_reference: naked_identifier: feedback_score - data_type: keyword: int - keyword: default - expression: null_literal: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: drop - keyword: column - column_reference: naked_identifier: feedback_score - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: users - alter_table_action_segment: - keyword: drop - keyword: column - column_reference: naked_identifier: feedback_score - keyword: cascade - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_user.sql000066400000000000000000000032531451700765000244710ustar00rootroot00000000000000alter user admin createdb; alter user admin with createdb; alter user admin nocreatedb; alter user admin with nocreatedb; alter user "dbuser" reset var; alter user "dbuser" with reset var; alter user admin createuser; alter user admin with createuser; alter user admin nocreateuser; alter user admin with nocreateuser; alter user admin syslog access restricted; alter user admin with syslog access restricted; alter user admin syslog access unrestricted; alter user admin with syslog access unrestricted; alter user iam_superuser password 'mdA51234567890123456780123456789012'; alter user iam_superuser with password 'mdA51234567890123456780123456789012'; alter user iam_superuser password DISABLE; alter user iam_superuser with password DISABLE; alter user admin password 'adminPass9' valid until '2017-12-31 23:59'; alter user admin with password 'adminPass9' valid until '2017-12-31 23:59'; alter user admin rename to sysadmin; alter user admin with rename to sysadmin; alter user admin connection limit 10; alter user admin with connection limit 10; alter user admin connection limit unlimited; alter user admin with connection limit unlimited; alter user dbuser session timeout 300; alter user dbuser with session timeout 300; alter user dbuser reset session timeout; alter user dbuser with reset session timeout; alter user dbuser set var to 100; alter user dbuser with set var to 100; alter user dbuser set var = 'hi'; alter user dbuser with set var = 'hi'; alter user dbuser set var to default; alter user dbuser with set var to default; alter user dbuser set var = default; alter user dbuser with set var = default; alter user dbuser reset var; alter user dbuser with reset var; sqlfluff-2.3.5/test/fixtures/dialects/redshift/alter_user.yml000066400000000000000000000240721451700765000244750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
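# Note on the fixture below: the parsed statements split between
# alter_role_statement (broadly, forms also covered by the inherited
# postgres ALTER ROLE grammar) and alter_user_statement (broadly, the
# Redshift-specific forms such as SYSLOG ACCESS, SESSION TIMEOUT and
# PASSWORD DISABLE).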
_hash: f5ee1ab7a9f0253825805dba5756a115c6d7f90029c64bc8c51bcdc7464624b3 file: - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: createdb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: createdb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: nocreatedb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: nocreatedb - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: quoted_identifier: '"dbuser"' - keyword: reset - parameter: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: quoted_identifier: '"dbuser"' - keyword: with - keyword: reset - object_reference: naked_identifier: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: createuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: createuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: nocreateuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: nocreateuser - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: syslog - keyword: access - keyword: restricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: syslog - keyword: access - keyword: restricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: syslog - keyword: access - keyword: unrestricted - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: syslog - keyword: access - keyword: unrestricted - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: password - quoted_literal: "'mdA51234567890123456780123456789012'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: with - keyword: password - quoted_literal: "'mdA51234567890123456780123456789012'" - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: password - keyword: DISABLE - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: iam_superuser - keyword: with - keyword: password - keyword: DISABLE - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: 
naked_identifier: admin - keyword: password - quoted_literal: "'adminPass9'" - keyword: valid - keyword: until - quoted_literal: "'2017-12-31 23:59'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: password - quoted_literal: "'adminPass9'" - keyword: valid - keyword: until - quoted_literal: "'2017-12-31 23:59'" - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: rename - keyword: to - role_reference: naked_identifier: sysadmin - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: rename - keyword: to - object_reference: naked_identifier: sysadmin - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: connection - keyword: limit - numeric_literal: '10' - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: connection - keyword: limit - numeric_literal: '10' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: connection - keyword: limit - keyword: unlimited - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: admin - keyword: with - keyword: connection - keyword: limit - keyword: unlimited - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: session - keyword: timeout - numeric_literal: '300' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: session - keyword: timeout - numeric_literal: '300' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: reset - keyword: session - keyword: timeout - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: reset - keyword: session - keyword: timeout - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - keyword: to - numeric_literal: '100' - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - keyword: to - numeric_literal: '100' - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hi'" - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'hi'" - statement_terminator: ; - statement: 
alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - keyword: to - keyword: default - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - keyword: to - keyword: default - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: set - parameter: var - comparison_operator: raw_comparison_operator: '=' - keyword: default - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: set - object_reference: naked_identifier: var - comparison_operator: raw_comparison_operator: '=' - keyword: default - statement_terminator: ; - statement: alter_role_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: reset - parameter: var - statement_terminator: ; - statement: alter_user_statement: - keyword: alter - keyword: user - role_reference: naked_identifier: dbuser - keyword: with - keyword: reset - object_reference: naked_identifier: var - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/analyze_compression.sql000066400000000000000000000002311451700765000264010ustar00rootroot00000000000000analyze compression; analyze compression listing; analyse compression sales(qtysold, commission, saletime); analyse compression sales comprows 10000; sqlfluff-2.3.5/test/fixtures/dialects/redshift/analyze_compression.yml000066400000000000000000000026231451700765000264120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
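# Note on the first case below: a bare `analyze compression;` resolves to a
# plain analyze_statement over a table literally named "compression"; an
# analyze_compression_statement is only emitted when a target table (and
# optionally a column list or COMPROWS) follows, as the remaining cases show.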
_hash: d0afed803f6f284e6ce947638cc96551cfb53fd8f8506989cf0f815f91da0ce5 file: - statement: analyze_statement: keyword: analyze table_reference: naked_identifier: compression - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyze - keyword: compression - table_reference: naked_identifier: listing - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyse - keyword: compression - table_reference: naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: qtysold - comma: ',' - column_reference: naked_identifier: commission - comma: ',' - column_reference: naked_identifier: saletime - end_bracket: ) - statement_terminator: ; - statement: analyze_compression_statement: - keyword: analyse - keyword: compression - table_reference: naked_identifier: sales - keyword: comprows - numeric_literal: '10000' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/approximate_functions.sql000066400000000000000000000005351451700765000267450ustar00rootroot00000000000000select top 10 date.caldate, count(totalprice), sum(totalprice), approximate percentile_disc(0.5) within group (order by totalprice) from listing join date on listing.dateid = date.dateid group by date.caldate; select approximate count(distinct pricepaid) from sales; select count(distinct pricepaid) from sales; select approximate(foo) from bar; sqlfluff-2.3.5/test/fixtures/dialects/redshift/approximate_functions.yml000066400000000000000000000117351451700765000267530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f007d4d4684eef483e2b9d07d1d3abc2b1b89b8a74181d51401722135210d25 file: - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: keyword: top numeric_literal: '10' - select_clause_element: column_reference: - naked_identifier: date - dot: . - naked_identifier: caldate - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) - comma: ',' - select_clause_element: function: keyword: approximate function_name: function_name_identifier: percentile_disc bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: totalprice end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: listing join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: date join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: listing - dot: . - naked_identifier: dateid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: date - dot: . 
- naked_identifier: dateid groupby_clause: - keyword: group - keyword: by - column_reference: - naked_identifier: date - dot: . - naked_identifier: caldate - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: keyword: approximate function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: pricepaid end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: pricepaid end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: approximate bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/array_unnest.sql000066400000000000000000000014311451700765000250320ustar00rootroot00000000000000WITH example_data AS ( SELECT 10 AS shop_id , json_parse('[1, 2]') AS inventory UNION ALL SELECT 20 AS shop_id , json_parse('[3, 4, 5]') AS inventory UNION ALL SELECT 30 AS shop_id , json_parse('[6, 7, 8, 9]') AS inventory ) SELECT shop_id , value , index FROM example_data ed, ed.inventory AS value AT index; SELECT c_name, orders.o_orderkey AS orderkey, index AS orderkey_index FROM customer_orders_lineitem c, c.c_orders AS orders AT index ORDER BY orderkey_index; -- can extract the correlated values from multiple arrays using the index variable SELECT value_a::BIGINT, array_b[idx]::VARCHAR AS value_b, array_c[MOD(idx, 3) + 1]::FLOAT8 AS value_c FROM mytable t, t.array_a AS value_a AT idx; sqlfluff-2.3.5/test/fixtures/dialects/redshift/array_unnest.yml000066400000000000000000000204751451700765000250450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a6fb7fabf3e33524cd99938920ae8641cac9c8140dec8551a6e5496327f9db51 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: example_data keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'[1, 2]'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '20' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'[3, 4, 5]'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '30' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'[6, 7, 8, 9]'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: shop_id - comma: ',' - select_clause_element: column_reference: naked_identifier: value - comma: ',' - select_clause_element: column_reference: naked_identifier: index from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_data alias_expression: naked_identifier: ed - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: ed - dot: . - naked_identifier: inventory - keyword: AS - naked_identifier: value - keyword: AT - naked_identifier: index - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c_name - comma: ',' - select_clause_element: column_reference: - naked_identifier: orders - dot: . - naked_identifier: o_orderkey alias_expression: keyword: AS naked_identifier: orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: index alias_expression: keyword: AS naked_identifier: orderkey_index from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: c - dot: . 
- naked_identifier: c_orders - keyword: AS - naked_identifier: orders - keyword: AT - naked_identifier: index orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderkey_index - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value_a casting_operator: '::' data_type: keyword: BIGINT - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: array_b array_accessor: start_square_bracket: '[' expression: column_reference: naked_identifier: idx end_square_bracket: ']' casting_operator: '::' data_type: keyword: VARCHAR alias_expression: keyword: AS naked_identifier: value_b - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: array_c array_accessor: start_square_bracket: '[' expression: function: function_name: function_name_identifier: MOD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: idx - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) binary_operator: + numeric_literal: '1' end_square_bracket: ']' casting_operator: '::' data_type: keyword: FLOAT8 alias_expression: keyword: AS naked_identifier: value_c from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t - comma: ',' - from_expression: from_expression_element: table_expression: array_unnesting: - object_reference: - naked_identifier: t - dot: . - naked_identifier: array_a - keyword: AS - naked_identifier: value_a - keyword: AT - naked_identifier: idx - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/attach_rls_policy.sql000066400000000000000000000002331451700765000260220ustar00rootroot00000000000000ATTACH RLS POLICY policy_concerts ON tickit_category_redshift TO ROLE analyst, ROLE dbadmin; ATTACH RLS POLICY policy_name ON TABLE table_name TO PUBLIC; sqlfluff-2.3.5/test/fixtures/dialects/redshift/attach_rls_policy.yml000066400000000000000000000023371451700765000260330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 132626be2d39562d6e274d1fc81e4129e591519f73f638a62e5cfb1d6bb87a15 file: - statement: manage_rls_policy_statement: - keyword: ATTACH - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_concerts - keyword: 'ON' - table_reference: naked_identifier: tickit_category_redshift - keyword: TO - keyword: ROLE - role_reference: naked_identifier: analyst - comma: ',' - keyword: ROLE - role_reference: naked_identifier: dbadmin - statement_terminator: ; - statement: manage_rls_policy_statement: - keyword: ATTACH - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_name - keyword: 'ON' - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: TO - role_reference: naked_identifier: PUBLIC - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/call.sql000066400000000000000000000002001451700765000232240ustar00rootroot00000000000000CALL test_proc(); CALL test_proc(pg_last_query_id()); CALL outer_proc(5); call test_sp1(3,'book'); call test_sp2(2,'2019'); sqlfluff-2.3.5/test/fixtures/dialects/redshift/call.yml000066400000000000000000000041021451700765000232330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c94c3332600a0ceeb0466193b68aa5edca05af8384194dde34ff56b5eb84876 file: - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: test_proc bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: test_proc bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: pg_last_query_id bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: CALL function: function_name: function_name_identifier: outer_proc bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: test_sp1 bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'book'" - end_bracket: ) - statement_terminator: ; - statement: call_statement: keyword: call function: function_name: function_name_identifier: test_sp2 bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'2019'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/cast_conversion.sql000066400000000000000000000007631451700765000255260ustar00rootroot00000000000000select cast(col1 as integer) from tbl1; select convert(integer, col1) from tbl1; select col1::integer from tbl1; select cast(col1 as timestamptz) from tbl1; select convert(timestamptz, col1) from tbl1; select col1::timestamptz from tbl1; select cast(col1 as decimal(38, 2)) from tbl1; select convert(decimal(38, 2), col1) from tbl1; select col1::decimal(38, 2) from tbl1; select cast(col1 as interval) from tbl1; select convert(interval, col1) from tbl1; select col1::interval from tbl1; sqlfluff-2.3.5/test/fixtures/dialects/redshift/cast_conversion.yml000066400000000000000000000215211451700765000255230ustar00rootroot00000000000000# 
YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00f304d6ee102003b6d8f10e865a978a0d8426ae757544b3c721bc29c25262db file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: integer end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert bracketed: start_bracket: ( data_type: keyword: integer comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: integer from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: datetime_type_identifier: keyword: timestamptz end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert bracketed: start_bracket: ( data_type: datetime_type_identifier: keyword: timestamptz comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: datetime_type_identifier: keyword: timestamptz from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: 
function_name: keyword: convert bracketed: start_bracket: ( data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ;
- statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '2' - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ;
- statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 keyword: as data_type: keyword: interval end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ;
- statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert bracketed: start_bracket: ( data_type: keyword: interval comma: ',' expression: column_reference: naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ;
- statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: col1 casting_operator: '::' data_type: keyword: interval from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/cast_with_whitespaces.sql000066400000000000000000000021601451700765000267040ustar00rootroot00000000000000-- cast_with_whitespaces.sql /* Several valid queries where there is whitespace surrounding the Redshift cast operator (::) */ -- query from https://github.com/sqlfluff/sqlfluff/issues/2720 SELECT amount_of_honey :: FLOAT FROM bear_inventory; -- should be able to support an arbitrary amount of whitespace SELECT amount_of_honey :: FLOAT FROM bear_inventory; SELECT amount_of_honey:: FLOAT FROM bear_inventory; SELECT amount_of_honey ::FLOAT FROM bear_inventory; -- should support a wide variety of typecasts SELECT amount_of_honey :: time FROM bear_inventory; SELECT amount_of_honey :: text FROM bear_inventory; SELECT amount_of_honey :: VARCHAR( 512 ) FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMPTZ FROM bear_inventory; SELECT amount_of_honey :: TIMESTAMP WITHOUT TIME ZONE FROM bear_inventory; -- should support casts with an arbitrary amount of whitespace in join statements SELECT bi.amount_of_honey FROM bear_inventory bi LEFT JOIN favorite_cola fc ON fc.bear_id :: VARCHAR(512) = bi.bear_id ::VARCHAR(512) WHERE fc.favorite_cola = 'RC Cola'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/cast_with_whitespaces.yml000066400000000000000000000201111451700765000267020ustar00rootroot00000000000000# YML test files are auto-generated from
SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8aa7f240daeacea8a3fa688ddb3fea842e1a61cbfda2ff0d93b25e749ebb126b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: FLOAT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: time from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: text from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: amount_of_honey casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: bi - dot: . - naked_identifier: amount_of_honey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_inventory alias_expression: naked_identifier: bi join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: favorite_cola alias_expression: naked_identifier: fc - join_on_condition: keyword: 'ON' expression: - cast_expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - cast_expression: column_reference: - naked_identifier: bi - dot: . - naked_identifier: bear_id casting_operator: '::' data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '512' end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: fc - dot: . - naked_identifier: favorite_cola comparison_operator: raw_comparison_operator: '=' quoted_literal: "'RC Cola'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/close.sql000066400000000000000000000000151451700765000234220ustar00rootroot00000000000000close curs1; sqlfluff-2.3.5/test/fixtures/dialects/redshift/close.yml000066400000000000000000000010231451700765000234240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ea4035c9363b77eb1d272c3d92125790f2a32576a1bca03b66ebe37f00e7653e file: statement: close_statement: keyword: close object_reference: naked_identifier: curs1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/copy.sql000066400000000000000000000064431451700765000233020ustar00rootroot00000000000000-- Retrieved from https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html copy favoritemovies from 'dynamodb://Movies' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' readratio 50; copy listing from 's3://mybucket/data/listing/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; copy sales from 'emr://j-SAMPLE2B500FC/myoutput/part-*' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '\t' lzop; copy sales from 'emr://j-SAMPLE2B500FC/myoutput/json/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' JSON 's3://mybucket/jsonpaths.txt'; copy category from 's3://mybucket/custdata' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; copy customer from 's3://mybucket/cust.manifest' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest; copy listing from 's3://mybucket/data/listings/parquet/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' format as parquet; copy event from 's3://mybucket/data/allevents_pipe.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' removequotes emptyasnull blanksasnull maxerror 5 delimiter '|' timeformat 'YYYY-MM-DD HH:MI:SS'; copy venue from 's3://mybucket/data/venue_fw.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'; copy category from 's3://mybucket/data/category_csv.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' csv; copy category from 's3://mybucket/data/category_csv.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' csv quote as '%'; copy venue from 's3://mybucket/data/venue.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' explicit_ids; copy time from 's3://mybucket/data/timerows.gz' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' gzip delimiter '|'; copy timestamp1 from 's3://mybucket/data/time.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' timeformat 'YYYY-MM-DD HH:MI:SS'; copy venue_new(venueid, venuename, venuecity, venuestate) from 's3://mybucket/data/venue_noseats.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '|'; copy redshiftinfo from 's3://mybucket/data/redshiftinfo.txt' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter '|' escape; copy category from 's3://mybucket/category_object_auto.json' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' json 'auto'; copy category from 's3://mybucket/category_auto-ignorecase.avro' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' format as avro 'auto ignorecase'; COPY norway_natural FROM 's3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp' FORMAT SHAPEFILE CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY norway_water FROM 's3://bucket_name/shapefiles/norway/gis_osm_water_a_free_1.shp' FORMAT SHAPEFILE SIMPLIFY AUTO 1.1E-05 MAXERROR 2 CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY norway_natural_order(wkb_geometry, osm_id, code, fclass, name) FROM 's3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp' FORMAT SHAPEFILE CREDENTIALS 'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'; COPY some_table FROM 
's3://some_bucket/some/path' IAM_ROLE 'some_iam_role' FORMAT AS CSV TRUNCATECOLUMNS IGNOREHEADER 1 ACCEPTINVCHARS ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/copy.yml000066400000000000000000000252511451700765000233020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 16d1d81ebb07f882713235759d2eee51420edaedce2a55340d9df654bd826536 file: - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: favoritemovies - keyword: from - quoted_literal: "'dynamodb://Movies'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: readratio - numeric_literal: '50' - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: listing - keyword: from - quoted_literal: "'s3://mybucket/data/listing/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: sales - keyword: from - quoted_literal: "'emr://j-SAMPLE2B500FC/myoutput/part-*'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'\\t'" - keyword: lzop - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: sales - keyword: from - quoted_literal: "'emr://j-SAMPLE2B500FC/myoutput/json/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: JSON quoted_literal: "'s3://mybucket/jsonpaths.txt'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/custdata'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: customer - keyword: from - quoted_literal: "'s3://mybucket/cust.manifest'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: listing - keyword: from - quoted_literal: "'s3://mybucket/data/listings/parquet/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: format - keyword: as - keyword: parquet - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: event - keyword: from - quoted_literal: "'s3://mybucket/data/allevents_pipe.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: removequotes - keyword: emptyasnull - keyword: blanksasnull - keyword: maxerror - numeric_literal: '5' - keyword: delimiter - quoted_literal: "'|'" - keyword: timeformat - quoted_literal: "'YYYY-MM-DD HH:MI:SS'" - statement_terminator: ; - statement: copy_statement: - 
keyword: copy - table_reference: naked_identifier: venue - keyword: from - quoted_literal: "'s3://mybucket/data/venue_fw.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: fixedwidth - quoted_literal: "'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/data/category_csv.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: csv - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/data/category_csv.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: csv - keyword: quote - keyword: as - quoted_literal: "'%'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: venue - keyword: from - quoted_literal: "'s3://mybucket/data/venue.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: explicit_ids - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: time - keyword: from - quoted_literal: "'s3://mybucket/data/timerows.gz'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: gzip - keyword: delimiter - quoted_literal: "'|'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: timestamp1 - keyword: from - quoted_literal: "'s3://mybucket/data/time.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: timeformat - quoted_literal: "'YYYY-MM-DD HH:MI:SS'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: venue_new - bracketed: - start_bracket: ( - column_reference: naked_identifier: venueid - comma: ',' - column_reference: naked_identifier: venuename - comma: ',' - column_reference: naked_identifier: venuecity - comma: ',' - column_reference: naked_identifier: venuestate - end_bracket: ) - keyword: from - quoted_literal: "'s3://mybucket/data/venue_noseats.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'|'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: redshiftinfo - keyword: from - quoted_literal: "'s3://mybucket/data/redshiftinfo.txt'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "'|'" - keyword: escape - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/category_object_auto.json'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: keyword: json quoted_literal: "'auto'" - statement_terminator: ; - statement: copy_statement: - keyword: copy - table_reference: 
naked_identifier: category - keyword: from - quoted_literal: "'s3://mybucket/category_auto-ignorecase.avro'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - data_format_segment: - keyword: format - keyword: as - keyword: avro - quoted_literal: "'auto ignorecase'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_natural - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_water - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_water_a_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - keyword: SIMPLIFY - keyword: AUTO - numeric_literal: '1.1E-05' - keyword: MAXERROR - numeric_literal: '2' - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: norway_natural_order - bracketed: - start_bracket: ( - column_reference: naked_identifier: wkb_geometry - comma: ',' - column_reference: naked_identifier: osm_id - comma: ',' - column_reference: naked_identifier: code - comma: ',' - column_reference: naked_identifier: fclass - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - keyword: FROM - quoted_literal: "'s3://bucket_name/shapefiles/norway/gis_osm_natural_free_1.shp'" - data_format_segment: - keyword: FORMAT - keyword: SHAPEFILE - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_iam_role=arn:aws:iam::123456789012:role/MyRoleName'" - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: some_table - keyword: FROM - quoted_literal: "'s3://some_bucket/some/path'" - authorization_segment: keyword: IAM_ROLE quoted_literal: "'some_iam_role'" - data_format_segment: - keyword: FORMAT - keyword: AS - keyword: CSV - keyword: TRUNCATECOLUMNS - keyword: IGNOREHEADER - numeric_literal: '1' - keyword: ACCEPTINVCHARS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_datashare.sql000066400000000000000000000003001451700765000255710ustar00rootroot00000000000000CREATE DATASHARE salesshare; CREATE DATASHARE demoshare SET PUBLICACCESSIBLE = TRUE; CREATE DATASHARE demoshare PUBLICACCESSIBLE = FALSE; CREATE DATASHARE demoshare PUBLICACCESSIBLE FALSE; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_datashare.yml000066400000000000000000000026121451700765000256030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: cdec9c9737e6edf34ea910601dd0cc0ee6a5568d3af81aa4361f92a84074eeeb file: - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: salesshare - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: SET - keyword: PUBLICACCESSIBLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'TRUE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: PUBLICACCESSIBLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'FALSE' - statement_terminator: ; - statement: create_datashare_statement: - keyword: CREATE - keyword: DATASHARE - object_reference: naked_identifier: demoshare - keyword: PUBLICACCESSIBLE - keyword: 'FALSE' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_function.sql000066400000000000000000000007071451700765000273770ustar00rootroot00000000000000CREATE EXTERNAL FUNCTION exfunc_sum(INT,INT) RETURNS INT STABLE LAMBDA 'lambda_sum' IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'; CREATE OR REPLACE EXTERNAL FUNCTION exfunc_upper() RETURNS varchar STABLE LAMBDA 'exfunc_sleep_3' IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test' RETRY_TIMEOUT 0; CREATE OR REPLACE EXTERNAL FUNCTION exfunc_foo(varchar) RETURNS int IMMUTABLE SAGEMAKER 'some_endpoint_name' IAM_ROLE default; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_function.yml000066400000000000000000000042561451700765000274040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e9e8c7b96572640d071f118c262ce88d4bbc53f8871dd14bcb4d4e7da2ff0b0e file: - statement: create_external_function_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_sum - bracketed: - start_bracket: ( - data_type: keyword: INT - comma: ',' - data_type: keyword: INT - end_bracket: ) - keyword: RETURNS - data_type: keyword: INT - keyword: STABLE - keyword: LAMBDA - quoted_literal: "'lambda_sum'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_upper - bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: keyword: varchar - keyword: STABLE - keyword: LAMBDA - quoted_literal: "'exfunc_sleep_3'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-Exfunc-Test'" - keyword: RETRY_TIMEOUT - numeric_literal: '0' - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: exfunc_foo - bracketed: start_bracket: ( data_type: keyword: varchar end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: IMMUTABLE - keyword: SAGEMAKER - quoted_literal: "'some_endpoint_name'" - keyword: IAM_ROLE - keyword: default - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_schema.sql000066400000000000000000000031131451700765000270040ustar00rootroot00000000000000create external schema spectrum_schema from data catalog database 'sampledb' region 'us-west-2' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; create external schema spectrum_schema from data catalog database 'spectrum_db' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole' create external database if not exists; create external schema hive_schema from hive metastore database 'hive_db' uri '172.10.10.10' port 99 iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; create external schema spectrum_schema from data catalog database 'spectrum_db' iam_role 'arn:aws:iam::123456789012:role/myRedshiftRole,arn:aws:iam::123456789012:role/myS3Role' catalog_role 'arn:aws:iam::123456789012:role/myAthenaRole' create external database if not exists; CREATE EXTERNAL SCHEMA IF NOT EXISTS myRedshiftSchema FROM POSTGRES DATABASE 'my_aurora_db' SCHEMA 'my_aurora_schema' URI 'endpoint to aurora hostname' PORT 5432 IAM_ROLE 'arn:aws:iam::123456789012:role/MyAuroraRole' SECRET_ARN 'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'; CREATE EXTERNAL SCHEMA sales_schema FROM REDSHIFT DATABASE 'sales_db' SCHEMA 'public'; CREATE EXTERNAL SCHEMA IF NOT EXISTS myRedshiftSchema FROM MYSQL DATABASE 'my_aurora_db' URI 'endpoint to aurora hostname' IAM_ROLE 'arn:aws:iam::123456789012:role/MyAuroraRole' SECRET_ARN 'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'; create external schema spectrum_schema from data catalog database 'sampledb' region 'us-west-2' iam_role 'arn:aws:iam::123456789012:role/MySpectrumRole'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_schema.yml000066400000000000000000000116161451700765000270150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not 
be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e538d865e2518f99c09256ff8198187db3ac1e21ce743e8652d5d264b9a3778 file: - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'sampledb'" - keyword: region - quoted_literal: "'us-west-2'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'spectrum_db'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - keyword: create - keyword: external - keyword: database - keyword: if - keyword: not - keyword: exists - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: hive_schema - keyword: from - keyword: hive - keyword: metastore - keyword: database - quoted_literal: "'hive_db'" - keyword: uri - quoted_literal: "'172.10.10.10'" - keyword: port - numeric_literal: '99' - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'spectrum_db'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/myRedshiftRole,arn:aws:iam::123456789012:role/myS3Role'" - keyword: catalog_role - quoted_literal: "'arn:aws:iam::123456789012:role/myAthenaRole'" - keyword: create - keyword: external - keyword: database - keyword: if - keyword: not - keyword: exists - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: myRedshiftSchema - keyword: FROM - keyword: POSTGRES - keyword: DATABASE - quoted_literal: "'my_aurora_db'" - keyword: SCHEMA - quoted_literal: "'my_aurora_schema'" - keyword: URI - quoted_literal: "'endpoint to aurora hostname'" - keyword: PORT - numeric_literal: '5432' - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/MyAuroraRole'" - keyword: SECRET_ARN - quoted_literal: "'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - schema_reference: naked_identifier: sales_schema - keyword: FROM - keyword: REDSHIFT - keyword: DATABASE - quoted_literal: "'sales_db'" - keyword: SCHEMA - quoted_literal: "'public'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: myRedshiftSchema - keyword: FROM - keyword: 
MYSQL - keyword: DATABASE - quoted_literal: "'my_aurora_db'" - keyword: URI - quoted_literal: "'endpoint to aurora hostname'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/MyAuroraRole'" - keyword: SECRET_ARN - quoted_literal: "'arn:aws:secretsmanager:us-east-2:123456789012:secret:development/MyTestDatabase-AbCdEf'" - statement_terminator: ; - statement: create_external_schema_statement: - keyword: create - keyword: external - keyword: schema - schema_reference: naked_identifier: spectrum_schema - keyword: from - keyword: data - keyword: catalog - keyword: database - quoted_literal: "'sampledb'" - keyword: region - quoted_literal: "'us-west-2'" - keyword: iam_role - quoted_literal: "'arn:aws:iam::123456789012:role/MySpectrumRole'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_table.sql000066400000000000000000000051671451700765000266460ustar00rootroot00000000000000CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER ) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS ORC LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS AVRO LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) STORED AS TEXTFILE LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) PARTITIONED BY (col3 integer) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER, col2 TEXT ) PARTITIONED BY (col3 INTEGER, col4 INTEGER) STORED AS PARQUET LOCATION 's3://bucket/folder' ; CREATE EXTERNAL TABLE external_schema.table_name ( col1 INTEGER ) STORED AS PARQUET LOCATION 's3://bucket/folder' TABLE PROPERTIES ('some_property1'='some_value1', 'some_property2'='some_value2') ; create external table spectrum.sales( salesid integer, saledate date, qtysold smallint, pricepaid decimal(8,2), saletime timestamp) row format delimited fields terminated by '\t' stored as textfile location 's3://awssampledbuswest2/tickit/spectrum/sales/' table properties ('numRows'='170000'); create external table spectrum.cloudtrail_json ( event_version int, event_id bigint, event_time timestamp, event_type varchar(10), recipientaccountid bigint) row format serde 'org.openx.data.jsonserde.JsonSerDe' with serdeproperties ( 'dots.in.keys' = 'true', 'mapping.requesttime' = 'requesttimestamp' ) stored as textfile location 's3://mybucket/json/cloudtrail'; CREATE EXTERNAL TABLE schema_spectrum_uddh.soccer_league ( league_rank smallint, club_name varchar(15), league_spi decimal(6,2), league_nspi smallint ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n\l' stored as textfile LOCATION 's3://spectrum-uddh/league/' table properties ('skip.header.line.count'='1'); CREATE EXTERNAL TABLE tbl1 (col1 int, col2 varchar(10)) ROW FORMAT SERDE 'com.amazon.ionhiveserde.IonHiveSerDe' STORED AS INPUTFORMAT 'com.amazon.ionhiveserde.formats.IonInputFormat' OUTPUTFORMAT 'com.amazon.ionhiveserde.formats.IonOutputFormat' LOCATION 's3://s3-bucket/prefix'; CREATE EXTERNAL TABLE spectrum.partitioned_lineitem PARTITIONED BY (l_shipdate, l_shipmode) STORED AS parquet LOCATION 'S3://mybucket/cetas/partitioned_lineitem/' AS SELECT 1; 
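-- Illustrative, commented-out sketch only (not part of the generated fixture
-- pair; the table, column, and bucket names below are hypothetical): the
-- CREATE EXTERNAL TABLE ... AS (CETAS) form above can also wrap a fuller
-- query, along the lines of
--
--   CREATE EXTERNAL TABLE spectrum.lineitem_by_shipmode
--   STORED AS parquet
--   LOCATION 's3://mybucket/cetas/lineitem_by_shipmode/'
--   AS SELECT l_shipmode, count(*) AS shipments
--   FROM spectrum.partitioned_lineitem
--   GROUP BY l_shipmode;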
sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_table.yml000066400000000000000000000336631451700765000266520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5b6df3353f64d90fb128da7d2a5d1a94e0961673673ec9d3ad2feb1de1c6d8ff file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: INTEGER end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: AVRO - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: TEXTFILE - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col3 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: TEXT - end_bracket: ) - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col3 - data_type: keyword: INTEGER - comma: ',' - column_reference: naked_identifier: col4 - data_type: keyword: INTEGER - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: INTEGER end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder'" - keyword: TABLE - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'some_property1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value1'" - comma: ',' - quoted_literal: "'some_property2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value2'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: sales - bracketed: - start_bracket: ( - column_reference: naked_identifier: salesid - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: saledate - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: qtysold - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: pricepaid - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '8' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: saletime - data_type: datetime_type_identifier: keyword: timestamp - end_bracket: ) - keyword: row - keyword: format - keyword: delimited - row_format_delimited_segment: - keyword: fields - keyword: terminated - keyword: by - quoted_literal: "'\\t'" - keyword: stored - keyword: as - keyword: textfile - keyword: location - quoted_literal: "'s3://awssampledbuswest2/tickit/spectrum/sales/'" - keyword: table - keyword: properties - bracketed: - start_bracket: ( - quoted_literal: "'numRows'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'170000'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: external - keyword: table - table_reference: - naked_identifier: spectrum - dot: . - naked_identifier: cloudtrail_json - bracketed: - start_bracket: ( - column_reference: naked_identifier: event_version - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: event_id - data_type: keyword: bigint - comma: ',' - column_reference: naked_identifier: event_time - data_type: datetime_type_identifier: keyword: timestamp - comma: ',' - column_reference: naked_identifier: event_type - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: recipientaccountid - data_type: keyword: bigint - end_bracket: ) - keyword: row - keyword: format - keyword: serde - quoted_literal: "'org.openx.data.jsonserde.JsonSerDe'" - keyword: with - keyword: serdeproperties - bracketed: - start_bracket: ( - quoted_literal: "'dots.in.keys'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'true'" - comma: ',' - quoted_literal: "'mapping.requesttime'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'requesttimestamp'" - end_bracket: ) - keyword: stored - keyword: as - keyword: textfile - keyword: location - quoted_literal: "'s3://mybucket/json/cloudtrail'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: schema_spectrum_uddh - dot: . 
- naked_identifier: soccer_league - bracketed: - start_bracket: ( - column_reference: naked_identifier: league_rank - data_type: keyword: smallint - comma: ',' - column_reference: naked_identifier: club_name - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '15' end_bracket: ) - comma: ',' - column_reference: naked_identifier: league_spi - data_type: keyword: decimal bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '6' - comma: ',' - numeric_literal: '2' - end_bracket: ) - comma: ',' - column_reference: naked_identifier: league_nspi - data_type: keyword: smallint - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n\\l'" - keyword: stored - keyword: as - keyword: textfile - keyword: LOCATION - quoted_literal: "'s3://spectrum-uddh/league/'" - keyword: table - keyword: properties - bracketed: - start_bracket: ( - quoted_literal: "'skip.header.line.count'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: tbl1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: int - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'com.amazon.ionhiveserde.IonHiveSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'com.amazon.ionhiveserde.formats.IonInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'com.amazon.ionhiveserde.formats.IonOutputFormat'" - keyword: LOCATION - quoted_literal: "'s3://s3-bucket/prefix'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: partitioned_lineitem - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: l_shipdate - comma: ',' - column_reference: naked_identifier: l_shipmode - end_bracket: ) - keyword: STORED - keyword: AS - keyword: parquet - keyword: LOCATION - quoted_literal: "'S3://mybucket/cetas/partitioned_lineitem/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_table_as.sql000066400000000000000000000044501451700765000273230ustar00rootroot00000000000000CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS SELECT * FROM external_schema.source_table ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) ROW FORMAT DELIMITED LINES TERMINATED BY '\007' STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name PARTITIONED BY (col1 integer, col2 integer) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\007' STORED AS PARQUET LOCATION 's3://bucket/folder/' AS ( SELECT col1, col2 FROM external_schema.source_table ) ; CREATE EXTERNAL TABLE external_schema.table_name STORED AS PARQUET LOCATION 's3://bucket/folder/' TABLE PROPERTIES ('some_property1'='some_value1', 'some_property2'='some_value2') AS SELECT col1, col2 FROM external_schema.source_table ; CREATE EXTERNAL TABLE spectrum.partitioned_lineitem PARTITIONED BY (l_shipdate date, l_shipmode varchar(24)) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n\l' STORED AS textfile LOCATION 'S3://mybucket/cetas/partitioned_lineitem/' AS SELECT l_orderkey, l_shipmode, l_shipdate, l_partkey FROM local_table; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_external_table_as.yml000066400000000000000000000371341451700765000273320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5a2eac3d5e8513beb33d3032b380eb39d87c37ababa89c2006b45da8aecf0b5e file: - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: integer end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . 
- naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\007'" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: integer - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: integer - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\007'" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table end_bracket: ) - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: table_name - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: "'s3://bucket/folder/'" - keyword: TABLE - keyword: PROPERTIES - bracketed: - start_bracket: ( - quoted_literal: "'some_property1'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value1'" - comma: ',' - quoted_literal: "'some_property2'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'some_value2'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: external_schema - dot: . - naked_identifier: source_table - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: partitioned_lineitem - partitioned_by_segment: - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: l_shipdate - data_type: datetime_type_identifier: keyword: date - comma: ',' - column_reference: naked_identifier: l_shipmode - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '24' end_bracket: ) - end_bracket: ) - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - row_format_delimited_segment: - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n\\l'" - keyword: STORED - keyword: AS - keyword: textfile - keyword: LOCATION - quoted_literal: "'S3://mybucket/cetas/partitioned_lineitem/'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: l_orderkey - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipmode - comma: ',' - select_clause_element: column_reference: naked_identifier: l_shipdate - comma: ',' - select_clause_element: column_reference: naked_identifier: l_partkey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: local_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_function.sql000066400000000000000000000003651451700765000254750ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION public.iif( condition BOOLEAN , true_result ANYELEMENT , false_result ANYELEMENT) RETURNS ANYELEMENT STABLE AS $$ if condition: return true_result return false_result $$ LANGUAGE plpythonu; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_function.yml000066400000000000000000000025741451700765000255030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4cd966e4f6b9f99639f7964d1e1c7c25c561e2ae594f6a7dfd4ed7d69c869998 file: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: naked_identifier: public dot: . 
function_name_identifier: iif - function_parameter_list: bracketed: - start_bracket: ( - parameter: condition - data_type: keyword: BOOLEAN - comma: ',' - parameter: true_result - data_type: keyword: ANYELEMENT - comma: ',' - parameter: false_result - data_type: keyword: ANYELEMENT - end_bracket: ) - keyword: RETURNS - data_type: keyword: ANYELEMENT - function_definition: - keyword: STABLE - keyword: AS - quoted_literal: "$$\n if condition:\n return true_result\n return\ \ false_result\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpythonu statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_group.sql000066400000000000000000000003531451700765000250010ustar00rootroot00000000000000create group admin_group; create group "admin_group"; create group admin_group user admin1; create group admin_group with user admin1; create group admin_group user admin1, admin2; create group admin_group with user admin1, admin2; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_group.yml000066400000000000000000000035131451700765000250040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: efa69dfefc61d1b9b842bb77160caadfe278edadf91a158c444f01a1e4c0e16a file: - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: quoted_identifier: '"admin_group"' - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: user - object_reference: naked_identifier: admin1 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: with - keyword: user - object_reference: naked_identifier: admin1 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: user - object_reference: naked_identifier: admin1 - comma: ',' - object_reference: naked_identifier: admin2 - statement_terminator: ; - statement: create_group: - keyword: create - keyword: group - object_reference: naked_identifier: admin_group - keyword: with - keyword: user - object_reference: naked_identifier: admin1 - comma: ',' - object_reference: naked_identifier: admin2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_library.sql000066400000000000000000000032351451700765000253130ustar00rootroot00000000000000create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' credentials 'aws_iam_role=arn:aws:iam::123456789:role/role_name' region as 'us-east-1'; create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' region as 'us-east-1' credentials 'aws_iam_role=arn:aws:iam::123456789:role/role_name'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' with credentials as 'aws_iam_role=arn:aws:iam::123456789:role/role_name' region as 'us-east-1'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' credentials as 'aws_access_key_id=;aws_secret_access_key=;token=' region as 'us-east-1'; create or 
replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' with credentials 'aws_access_key_id=;aws_secret_access_key=;token=' region as 'us-east-1'; create library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' iam_role 'aws_iam_role=arn:aws:iam::123456789:role/role_name'; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' ACCESS_KEY_ID '' SECRET_ACCESS_KEY ''; create or replace library lib1 language plpythonu from 's3://s3bucket/lib1.0.3.zip' ACCESS_KEY_ID '' SECRET_ACCESS_KEY '' SESSION_TOKEN '' region 'us-east-1'; create library lib1 language plpythonu from 'https://example.com/packages/lib1.0.3.zip'; create or replace library lib1 language plpythonu from 'https://example.com/packages/lib1.0.3.zip'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_library.yml000066400000000000000000000126061451700765000253170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3a9c7dc28769781f5e2d0eaa5b9fb4b98284f7d08d9c84666860af478f90663e file: - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: keyword: credentials quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - authorization_segment: keyword: credentials quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: with - keyword: credentials - keyword: as - quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: credentials - keyword: as - quoted_literal: "'aws_access_key_id=;aws_secret_access_key=;token='" - keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: with - keyword: credentials - quoted_literal: "'aws_access_key_id=;aws_secret_access_key=;token='" - 
keyword: region - keyword: as - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: keyword: iam_role quoted_literal: "'aws_iam_role=arn:aws:iam::123456789:role/role_name'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: ACCESS_KEY_ID - quoted_literal: "''" - keyword: SECRET_ACCESS_KEY - quoted_literal: "''" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'s3://s3bucket/lib1.0.3.zip'" - authorization_segment: - keyword: ACCESS_KEY_ID - quoted_literal: "''" - keyword: SECRET_ACCESS_KEY - quoted_literal: "''" - keyword: SESSION_TOKEN - quoted_literal: "''" - keyword: region - quoted_literal: "'us-east-1'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'https://example.com/packages/lib1.0.3.zip'" - statement_terminator: ; - statement: create_library_statement: - keyword: create - keyword: or - keyword: replace - keyword: library - object_reference: naked_identifier: lib1 - keyword: language - keyword: plpythonu - keyword: from - quoted_literal: "'https://example.com/packages/lib1.0.3.zip'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_materialized_view.sql000066400000000000000000000012161451700765000273500ustar00rootroot00000000000000create materialized view mat_view_example backup yes auto refresh no as select col1 from example_table; CREATE MATERIALIZED VIEW tickets_mv AS select catgroup, sum(qtysold) as sold from category c, event e, sales s where c.catid = e.catid and e.eventid = s.eventid group by catgroup; CREATE MATERIALIZED VIEW mv_sales_vw as select salesid, qtysold, pricepaid, commission, saletime from public.sales union all select salesid, qtysold, pricepaid, commission, saletime from spectrum.sales ; CREATE MATERIALIZED VIEW mv_baseball DISTSTYLE ALL AUTO REFRESH YES AS SELECT ball AS baseball FROM baseball_table; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_materialized_view.yml000066400000000000000000000153651451700765000273640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0f4fc5e9a9a3feb883e5e9d3e8f30d085b3a91f040f1ccac3fef5063a7229cda file: - statement: create_materialized_view_statement: - keyword: create - keyword: materialized - keyword: view - table_reference: naked_identifier: mat_view_example - keyword: backup - keyword: 'yes' - keyword: auto - keyword: refresh - keyword: 'no' - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_table - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: tickets_mv - keyword: AS - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: catgroup - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: qtysold end_bracket: ) alias_expression: keyword: as naked_identifier: sold from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: category alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: event alias_expression: naked_identifier: e - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales alias_expression: naked_identifier: s where_clause: keyword: where expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: catid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: catid - binary_operator: and - column_reference: - naked_identifier: e - dot: . - naked_identifier: eventid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: eventid groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: catgroup - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: mv_sales_vw - keyword: as - set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: qtysold - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: column_reference: naked_identifier: commission - comma: ',' - select_clause_element: column_reference: naked_identifier: saletime from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . 
- naked_identifier: sales - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: qtysold - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: column_reference: naked_identifier: commission - comma: ',' - select_clause_element: column_reference: naked_identifier: saletime from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: spectrum - dot: . - naked_identifier: sales - statement_terminator: ; - statement: create_materialized_view_statement: - keyword: CREATE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: mv_baseball - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: AUTO - keyword: REFRESH - keyword: 'YES' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ball alias_expression: keyword: AS naked_identifier: baseball from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baseball_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_model.sql000066400000000000000000000024611451700765000247470ustar00rootroot00000000000000CREATE MODEL abalone_xgboost_multi_predict_age FROM ( SELECT length_val, diameter, height, whole_weight, shucked_weight, viscera_weight, shell_weight, rings FROM abalone_xgb WHERE record_number < 2500 ) TARGET rings FUNCTION ml_fn_abalone_xgboost_multi_predict_age IAM_ROLE 'arn:aws:iam::XXXXXXXXXXXX:role/Redshift-ML' AUTO OFF MODEL_TYPE XGBOOST OBJECTIVE 'multi:softmax' PREPROCESSORS 'none' HYPERPARAMETERS DEFAULT EXCEPT (NUM_ROUND '100', NUM_CLASS '30') SETTINGS (S3_BUCKET 'bucket'); CREATE MODEL customer_churn FROM 'training-job-customer-churn-v4' FUNCTION customer_churn_predict (varchar, int, float, float) RETURNS int IAM_ROLE 'arn:aws:iam::123456789012:role/Redshift-ML' SETTINGS (S3_BUCKET 'bucket'); CREATE MODEL remote_customer_churn FUNCTION remote_fn_customer_churn_predict (varchar, int, float, float) RETURNS int SAGEMAKER 'customer-churn-endpoint' IAM_ROLE 'arn:aws:iam::0123456789012:role/Redshift-ML'; CREATE MODEL customers_clusters FROM customers FUNCTION customers_cluster IAM_ROLE 'iam-role-arn' AUTO OFF MODEL_TYPE KMEANS PREPROCESSORS '[ { "ColumnSet": [ "*" ], "Transformers": [ "NumericPassthrough" ] } ]' HYPERPARAMETERS DEFAULT EXCEPT ( K '5' ) SETTINGS (S3_BUCKET 'bucket'); sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_model.yml000066400000000000000000000135641451700765000247570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 564b6c84bb7c7f8eee7dbf8604db15036f6c94d6f6347d7d79f86d1974956aa4 file: - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: abalone_xgboost_multi_predict_age - keyword: FROM - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: length_val - comma: ',' - select_clause_element: column_reference: naked_identifier: diameter - comma: ',' - select_clause_element: column_reference: naked_identifier: height - comma: ',' - select_clause_element: column_reference: naked_identifier: whole_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: shucked_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: viscera_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: shell_weight - comma: ',' - select_clause_element: column_reference: naked_identifier: rings from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: abalone_xgb where_clause: keyword: WHERE expression: column_reference: naked_identifier: record_number comparison_operator: raw_comparison_operator: < numeric_literal: '2500' end_bracket: ) - keyword: TARGET - column_reference: naked_identifier: rings - keyword: FUNCTION - object_reference: naked_identifier: ml_fn_abalone_xgboost_multi_predict_age - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::XXXXXXXXXXXX:role/Redshift-ML'" - keyword: AUTO - keyword: 'OFF' - keyword: MODEL_TYPE - keyword: XGBOOST - keyword: OBJECTIVE - quoted_literal: "'multi:softmax'" - keyword: PREPROCESSORS - quoted_literal: "'none'" - keyword: HYPERPARAMETERS - keyword: DEFAULT - keyword: EXCEPT - bracketed: - start_bracket: ( - word: NUM_ROUND - single_quote: "'100'" - comma: ',' - word: NUM_CLASS - single_quote: "'30'" - end_bracket: ) - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: customer_churn - keyword: FROM - quoted_literal: "'training-job-customer-churn-v4'" - keyword: FUNCTION - object_reference: naked_identifier: customer_churn_predict - bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: float - comma: ',' - data_type: keyword: float - end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::123456789012:role/Redshift-ML'" - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: naked_identifier: remote_customer_churn - keyword: FUNCTION - object_reference: naked_identifier: remote_fn_customer_churn_predict - bracketed: - start_bracket: ( - data_type: keyword: varchar - comma: ',' - data_type: keyword: int - comma: ',' - data_type: keyword: float - comma: ',' - data_type: keyword: float - end_bracket: ) - keyword: RETURNS - data_type: keyword: int - keyword: SAGEMAKER - quoted_literal: "'customer-churn-endpoint'" - keyword: IAM_ROLE - quoted_literal: "'arn:aws:iam::0123456789012:role/Redshift-ML'" - statement_terminator: ; - statement: create_model_statement: - keyword: CREATE - keyword: MODEL - object_reference: 
naked_identifier: customers_clusters - keyword: FROM - object_reference: naked_identifier: customers - keyword: FUNCTION - object_reference: naked_identifier: customers_cluster - keyword: IAM_ROLE - quoted_literal: "'iam-role-arn'" - keyword: AUTO - keyword: 'OFF' - keyword: MODEL_TYPE - keyword: KMEANS - keyword: PREPROCESSORS - quoted_literal: "'[\n {\n \"ColumnSet\": [ \"*\" ],\n \"Transformers\"\ : [ \"NumericPassthrough\" ]\n }\n]'" - keyword: HYPERPARAMETERS - keyword: DEFAULT - keyword: EXCEPT - bracketed: start_bracket: ( word: K single_quote: "'5'" end_bracket: ) - keyword: SETTINGS - bracketed: start_bracket: ( keyword: S3_BUCKET quoted_literal: "'bucket'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_procedure.sql000066400000000000000000000015201451700765000256320ustar00rootroot00000000000000CREATE OR REPLACE PROCEDURE test_sp1(f1 int, f2 varchar(20)) AS $$ DECLARE min_val int; BEGIN DROP TABLE IF EXISTS tmp_tbl; CREATE TEMP TABLE tmp_tbl(id int); INSERT INTO tmp_tbl values (f1),(10001),(10002); SELECT INTO min_val MIN(id) FROM tmp_tbl; RAISE INFO 'min_val = %, f2 = %', min_val, f2; END; $$ LANGUAGE plpgsql SECURITY INVOKER; CREATE OR REPLACE PROCEDURE test_sp2(f1 IN int, f2 INOUT varchar(256), out_var OUT varchar(256)) AS $$ DECLARE loop_var int; BEGIN IF f1 is null OR f2 is null THEN RAISE EXCEPTION 'input cannot be null'; END IF; DROP TABLE if exists my_etl; CREATE TEMP TABLE my_etl(a int, b varchar); FOR loop_var IN 1..f1 LOOP insert into my_etl values (loop_var, f2); f2 := f2 || '+' || f2; END LOOP; SELECT INTO out_var count(*) from my_etl; END; $$ LANGUAGE plpgsql; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_procedure.yml000066400000000000000000000061321451700765000256400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 28b9d6a0b11c121a1f22aabba6fb4737c14511348e978c8ad2a58cf98697c1d7 file: - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: test_sp1 - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: f1 - data_type: keyword: int - comma: ',' - parameter: f2 - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - end_bracket: ) - function_definition: - keyword: AS - quoted_literal: "$$\nDECLARE\n min_val int;\nBEGIN\n DROP TABLE IF EXISTS\ \ tmp_tbl;\n CREATE TEMP TABLE tmp_tbl(id int);\n INSERT INTO tmp_tbl\ \ values (f1),(10001),(10002);\n SELECT INTO min_val MIN(id) FROM tmp_tbl;\n\ \ RAISE INFO 'min_val = %, f2 = %', min_val, f2;\nEND;\n$$" - language_clause: keyword: LANGUAGE naked_identifier: plpgsql - keyword: SECURITY - keyword: INVOKER - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: test_sp2 - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: f1 - keyword: IN - data_type: keyword: int - comma: ',' - parameter: f2 - keyword: INOUT - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '256' end_bracket: ) - comma: ',' - parameter: out_var - keyword: OUT - data_type: keyword: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '256' end_bracket: ) - end_bracket: ) - function_definition: keyword: AS quoted_literal: "$$\nDECLARE\n loop_var int;\nBEGIN\n IF f1 is null OR f2\ \ is null THEN\n RAISE EXCEPTION 'input cannot be null';\n END IF;\n\ \ DROP TABLE if exists my_etl;\n CREATE TEMP TABLE my_etl(a int, b varchar);\n\ \ FOR loop_var IN 1..f1 LOOP\n insert into my_etl values (loop_var,\ \ f2);\n f2 := f2 || '+' || f2;\n END LOOP;\n SELECT INTO out_var\ \ count(*) from my_etl;\nEND;\n$$" language_clause: keyword: LANGUAGE naked_identifier: plpgsql - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_rls_policy.sql000066400000000000000000000003361451700765000260250ustar00rootroot00000000000000CREATE RLS POLICY policy_concerts WITH (catgroup VARCHAR(10)) USING (catgroup = 'Concerts'); CREATE RLS POLICY policy_name WITH (foo VARCHAR(10), bar DECIMAL(10, 2)) AS relation_alias USING (bar >= 12 AND foo = 'user1'); sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_rls_policy.yml000066400000000000000000000052011451700765000260230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4a3d668d485fa7a049937fa617411a44a32ddfe24d99dfd50558f6d64e04d58f file: - statement: create_rls_policy_statement: - keyword: CREATE - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_concerts - keyword: WITH - bracketed: start_bracket: ( column_reference: naked_identifier: catgroup data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) end_bracket: ) - keyword: USING - bracketed: start_bracket: ( expression: column_reference: naked_identifier: catgroup comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Concerts'" end_bracket: ) - statement_terminator: ; - statement: create_rls_policy_statement: - keyword: CREATE - keyword: RLS - keyword: POLICY - object_reference: naked_identifier: policy_name - keyword: WITH - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) - comma: ',' - column_reference: naked_identifier: bar - data_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) - keyword: AS - alias_expression: naked_identifier: relation_alias - keyword: USING - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: bar - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '12' - binary_operator: AND - column_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'user1'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_schema.sql000066400000000000000000000015331451700765000251060ustar00rootroot00000000000000CREATE SCHEMA s1; CREATE SCHEMA IF NOT EXISTS s1; CREATE SCHEMA s1 AUTHORIZATION dwuser; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA s1 AUTHORIZATION dwuser QUOTA UNLIMITED; CREATE SCHEMA IF NOT EXISTS s1 AUTHORIZATION dwuser QUOTA UNLIMITED; CREATE SCHEMA AUTHORIZATION dwuser; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 100 MB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 5 GB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA 0.1 TB; CREATE SCHEMA AUTHORIZATION dwuser QUOTA UNLIMITED; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_schema.yml000066400000000000000000000122541451700765000251120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5884da012d93c06f3600f3449b67610e88369ece73a415609623e7b8d6781281 file: - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - schema_reference: naked_identifier: s1 - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '100' - keyword: MB - statement_terminator: ; - statement: 
create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '5' - keyword: GB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - numeric_literal: '0.1' - keyword: TB - statement_terminator: ; - statement: create_schema_statement: - keyword: CREATE - keyword: SCHEMA - keyword: AUTHORIZATION - role_reference: naked_identifier: dwuser - keyword: QUOTA - keyword: UNLIMITED - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_table.sql000066400000000000000000000054111451700765000247340ustar00rootroot00000000000000CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) NOT NULL ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) GENERATED BY DEFAULT AS IDENTITY (1, 1) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5), col3 VARCHAR(5), col4 VARCHAR(5), col5 VARCHAR(5), col6 VARCHAR(5) ) DISTKEY (col1) COMPOUND SORTKEY (col4, col5, col6) ; CREATE TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) REFERENCES t2 (col1) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TABLE IF NOT EXISTS t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TEMPORARY TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE TEMP TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) DISTKEY(col1) SORTKEY(col1) ; CREATE LOCAL TEMPORARY TABLE t1 ( col1 INTEGER UNIQUE, col2 VARCHAR(5) ) BACKUP YES ; CREATE TEMPORARY TABLE t1 ( col1 INTEGER PRIMARY KEY, col2 VARCHAR(5) ) BACKUP NO DISTKEY(col1) SORTKEY(col1, col2) ; CREATE TABLE t1 ( col1 INTEGER ENCODE AZ64 PRIMARY KEY, col2 VARCHAR(5) ENCODE TEXT255 ) DISTKEY(col1) SORTKEY AUTO DISTSTYLE EVEN ; CREATE TABLE schema1.t1 ( col1 INTEGER ENCODE AZ64 PRIMARY KEY, col2 VARCHAR(5) ENCODE TEXT255, col3 VARCHAR(5) COLLATE CASE_SENSITIVE, col3 VARCHAR(5) COLLATE CASE_INSENSITIVE ) ; CREATE TABLE UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , UNIQUE (col1) ) DISTKEY(col1) COMPOUND SORTKEY(col1, col2); CREATE TABLE UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , PRIMARY KEY (col1) ) DISTKEY(col1) INTERLEAVED SORTKEY (col1, col2); CREATE TEMP TABLE IF NOT EXISTS UniqueKey_demo ( col1 INT NOT NULL UNIQUE ,col2 DATE ,col3 VARCHAR(60 ) , FOREIGN KEY (col3) REFERENCES t2 (col5) ) ; CREATE TEMP TABLE t1 (LIKE schema1.t2); CREATE TEMP TABLE t1 (LIKE schema1.t2 INCLUDING DEFAULTS); CREATE TABLE t1 (LIKE schema1.t2 EXCLUDING DEFAULTS); CREATE TABLE some_schema.example_table ( LIKE some_schema.another_table INCLUDING DEFAULTS , LIKE some_schema.next_table EXCLUDING DEFAULTS ); CREATE TABLE some_schema.example_table ( LIKE some_schema.another_table INCLUDING DEFAULTS , col_name VARCHAR(5) ); CREATE TABLE some_table ( some_column INTEGER NOT NULL DEFAULT 1 ); CREATE TABLE IdentityColumn_demo ( col1 BIGINT IDENTITY ); CREATE TABLE IdentityColumnGeneratedByDefault_demo ( col1 BIGINT GENERATED BY DEFAULT AS IDENTITY ); CREATE TABLE IdentityColumnNotNull_demo ( col1 BIGINT IDENTITY NOT NULL ); CREATE TABLE IdentityColumnGeneratedByDefaultNotNull_demo ( col1 BIGINT GENERATED BY DEFAULT AS IDENTITY NOT NULL ); 
sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_table.yml000066400000000000000000000575671451700765000247610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d3fe19e4caf58d383de1e5d252eb2b50348dfcb5fa8a3a0a44196995e2453ce file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col4 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col5 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - comma: ',' - column_reference: naked_identifier: col6 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - 
keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col4 - comma: ',' - column_reference: naked_identifier: col5 - comma: ',' - column_reference: naked_identifier: col6 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_constraint_segment: keyword: REFERENCES table_reference: naked_identifier: t2 bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: LOCAL - keyword: TEMPORARY - keyword: TABLE - table_reference: 
naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - keyword: BACKUP - keyword: 'YES' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_attribute_segment: - keyword: ENCODE - keyword: AZ64 - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: ENCODE - keyword: TEXT255 - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - keyword: AUTO - keyword: DISTSTYLE - keyword: EVEN - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: schema1 - dot: . 
- naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INTEGER - column_attribute_segment: - keyword: ENCODE - keyword: AZ64 - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_reference: naked_identifier: col2 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: ENCODE - keyword: TEXT255 - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: COLLATE - keyword: CASE_SENSITIVE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - column_attribute_segment: - keyword: COLLATE - keyword: CASE_INSENSITIVE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: keyword: UNIQUE bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - end_bracket: ) - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: UniqueKey_demo - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - data_type: keyword: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: 
UNIQUE - comma: ',' - column_reference: naked_identifier: col2 - data_type: datetime_type_identifier: keyword: DATE - comma: ',' - column_reference: naked_identifier: col3 - data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '60' end_bracket: ) - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col3 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( column_reference: naked_identifier: col5 end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t2 like_option_segment: - keyword: EXCLUDING - keyword: DEFAULTS end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: example_table - bracketed: - start_bracket: ( - keyword: LIKE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: another_table - like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS - comma: ',' - keyword: LIKE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: next_table - like_option_segment: - keyword: EXCLUDING - keyword: DEFAULTS - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: example_table - bracketed: start_bracket: ( keyword: LIKE table_reference: - naked_identifier: some_schema - dot: . 
- naked_identifier: another_table like_option_segment: - keyword: INCLUDING - keyword: DEFAULTS comma: ',' column_reference: naked_identifier: col_name data_type: keyword: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: some_table - bracketed: start_bracket: ( column_reference: naked_identifier: some_column data_type: keyword: INTEGER column_constraint_segment: - keyword: NOT - keyword: 'NULL' column_attribute_segment: keyword: DEFAULT expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumn_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: keyword: IDENTITY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnGeneratedByDefault_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnNotNull_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: keyword: IDENTITY column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: IdentityColumnGeneratedByDefaultNotNull_demo - bracketed: start_bracket: ( column_reference: naked_identifier: col1 data_type: keyword: BIGINT column_attribute_segment: - keyword: GENERATED - keyword: BY - keyword: DEFAULT - keyword: AS - keyword: IDENTITY column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_table_as.sql000066400000000000000000000027721451700765000254260ustar00rootroot00000000000000CREATE TEMP TABLE t1 AS ( SELECT something FROM t2 ); CREATE TEMP TABLE t1 AS SELECT something FROM t2 ; CREATE TEMPORARY TABLE t1 AS SELECT something FROM t2 ; CREATE TABLE t1 AS ( SELECT something FROM t2 ); CREATE TABLE t1 AS SELECT something FROM t2 ; CREATE LOCAL TEMP TABLE t1 AS SELECT something FROM t2 ; CREATE TEMP TABLE t1 SORTKEY(col1) AS SELECT col1 FROM t2 ; CREATE TABLE t1 SORTKEY(col1) AS SELECT col1 FROM t2 ; CREATE TABLE t1 DISTKEY(col1) AS SELECT col1 FROM t2 ; CREATE TABLE t1 DISTKEY(col1) SORTKEY(col1) AS SELECT col1 FROM t2 ; CREATE TABLE t1 DISTSTYLE EVEN AS SELECT col1 FROM t2 ; CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) SORTKEY(col1) AS SELECT col1 FROM t2 ; CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) COMPOUND SORTKEY(col1, col2) AS SELECT col1 , col2 FROM t2 ; CREATE TABLE t1 DISTSTYLE ALL DISTKEY(col1) INTERLEAVED SORTKEY(col1, col2) AS SELECT col1 , col2 FROM t2 ; CREATE TABLE t1 (col1, col2) AS SELECT col1 , col2 FROM t2 ; CREATE TABLE t1 (col1, col2) BACKUP YES AS SELECT col1 , col2 FROM t2 ; CREATE TABLE t1 (col1, col2) BACKUP NO AS SELECT col1 , col2 FROM t2 ; CREATE TABLE t1 (col1, col2) AS ( 
SELECT col1 FROM tbl2 ); CREATE TABLE t1 (col1, col2) BACKUP NO DISTSTYLE ALL DISTKEY(col1) INTERLEAVED SORTKEY(col1, col2) AS ( SELECT col1 FROM tbl2 UNION ALL SELECT col2 FROM tbl3 ); sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_table_as.yml000066400000000000000000000410241451700765000254210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b8f22c87a7c42b555349270f25b8e7a4678df7ba7ffbd79b0e85b983148a8bb5 file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: LOCAL - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: something from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMP - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: SORTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: 
) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: SORTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: keyword: DISTKEY bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: EVEN - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: SORTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: COMPOUND - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: 
select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'YES' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: 
FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 end_bracket: ) - statement_terminator: ; - statement: create_table_as_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: BACKUP - keyword: 'NO' - table_constraint: - keyword: DISTSTYLE - keyword: ALL - keyword: DISTKEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: INTERLEAVED - keyword: SORTKEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl3 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_user.sql000066400000000000000000000023641451700765000246270ustar00rootroot00000000000000CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b'; CREATE USER admin WITH PASSWORD 'sha256|Mypassword1'; CREATE USER lazy PASSWORD DISABLE; CREATE USER lazy WITH PASSWORD DISABLE; CREATE USER "lazy" WITH PASSWORD DISABLE; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CREATEDB; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' NOCREATEDB; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CREATEUSER; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' NOCREATEUSER; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SYSLOG ACCESS RESTRICTED; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SYSLOG ACCESS UNRESTRICTED; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' IN GROUP group_1; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' IN GROUP group_1, group_2; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' VALID UNTIL '2017-06-10'; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CONNECTION LIMIT 30; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' CONNECTION LIMIT UNLIMITED; CREATE USER user1 PASSWORD 'md5153c434b4b77c89e6b94f12c5393af5b' SESSION TIMEOUT 120; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_user.yml000066400000000000000000000123521451700765000246270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b927b56eaf9af8b279a5d3e08157a66324f3bfd85c0d0ec3ce36b3b949a229d1 file: - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: admin - keyword: WITH - keyword: PASSWORD - quoted_literal: "'sha256|Mypassword1'" - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: lazy - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: lazy - keyword: WITH - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: quoted_identifier: '"lazy"' - keyword: WITH - keyword: PASSWORD - keyword: DISABLE - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CREATEDB - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: NOCREATEDB - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CREATEUSER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: NOCREATEUSER - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SYSLOG - keyword: ACCESS - keyword: RESTRICTED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SYSLOG - keyword: ACCESS - keyword: UNRESTRICTED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: IN - keyword: GROUP - object_reference: naked_identifier: group_1 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: IN - keyword: GROUP - object_reference: naked_identifier: group_1 - comma: ',' - object_reference: naked_identifier: group_2 - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: VALID - keyword: UNTIL - quoted_literal: "'2017-06-10'" - statement_terminator: ; - statement: create_user_statement: - keyword: 
CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CONNECTION - keyword: LIMIT - numeric_literal: '30' - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: CONNECTION - keyword: LIMIT - keyword: UNLIMITED - statement_terminator: ; - statement: create_user_statement: - keyword: CREATE - keyword: USER - role_reference: naked_identifier: user1 - keyword: PASSWORD - quoted_literal: "'md5153c434b4b77c89e6b94f12c5393af5b'" - keyword: SESSION - keyword: TIMEOUT - numeric_literal: '120' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_view.sql000066400000000000000000000001621451700765000246150ustar00rootroot00000000000000create view sales_vw as select * from public.sales union all select * from spectrum.sales with no schema binding; sqlfluff-2.3.5/test/fixtures/dialects/redshift/create_view.yml000066400000000000000000000034521451700765000246240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f74e44d078baf289d9899d89b523de92e3b9bbbc1e17970c385f14647931d931 file: statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: sales_vw - keyword: as - set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: public - dot: . - naked_identifier: sales - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: spectrum - dot: . 
- naked_identifier: sales - with_no_schema_binding_clause: - keyword: with - keyword: 'no' - keyword: schema - keyword: binding statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/dateparts.sql000066400000000000000000000065721451700765000243220ustar00rootroot00000000000000select date_part(millennium, foo) from tbl1; select date_part(millennia, foo) from tbl1; select date_part(mil, foo) from tbl1; select date_part(mils, foo) from tbl1; select date_part(century, foo) from tbl1; select date_part(centuries, foo) from tbl1; select date_part(c, foo) from tbl1; select date_part(cent, foo) from tbl1; select date_part(cents, foo) from tbl1; select date_part(decade, foo) from tbl1; select date_part(decades, foo) from tbl1; select date_part(dec, foo) from tbl1; select date_part(decs, foo) from tbl1; select extract(epoch from foo) from tbl1; select extract(year from foo) from tbl1; select extract(years from foo) from tbl1; select extract(y from foo) from tbl1; select extract(yr from foo) from tbl1; select extract(yrs from foo) from tbl1; select extract(quarter from foo) from tbl1; select extract(quarters from foo) from tbl1; select extract(qtr from foo) from tbl1; select extract(qtrs from foo) from tbl1; select extract(month from foo) from tbl1; select extract(months from foo) from tbl1; select extract(mon from foo) from tbl1; select extract(mons from foo) from tbl1; select extract(week from foo) from tbl1; select extract(weeks from foo) from tbl1; select extract(w from foo) from tbl1; select extract(dayofweek from foo) from tbl1; select extract(dow from foo) from tbl1; select extract(dw from foo) from tbl1; select extract(weekday from foo) from tbl1; select extract(dayofyear from foo) from tbl1; select extract(doy from foo) from tbl1; select extract(dy from foo) from tbl1; select extract(yearday from foo) from tbl1; select extract(day from foo) from tbl1; select extract(days from foo) from tbl1; select extract(d from foo) from tbl1; select extract(hour from foo) from tbl1; select extract(hours from foo) from tbl1; select extract(h from foo) from tbl1; select extract(hr from foo) from tbl1; select extract(hrs from foo) from tbl1; select extract(minute from foo) from tbl1; select extract(minutes from foo) from tbl1; select extract(m from foo) from tbl1; select extract(min from foo) from tbl1; select extract(mins from foo) from tbl1; select extract(second from foo) from tbl1; select extract(seconds from foo) from tbl1; select dateadd(s, 123, foo) from tbl1; select dateadd(sec, 123, foo) from tbl1; select dateadd(secs, 123, foo) from tbl1; select dateadd(millisecond, 123, foo) from tbl1; select dateadd(milliseconds, 123, foo) from tbl1; select dateadd(ms, 123, foo) from tbl1; select dateadd(msec, 123, foo) from tbl1; select dateadd(msecs, 123, foo) from tbl1; select dateadd(msecond, 123, foo) from tbl1; select dateadd(mseconds, 123, foo) from tbl1; select dateadd(millisec, 123, foo) from tbl1; select dateadd(millisecs, 123, foo) from tbl1; select dateadd(millisecon, 123, foo) from tbl1; select dateadd(microsecond, 123, foo) from tbl1; select dateadd(microseconds, 123, foo) from tbl1; select datediff(microsec, foo, bar) from tbl1; select datediff(microsecs, foo, bar) from tbl1; select datediff(microsecond, foo, bar) from tbl1; select datediff(usecond, foo, bar) from tbl1; select datediff(useconds, foo, bar) from tbl1; select datediff(us, foo, bar) from tbl1; select datediff(usec, foo, bar) from tbl1; select datediff(usecs, foo, bar) from tbl1; select datediff(timezone, foo, bar) from 
tbl1; select datediff(timezone_hour, foo, bar) from tbl1; select datediff(timezone_minute, foo, bar) from tbl1; sqlfluff-2.3.5/test/fixtures/dialects/redshift/dateparts.yml000066400000000000000000001561271451700765000243260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2e09566434aadc2390a179b1b719e485f2e2d76b71c682c929350f8acffbeeea file: - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: millennium comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: millennia comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: mil comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: mils comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: century comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: centuries comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: c comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: 
table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: cent comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: cents comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: decade comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: decades comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: dec comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: decs comma: ',' expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: epoch keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: year keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: 
function_name_identifier: extract bracketed: start_bracket: ( date_part: years keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: y keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: yr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: yrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: quarter keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: quarters keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: qtr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: qtrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: month keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: 
keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: months keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: mon keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: mons keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: week keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: weeks keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: w keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: dayofweek keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: dow keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: dw keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: weekday keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: dayofyear keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: doy keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: dy keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: yearday keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: day keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: days keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: d keyword: from 
expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: hour keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: hours keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: h keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: hr keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: hrs keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: minute keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: minutes keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: m keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: min keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: mins keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: second keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: seconds keyword: from expression: column_reference: naked_identifier: foo end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: s - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: sec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: secs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: millisecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: milliseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: ms - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: msec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: msecs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: msecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: mseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: millisec - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select 
select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: millisecs - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: millisecon - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: microseconds - comma: ',' - expression: numeric_literal: '123' - comma: ',' - expression: column_reference: naked_identifier: foo - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: microsec - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: microsecs - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - 
start_bracket: ( - date_part: usecond - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: useconds - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: us - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: usec - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: usecs - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: timezone - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: timezone_hour - comma: ',' - expression: column_reference: naked_identifier: foo - comma: ',' - expression: column_reference: naked_identifier: bar - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: timezone_minute - comma: 
','
            - expression:
                column_reference:
                  naked_identifier: foo
            - comma: ','
            - expression:
                column_reference:
                  naked_identifier: bar
            - end_bracket: )
      from_clause:
        keyword: from
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: tbl1
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/datetime_cast.sql000066400000000000000000000020461451700765000251310ustar00rootroot00000000000000-- redshift_datetime_cast.sql
/* Example that casts a column to several DATETIME types */
-- from https://github.com/sqlfluff/sqlfluff/issues/2649
SELECT b::DATETIME FROM a;

-- DATE
SELECT b::DATE FROM a;

-- TIME
SELECT
    b::TIME,
    c::TIME WITH TIME ZONE,
    d::TIME WITHOUT TIME ZONE
FROM a;

-- TIMETZ
SELECT b::TIMETZ FROM a;

-- TIMESTAMP
SELECT
    b::TIMESTAMP,
    c::TIMESTAMP WITHOUT TIME ZONE,
    d::TIMESTAMP WITH TIME ZONE
FROM a;

-- TIMESTAMPTZ
SELECT b::TIMESTAMPTZ FROM a;

--- AT TIME ZONE
SELECT
    raw_data.status::VARCHAR AS status,
    raw_data.start::TIMESTAMPTZ AT TIME ZONE 'UTC' AS started_at,
    raw_data."end"::TIMESTAMPTZ AT TIME ZONE 'UTC' AS ended_at,
    raw_data.created::TIMESTAMPTZ AT TIME ZONE 'UTC' AS created_at,
    raw_data.identifier[0].value::VARCHAR AS communication_request_fhir_reference_origin,
    extension_extraction.database_reference,
    GETDATE() AT TIME ZONE 'UTC',
    (GETDATE() AT TIME ZONE 'UTC') AT TIME ZONE 'AEST',
    ((GETDATE() AT TIME ZONE 'UTC') AT TIME ZONE 'AEST')
FROM raw_data
sqlfluff-2.3.5/test/fixtures/dialects/redshift/datetime_cast.yml000066400000000000000000000261551451700765000251400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
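# Illustrative aside (not generated fixture output): the tree recorded below
# can be reproduced with SQLFluff's documented `parse` command against the
# paired SQL file above, e.g. from the repository root:
#
#   sqlfluff parse --dialect redshift \
#       test/fixtures/dialects/redshift/datetime_cast.sql
#
# The printed segment hierarchy (select_statement, cast_expression,
# time_zone_grammar, ...) is exactly what the YAML below pins down.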
_hash: 9c3b8503c04d37b31fd23df096871fa914716b4e5b455d115c2c348ec678aba9 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: DATETIME from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: DATE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIME - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIME - keyword: WITH - keyword: TIME - keyword: ZONE - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: d casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIME - keyword: WITHOUT - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMETZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMP - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: c casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITHOUT - keyword: TIME - keyword: ZONE - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: d casting_operator: '::' data_type: datetime_type_identifier: - keyword: TIMESTAMP - keyword: WITH - keyword: TIME - keyword: ZONE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: b casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . 
- naked_identifier: status casting_operator: '::' data_type: keyword: VARCHAR alias_expression: keyword: AS naked_identifier: status - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: start casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: keyword: AS naked_identifier: started_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: raw_data dot: . quoted_identifier: '"end"' casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: keyword: AS naked_identifier: ended_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: created casting_operator: '::' data_type: datetime_type_identifier: keyword: TIMESTAMPTZ time_zone_grammar: - keyword: AT - keyword: TIME - keyword: ZONE - expression: quoted_literal: "'UTC'" alias_expression: keyword: AS naked_identifier: created_at - comma: ',' - select_clause_element: expression: cast_expression: column_reference: - naked_identifier: raw_data - dot: . - naked_identifier: identifier array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: value casting_operator: '::' data_type: keyword: VARCHAR alias_expression: keyword: AS naked_identifier: communication_request_fhir_reference_origin - comma: ',' - select_clause_element: column_reference: - naked_identifier: extension_extraction - dot: . 
          - naked_identifier: database_reference
      - comma: ','
      - select_clause_element:
          expression:
            function:
              function_name:
                function_name_identifier: GETDATE
              bracketed:
                start_bracket: (
                end_bracket: )
            time_zone_grammar:
            - keyword: AT
            - keyword: TIME
            - keyword: ZONE
            - expression:
                quoted_literal: "'UTC'"
      - comma: ','
      - select_clause_element:
          expression:
            bracketed:
              start_bracket: (
              expression:
                function:
                  function_name:
                    function_name_identifier: GETDATE
                  bracketed:
                    start_bracket: (
                    end_bracket: )
                time_zone_grammar:
                - keyword: AT
                - keyword: TIME
                - keyword: ZONE
                - expression:
                    quoted_literal: "'UTC'"
              end_bracket: )
            time_zone_grammar:
            - keyword: AT
            - keyword: TIME
            - keyword: ZONE
            - expression:
                quoted_literal: "'AEST'"
      - comma: ','
      - select_clause_element:
          expression:
            bracketed:
              start_bracket: (
              expression:
                bracketed:
                  start_bracket: (
                  expression:
                    function:
                      function_name:
                        function_name_identifier: GETDATE
                      bracketed:
                        start_bracket: (
                        end_bracket: )
                    time_zone_grammar:
                    - keyword: AT
                    - keyword: TIME
                    - keyword: ZONE
                    - expression:
                        quoted_literal: "'UTC'"
                  end_bracket: )
                time_zone_grammar:
                - keyword: AT
                - keyword: TIME
                - keyword: ZONE
                - expression:
                    quoted_literal: "'AEST'"
              end_bracket: )
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: raw_data
sqlfluff-2.3.5/test/fixtures/dialects/redshift/declare.sql000066400000000000000000000003661451700765000237250ustar00rootroot00000000000000declare curs1 cursor for select col1, col2 from tbl1;

declare lollapalooza cursor for
select eventname, starttime, pricepaid/qtysold as costperticket, qtysold
from sales, event
where sales.eventid = event.eventid
and eventname = 'lollapalooza';
sqlfluff-2.3.5/test/fixtures/dialects/redshift/declare.yml000066400000000000000000000060361451700765000237270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
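# Minimal Python sketch of the same check (an aside; it assumes SQLFluff's
# simple Python API exposes `parse` with a `dialect` keyword alongside `lint`
# and `fix` -- treat the exact signature and return shape as assumptions):
#
#   import sqlfluff
#
#   sql = "declare curs1 cursor for select col1, col2 from tbl1;"
#   tree = sqlfluff.parse(sql, dialect="redshift")  # raises on parse errors
#
# Fixtures such as this file record the expected tree for exactly that input.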
_hash: 84dc8ff69f7310fee83249b1c94b3d81315bdecb8d12ff98a9e40e1b675e9cc1
file:
- statement:
    declare_statement:
    - keyword: declare
    - object_reference:
        naked_identifier: curs1
    - keyword: cursor
    - keyword: for
    - select_statement:
        select_clause:
        - keyword: select
        - select_clause_element:
            column_reference:
              naked_identifier: col1
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: col2
        from_clause:
          keyword: from
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: tbl1
- statement_terminator: ;
- statement:
    declare_statement:
    - keyword: declare
    - object_reference:
        naked_identifier: lollapalooza
    - keyword: cursor
    - keyword: for
    - select_statement:
        select_clause:
        - keyword: select
        - select_clause_element:
            column_reference:
              naked_identifier: eventname
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: starttime
        - comma: ','
        - select_clause_element:
            expression:
            - column_reference:
                naked_identifier: pricepaid
            - binary_operator: /
            - column_reference:
                naked_identifier: qtysold
            alias_expression:
              keyword: as
              naked_identifier: costperticket
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: qtysold
        from_clause:
        - keyword: from
        - from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: sales
        - comma: ','
        - from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: event
        where_clause:
          keyword: where
          expression:
          - column_reference:
            - naked_identifier: sales
            - dot: .
            - naked_identifier: eventid
          - comparison_operator:
              raw_comparison_operator: '='
          - column_reference:
            - naked_identifier: event
            - dot: .
            - naked_identifier: eventid
          - binary_operator: and
          - column_reference:
              naked_identifier: eventname
          - comparison_operator:
              raw_comparison_operator: '='
          - quoted_literal: "'lollapalooza'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/desc_datashare.sql000066400000000000000000000003221451700765000252500ustar00rootroot00000000000000DESC DATASHARE salesshare;

DESC DATASHARE salesshare of ACCOUNT '123456789012' NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';

DESC DATASHARE salesshare of NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';
sqlfluff-2.3.5/test/fixtures/dialects/redshift/desc_datashare.yml000066400000000000000000000022511451700765000252550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4691d7c04dfbf142b4bb020f5eae0d0ef92bda7559061c3005d1b80c0a5a8a1b
file:
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
- statement_terminator: ;
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
    - keyword: of
    - keyword: ACCOUNT
    - quoted_literal: "'123456789012'"
    - keyword: NAMESPACE
    - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'"
- statement_terminator: ;
- statement:
    desc_datashare_statement:
    - keyword: DESC
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: salesshare
    - keyword: of
    - keyword: NAMESPACE
    - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/detach_rls_policy.sql000066400000000000000000000002741451700765000260130ustar00rootroot00000000000000DETACH RLS POLICY policy_concerts
ON tickit_category_redshift
FROM ROLE analyst, ROLE dbadmin;

DETACH RLS POLICY policy_concerts
ON TABLE tickit_category_redshift
FROM ROLE role1, user1;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/detach_rls_policy.yml000066400000000000000000000025171451700765000260170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 2acff9b85e6066bdd75036b61be3b4bbf875e9a6871849e701e489e9eeffcdf6
file:
- statement:
    manage_rls_policy_statement:
    - keyword: DETACH
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: 'ON'
    - table_reference:
        naked_identifier: tickit_category_redshift
    - keyword: FROM
    - keyword: ROLE
    - role_reference:
        naked_identifier: analyst
    - comma: ','
    - keyword: ROLE
    - role_reference:
        naked_identifier: dbadmin
- statement_terminator: ;
- statement:
    manage_rls_policy_statement:
    - keyword: DETACH
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: 'ON'
    - keyword: TABLE
    - table_reference:
        naked_identifier: tickit_category_redshift
    - keyword: FROM
    - keyword: ROLE
    - role_reference:
        naked_identifier: role1
    - comma: ','
    - role_reference:
        naked_identifier: user1
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_datashare.sql000066400000000000000000000000371451700765000253010ustar00rootroot00000000000000DROP DATASHARE datashare_name;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_datashare.yml000066400000000000000000000010751451700765000253060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3b8ae2d1d669764b83baf7064840fefa0fdbb1d7be0cdfdbc7af34ab51d3eae9
file:
  statement:
    drop_datashare_statement:
    - keyword: DROP
    - keyword: DATASHARE
    - object_reference:
        naked_identifier: datashare_name
  statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_procedure.sql000066400000000000000000000002521451700765000253340ustar00rootroot00000000000000DROP PROCEDURE quarterly_revenue(volume INOUT bigint, at_price IN numeric,result OUT int);
DROP PROCEDURE quarterly_revenue(volume bigint, at_price numeric,result int);
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_procedure.yml000066400000000000000000000030661451700765000253440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 7ab6533c8e6ad875d468410a3ec892f0d3f49adbabad9a52194178d5898d77e7
file:
- statement:
    drop_procedure_statement:
    - keyword: DROP
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: quarterly_revenue
    - procedure_parameter_list:
        bracketed:
        - start_bracket: (
        - parameter: volume
        - keyword: INOUT
        - data_type:
            keyword: bigint
        - comma: ','
        - parameter: at_price
        - keyword: IN
        - data_type:
            keyword: numeric
        - comma: ','
        - parameter: result
        - keyword: OUT
        - data_type:
            keyword: int
        - end_bracket: )
- statement_terminator: ;
- statement:
    drop_procedure_statement:
    - keyword: DROP
    - keyword: PROCEDURE
    - function_name:
        function_name_identifier: quarterly_revenue
    - procedure_parameter_list:
        bracketed:
        - start_bracket: (
        - parameter: volume
        - data_type:
            keyword: bigint
        - comma: ','
        - parameter: at_price
        - data_type:
            keyword: numeric
        - comma: ','
        - parameter: result
        - data_type:
            keyword: int
        - end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_rls_policy.sql000066400000000000000000000001251451700765000255220ustar00rootroot00000000000000DROP RLS POLICY policy_concerts;
DROP RLS POLICY IF EXISTS policy_concerts CASCADE;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/drop_rls_policy.yml000066400000000000000000000015231451700765000255270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c159af16f7c7f42ae004cae6da4392cf60c21acd1ed7857aad4c199c47866af2
file:
- statement:
    drop_rls_policy_statement:
    - keyword: DROP
    - keyword: RLS
    - keyword: POLICY
    - object_reference:
        naked_identifier: policy_concerts
- statement_terminator: ;
- statement:
    drop_rls_policy_statement:
    - keyword: DROP
    - keyword: RLS
    - keyword: POLICY
    - keyword: IF
    - keyword: EXISTS
    - object_reference:
        naked_identifier: policy_concerts
    - keyword: CASCADE
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/fetch.sql000066400000000000000000000001541451700765000234120ustar00rootroot00000000000000fetch next from curs1;
fetch all from curs1;
fetch forward 100 from curs1;
fetch forward all from curs1;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/fetch.yml000066400000000000000000000022031451700765000234110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: cc2d7173c01b14c8678d87572594149d6be691a41a16eb9cb3246d9cd5999561
file:
- statement:
    fetch_statement:
    - keyword: fetch
    - keyword: next
    - keyword: from
    - object_reference:
        naked_identifier: curs1
- statement_terminator: ;
- statement:
    fetch_statement:
    - keyword: fetch
    - keyword: all
    - keyword: from
    - object_reference:
        naked_identifier: curs1
- statement_terminator: ;
- statement:
    fetch_statement:
    - keyword: fetch
    - keyword: forward
    - numeric_literal: '100'
    - keyword: from
    - object_reference:
        naked_identifier: curs1
- statement_terminator: ;
- statement:
    fetch_statement:
    - keyword: fetch
    - keyword: forward
    - keyword: all
    - keyword: from
    - object_reference:
        naked_identifier: curs1
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/redshift/grant_datashare_usage.sql000066400000000000000000000003521451700765000266340ustar00rootroot00000000000000GRANT USAGE ON DATASHARE salesshare TO ACCOUNT '123456789012';

GRANT USAGE ON DATASHARE salesshare TO ACCOUNT '123456789012' VIA DATA CATALOG;

GRANT USAGE ON DATASHARE salesshare TO NAMESPACE '13b8833d-17c6-4f16-8fe4-1a018f5ed00d';
sqlfluff-2.3.5/test/fixtures/dialects/redshift/grant_datashare_usage.yml000066400000000000000000000025401451700765000266370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
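# Illustrative workflow note (an aside; the command is taken verbatim from the
# header above, and any extra arguments it may accept are not assumed here):
#
#   # 1. add or edit a SQL fixture, e.g.
#   #    test/fixtures/dialects/redshift/grant_datashare_usage.sql
#   # 2. regenerate the paired YAML, including the _hash field below:
#   python test/generate_parse_fixture_yml.py
#
# Hand-editing the YAML instead would leave _hash stale and fail the tests.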
_hash: eea43c984fa81253410c845607dcb4bd79ec3613757b73dccd4b1933ac0d1542 file: - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: ACCOUNT - quoted_literal: "'123456789012'" - statement_terminator: ; - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: ACCOUNT - quoted_literal: "'123456789012'" - keyword: VIA - keyword: DATA - keyword: CATALOG - statement_terminator: ; - statement: grant_datashare_statement: - keyword: GRANT - keyword: USAGE - keyword: 'ON' - keyword: DATASHARE - object_reference: naked_identifier: salesshare - keyword: TO - keyword: NAMESPACE - quoted_literal: "'13b8833d-17c6-4f16-8fe4-1a018f5ed00d'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/insert_into.sql000066400000000000000000000006401451700765000246560ustar00rootroot00000000000000INSERT INTO s1.t1 ( SELECT col1, col2, col3 FROM testtable ); INSERT INTO s1.t1 (col1, col2) ( select col1, col2, col3 from testtable ); INSERT INTO schema1.t1 SELECT col1, col2, col3 FROM testtable ; INSERT INTO schema1.t1 DEFAULT VALUES ; INSERT INTO s1.t1 (col1, col2) VALUES ('V1', 1), ('V2', 2) ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/insert_into.yml000066400000000000000000000105271451700765000246650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ee1a3b3030c6f05b25175b6ecdd50004fa36fd3e688e54e5f54c24c05e2c2969 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: schema1 - dot: . 
- naked_identifier: t1 - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: t1 - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: s1 - dot: . - naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: quoted_literal: "'V1'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'V2'" - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/lock_table.sql000066400000000000000000000001211451700765000244120ustar00rootroot00000000000000lock event, sales; LOCK TABLE schema_name.table_name1, schema_name.table_name2; sqlfluff-2.3.5/test/fixtures/dialects/redshift/lock_table.yml000066400000000000000000000016751451700765000244330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d695887ab9247049a3b4f0e904609c3e52b445016e429664b97e11c1758cbae7 file: - statement: lock_table_statement: - keyword: lock - table_reference: naked_identifier: event - comma: ',' - table_reference: naked_identifier: sales - statement_terminator: ; - statement: lock_table_statement: - keyword: LOCK - keyword: TABLE - table_reference: - naked_identifier: schema_name - dot: . - naked_identifier: table_name1 - comma: ',' - table_reference: - naked_identifier: schema_name - dot: . - naked_identifier: table_name2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/object_unpivot.sql000066400000000000000000000011641451700765000253550ustar00rootroot00000000000000WITH example_data AS ( SELECT 10 AS shop_id , json_parse('{"apple_count": 2, "orange_count": 6}') AS inventory UNION ALL SELECT 20 AS shop_id , json_parse('{"pear_count": 10, "other_data": 42}') AS inventory UNION ALL SELECT 30 AS shop_id , json_parse('{"apple_count": 3, "lemon_count": 5}') AS inventory ) SELECT shop_id , key , value FROM example_data ed, UNPIVOT ed.inventory AS value AT key; SELECT attr as attribute_name, val as object_value FROM customer_orders_lineitem c, c.c_orders AS o, UNPIVOT o AS val AT attr WHERE c_custkey = 9451; sqlfluff-2.3.5/test/fixtures/dialects/redshift/object_unpivot.yml000066400000000000000000000140751451700765000253640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 13b725638b2155b43d565a2538a62acd701b5466c0836b1c1f2664519f050262 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: example_data keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '10' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'{\"apple_count\": 2, \"orange_count\": 6}'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '20' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'{\"pear_count\": 10, \"other_data\": 42}'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '30' alias_expression: keyword: AS naked_identifier: shop_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: json_parse bracketed: start_bracket: ( expression: quoted_literal: "'{\"apple_count\": 3, \"lemon_count\": 5}'" end_bracket: ) alias_expression: keyword: AS naked_identifier: inventory end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: shop_id - comma: ',' - select_clause_element: column_reference: naked_identifier: key - comma: ',' - select_clause_element: column_reference: naked_identifier: value from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: example_data alias_expression: naked_identifier: ed - comma: ',' - from_expression: from_expression_element: table_expression: object_unpivoting: - keyword: UNPIVOT - object_reference: - naked_identifier: ed - dot: . - naked_identifier: inventory - keyword: AS - naked_identifier: value - keyword: AT - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: attr alias_expression: keyword: as naked_identifier: attribute_name - comma: ',' - select_clause_element: column_reference: naked_identifier: val alias_expression: keyword: as naked_identifier: object_value from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . 
- naked_identifier: c_orders alias_expression: keyword: AS naked_identifier: o - comma: ',' - from_expression: from_expression_element: table_expression: object_unpivoting: - keyword: UNPIVOT - object_reference: naked_identifier: o - keyword: AS - naked_identifier: val - keyword: AT - naked_identifier: attr where_clause: keyword: WHERE expression: column_reference: naked_identifier: c_custkey comparison_operator: raw_comparison_operator: '=' numeric_literal: '9451' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/pattern_match_expressions.sql000066400000000000000000000030501451700765000276120ustar00rootroot00000000000000-- redshift_pattern_match_expressions.sql /* examples of pattern match expressions ( https://docs.aws.amazon.com/redshift/latest/dg/pattern-matching-conditions.html ) that are supported in redshift. */ -- LIKE/ILIKE expressions supported SELECT * FROM animals WHERE family LIKE '%ursidae%'; SELECT * FROM animals WHERE family NOT LIKE '%ursidae%'; SELECT * FROM animals WHERE genus ILIKE '%ursus%'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%'; SELECT * FROM animals WHERE family LIKE '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT ILIKE '%ursus%' ESCAPE '\\'; SELECT COALESCE(family LIKE '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- SIMILAR TO expressions supported SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE family NOT SIMILAR TO '%ursidae%'; SELECT * FROM animals WHERE genus SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%'; SELECT * FROM animals WHERE family SIMILAR TO '%ursidae%' ESCAPE '\\'; SELECT * FROM animals WHERE genus NOT SIMILAR TO '%ursus%' ESCAPE '\\'; SELECT COALESCE(family SIMILAR TO '%ursidae%' ESCAPE '\\', FALSE) AS is_bear FROM animals; -- From https://github.com/sqlfluff/sqlfluff/issues/2722 WITH cleaned_bear_financial_branch AS ( SELECT branch_id, TO_NUMBER(CASE WHEN honey_numerical_code SIMILAR TO '[0-9]{0,7}.?[0-9]{0,2}' THEN honey_numerical_code ELSE NULL END, '24601') AS honey_numerical_code FROM bear_financial_branch ) SELECT branch_id FROM cleaned_bear_financial_branch LIMIT 10; sqlfluff-2.3.5/test/fixtures/dialects/redshift/pattern_match_expressions.yml000066400000000000000000000311761451700765000276260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: aa70ac109a77bef6e01f8fba80b5c7474d2fd192720d43719d24f668ff025471 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: family keyword: LIKE quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: LIKE - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: column_reference: naked_identifier: genus keyword: ILIKE quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: ILIKE - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: LIKE - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals where_clause: keyword: WHERE expression: - column_reference: naked_identifier: genus - keyword: NOT - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursus%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COALESCE bracketed: - start_bracket: ( - expression: - column_reference: naked_identifier: family - keyword: SIMILAR - keyword: TO - quoted_literal: "'%ursidae%'" - keyword: ESCAPE - quoted_literal: "'\\\\'" - comma: ',' - expression: boolean_literal: 'FALSE' - end_bracket: ) alias_expression: keyword: AS naked_identifier: is_bear from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: animals - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cleaned_bear_financial_branch keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch_id - comma: ',' - 
select_clause_element: function: function_name: function_name_identifier: TO_NUMBER bracketed: - start_bracket: ( - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: honey_numerical_code - keyword: SIMILAR - keyword: TO - quoted_literal: "'[0-9]{0,7}.?[0-9]{0,2}'" - keyword: THEN - expression: column_reference: naked_identifier: honey_numerical_code - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - comma: ',' - expression: quoted_literal: "'24601'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: honey_numerical_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_financial_branch end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: branch_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cleaned_bear_financial_branch limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/percentile_cont.sql000066400000000000000000000005131451700765000254750ustar00rootroot00000000000000select dataset_id, (percentile_cont(0.20) within group ( order by tract_percent_below_poverty asc ) over(partition by dataset_id)) as percentile_20, percentile_cont(0.40) within group (order by tract_percent_below_poverty asc) over(partition by dataset_id) as percentile_40 from dataset_with_census sqlfluff-2.3.5/test/fixtures/dialects/redshift/percentile_cont.yml000066400000000000000000000067411451700765000255100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 02ff595f7414aaa7b2d4ba4d6602d3c51c72bee1801a8b0911aa961c95ef1f8b file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: dataset_id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: percentile_cont bracketed: start_bracket: ( expression: numeric_literal: '0.20' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: tract_percent_below_poverty - keyword: asc end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: dataset_id end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: percentile_20 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: percentile_cont bracketed: start_bracket: ( expression: numeric_literal: '0.40' end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: tract_percent_below_poverty - keyword: asc end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: dataset_id end_bracket: ) alias_expression: keyword: as naked_identifier: percentile_40 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dataset_with_census sqlfluff-2.3.5/test/fixtures/dialects/redshift/pivot.sql000066400000000000000000000024631451700765000234670ustar00rootroot00000000000000-- redshift_pivot.sql /* Examples of SELECT statements that include PIVOT expressions. */ -- Below examples come from -- https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html SELECT * FROM (SELECT partname, price FROM part) PIVOT ( AVG(price) FOR partname IN ('P1', 'P2', 'P3') ); SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) FOR quality IN (1, 2, NULL) ); SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) AS count FOR quality IN (1 AS high, 2 AS low, NULL AS na) ); -- End of AWS-provided examples -- Can do PIVOTs for CTEs WITH bear_diet AS ( SELECT bear_id, bear_species, food_eaten FROM bear_facts ) SELECT * FROM bear_diet PIVOT ( COUNT(*) AS num_ate_food FOR bear_species IN ( 'polar bear', 'brown bear', 'american black bear', 'asian black bear', 'giant panda', 'spectacled bear', 'sloth bear', 'sun bear' ) ); -- Can do Pivots for tables SELECT * FROM orders PIVOT (COUNT(*) FOR color IN ('red', 'blue')); -- Can also alias the pivoted table SELECT * FROM (SELECT quality, manufacturer FROM part) PIVOT ( COUNT(*) FOR quality IN (1, 2, NULL) ) AS quality_matrix; sqlfluff-2.3.5/test/fixtures/dialects/redshift/pivot.yml000066400000000000000000000314451451700765000234730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ff85c92563dc231672cecea03274f814226ae925881e448676b60e73d0ffc384 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: partname - comma: ',' - select_clause_element: column_reference: naked_identifier: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - keyword: FOR - column_reference: naked_identifier: partname - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'P1'" - comma: ',' - expression: quoted_literal: "'P2'" - comma: ',' - expression: quoted_literal: "'P3'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: AS naked_identifier: count - keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - alias_expression: keyword: AS naked_identifier: high - comma: ',' - expression: numeric_literal: '2' - alias_expression: keyword: AS naked_identifier: low - comma: ',' - expression: null_literal: 'NULL' - alias_expression: keyword: AS naked_identifier: na - end_bracket: ) - end_bracket: ) - 
statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: bear_diet keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: bear_id - comma: ',' - select_clause_element: column_reference: naked_identifier: bear_species - comma: ',' - select_clause_element: column_reference: naked_identifier: food_eaten from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_facts end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bear_diet from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: AS naked_identifier: num_ate_food - keyword: FOR - column_reference: naked_identifier: bear_species - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'polar bear'" - comma: ',' - expression: quoted_literal: "'brown bear'" - comma: ',' - expression: quoted_literal: "'american black bear'" - comma: ',' - expression: quoted_literal: "'asian black bear'" - comma: ',' - expression: quoted_literal: "'giant panda'" - comma: ',' - expression: quoted_literal: "'spectacled bear'" - comma: ',' - expression: quoted_literal: "'sloth bear'" - comma: ',' - expression: quoted_literal: "'sun bear'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'red'" - comma: ',' - expression: quoted_literal: "'blue'" - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: quality - comma: ',' - select_clause_element: column_reference: naked_identifier: manufacturer from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: part end_bracket: ) from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) - keyword: FOR - column_reference: naked_identifier: quality - keyword: IN - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: null_literal: 'NULL' - end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: 
quality_matrix - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/qualify.sql000066400000000000000000000011211451700765000237660ustar00rootroot00000000000000SELECT * FROM store_sales ss WHERE ss_sold_time > time '12:00:00' QUALIFY row_number() OVER (PARTITION BY ss_sold_date ORDER BY ss_sales_price DESC) <= 2 ; SELECT * FROM store_sales ss QUALIFY last_value(ss_item) OVER (PARTITION BY ss_sold_date ORDER BY ss_sold_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) = ss_item ; SELECT * FROM ( SELECT *, last_value(ss_item) OVER (PARTITION BY ss_sold_date ORDER BY ss_sold_time ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) ss_last_item FROM store_sales ss ) WHERE ss_last_item = ss_item ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/qualify.yml000066400000000000000000000163031451700765000240000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f8ec90aabd90231d62011306ce630348533b303dcde77e4ebcd094b905215b8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss where_clause: keyword: WHERE expression: column_reference: naked_identifier: ss_sold_time comparison_operator: raw_comparison_operator: '>' datetime_literal: datetime_type_identifier: keyword: time quoted_literal: "'12:00:00'" qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: row_number bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sales_price - keyword: DESC end_bracket: ) comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss qualify_clause: keyword: QUALIFY expression: function: function_name: function_name_identifier: last_value bracketed: start_bracket: ( expression: column_reference: naked_identifier: ss_item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sold_time - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: ss_item - statement_terminator: ; - statement: 
select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last_value bracketed: start_bracket: ( expression: column_reference: naked_identifier: ss_item end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: ss_sold_date orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ss_sold_time - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) alias_expression: naked_identifier: ss_last_item from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store_sales alias_expression: naked_identifier: ss end_bracket: ) where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ss_last_item - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ss_item - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_dateadd.sql000066400000000000000000000000471451700765000252470ustar00rootroot00000000000000select dateadd(month,18,'2008-02-28'); sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_dateadd.yml000066400000000000000000000016371451700765000252570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 805fdc4283238f2e0ab9c733bb9be46cd988c249f80c0e0cf68ca3d67df8e98a file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: '18' - comma: ',' - expression: quoted_literal: "'2008-02-28'" - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_datetime_functions.sql000066400000000000000000000005271451700765000275500ustar00rootroot00000000000000SELECT current_date; SELECT sysdate; SELECT current_timestamp; SELECT TRUNC(sysdate); -- As taken from: https://docs.aws.amazon.com/redshift/latest/dg/r_SYSDATE.html SELECT salesid, pricepaid, TRUNC(saletime) AS saletime, TRUNC(sysdate) AS now FROM sales WHERE saletime BETWEEN TRUNC(sysdate)-120 AND TRUNC(sysdate) ORDER BY saletime ASC; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_datetime_functions.yml000066400000000000000000000066461451700765000275620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0edddefa88485c2eef616da694d0bdf265eac65a122823622dc8b26f3a003e96 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: current_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: sysdate - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: current_timestamp - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: TRUNC bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: salesid - comma: ',' - select_clause_element: column_reference: naked_identifier: pricepaid - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRUNC bracketed: start_bracket: ( expression: column_reference: naked_identifier: saletime end_bracket: ) alias_expression: keyword: AS naked_identifier: saletime - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRUNC bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) alias_expression: keyword: AS naked_identifier: now from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales where_clause: keyword: WHERE expression: - column_reference: naked_identifier: saletime - keyword: BETWEEN - function: function_name: function_name_identifier: TRUNC bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) - binary_operator: '-' - numeric_literal: '120' - keyword: AND - function: function_name: function_name_identifier: TRUNC bracketed: start_bracket: ( expression: bare_function: sysdate end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: saletime - keyword: ASC - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_first_value.sql000066400000000000000000000001121451700765000261750ustar00rootroot00000000000000select first_value(finalsaleprice ignore nulls) over () as c1 from table1 sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_first_value.yml000066400000000000000000000024071451700765000262100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5559c10330f7e208f3eab0356e5d79162d8d5c1a6b434c7de5fe68b101b77e37 file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: first_value bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: finalsaleprice - keyword: ignore - keyword: nulls - end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: as naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_from_with_parenthesis.sql000066400000000000000000000002501451700765000302600ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/3955 SELECT table_1.id FROM (table_1); SELECT table_1.id FROM (table_1 INNER JOIN table_2 ON table_2.id = table_1.id); sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_from_with_parenthesis.yml000066400000000000000000000042151451700765000302670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 09e3fcbf2cb0a63aeb30e1f39ff029bf24683b7644c11a9405b6ecc361315069 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id from_clause: keyword: FROM bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id from_clause: keyword: FROM bracketed: start_bracket: ( from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: id end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_into.sql000066400000000000000000000003161451700765000246310ustar00rootroot00000000000000select * into newevent from event; select username, lastname, sum(pricepaid-commission) as profit into temp table profits from sales, users where sales.sellerid=users.userid group by 1, 2 order by 3 desc; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_into.yml000066400000000000000000000056021451700765000246360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 458e0eb72d649723778066a6cebd98c64ba6230b9853f9119f7603f2e5a79776 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' into_clause: keyword: into table_reference: naked_identifier: newevent from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: event - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: username - comma: ',' - select_clause_element: column_reference: naked_identifier: lastname - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: - column_reference: naked_identifier: pricepaid - binary_operator: '-' - column_reference: naked_identifier: commission end_bracket: ) alias_expression: keyword: as naked_identifier: profit into_clause: - keyword: into - keyword: temp - keyword: table - table_reference: naked_identifier: profits from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: users where_clause: keyword: where expression: - column_reference: - naked_identifier: sales - dot: . - naked_identifier: sellerid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: users - dot: . - naked_identifier: userid groupby_clause: - keyword: group - keyword: by - numeric_literal: '1' - comma: ',' - numeric_literal: '2' orderby_clause: - keyword: order - keyword: by - numeric_literal: '3' - keyword: desc - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_keywords.sql000066400000000000000000000011041451700765000255230ustar00rootroot00000000000000SELECT pg_namespace.nspname AS constraint_schema, pg_constraint.conname AS constraint_name FROM pg_namespace, pg_constraint WHERE pg_namespace.oid = pg_constraint.connamespace; -- As taken from: https://docs.aws.amazon.com/redshift/latest/dg/c_join_PG_examples.html create view tables_vw as select distinct(id) table_id ,trim(datname) db_name ,trim(nspname) schema_name ,trim(relname) table_name from stv_tbl_perm join pg_class on pg_class.oid = stv_tbl_perm.id join pg_namespace on pg_namespace.oid = relnamespace join pg_database on pg_database.oid = stv_tbl_perm.db_id; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_keywords.yml000066400000000000000000000135721451700765000255410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 66bd867c845d06047b20ecb4c54371d7ec41ffaa8a00d82a85dd0d4759c88c10 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: pg_namespace - dot: . - naked_identifier: nspname alias_expression: keyword: AS naked_identifier: constraint_schema - comma: ',' - select_clause_element: column_reference: - naked_identifier: pg_constraint - dot: . 
- naked_identifier: conname alias_expression: keyword: AS naked_identifier: constraint_name from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_namespace - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pg_constraint where_clause: keyword: WHERE expression: - column_reference: naked_identifier: pg_namespace dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: pg_constraint - dot: . - naked_identifier: connamespace - statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: tables_vw - keyword: as - select_statement: select_clause: - keyword: select - select_clause_modifier: keyword: distinct - select_clause_element: expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: naked_identifier: table_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim bracketed: start_bracket: ( expression: column_reference: naked_identifier: datname end_bracket: ) alias_expression: naked_identifier: db_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim bracketed: start_bracket: ( expression: column_reference: naked_identifier: nspname end_bracket: ) alias_expression: naked_identifier: schema_name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: trim bracketed: start_bracket: ( expression: column_reference: naked_identifier: relname end_bracket: ) alias_expression: naked_identifier: table_name from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: stv_tbl_perm - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_class join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_class dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: stv_tbl_perm - dot: . - naked_identifier: id - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_namespace join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_namespace dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: relnamespace - join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: pg_database join_on_condition: keyword: 'on' expression: - column_reference: naked_identifier: pg_database dot: . naked_identifier_all: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: stv_tbl_perm - dot: . - naked_identifier: db_id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_top.sql000066400000000000000000000001331451700765000244570ustar00rootroot00000000000000SELECT TOP 10 example_value_col FROM example_schema.some_table ORDER BY example_value_col; sqlfluff-2.3.5/test/fixtures/dialects/redshift/select_top.yml000066400000000000000000000021261451700765000244650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 14acecbedbb6768c8a974060d9e7cefa43561e1c3d1b19a080e8ee8256aff133 file: statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '10' select_clause_element: column_reference: naked_identifier: example_value_col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: example_schema - dot: . - naked_identifier: some_table orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: example_value_col statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/show_datashares.sql000066400000000000000000000000611451700765000254750ustar00rootroot00000000000000SHOW DATASHARES; SHOW DATASHARES LIKE 'sales%'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/show_datashares.yml000066400000000000000000000012471451700765000255060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cdcc140569a5ab1628ad23eceea9565bba47b5fc6b2a8de9784a4f47b4988aa0 file: - statement: show_datashares_statement: - keyword: SHOW - keyword: DATASHARES - statement_terminator: ; - statement: show_datashares_statement: - keyword: SHOW - keyword: DATASHARES - keyword: LIKE - quoted_literal: "'sales%'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/show_model.sql000066400000000000000000000000411451700765000244540ustar00rootroot00000000000000SHOW MODEL ALL; SHOW MODEL mdl; sqlfluff-2.3.5/test/fixtures/dialects/redshift/show_model.yml000066400000000000000000000012471451700765000244670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a18435e378fb780efb8c6b0210f84365b31ee39918b8668bbca95d6e74cfd36b file: - statement: show_model_statement: - keyword: SHOW - keyword: MODEL - keyword: ALL - statement_terminator: ; - statement: show_model_statement: - keyword: SHOW - keyword: MODEL - object_reference: naked_identifier: mdl - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/single_quote.sql000066400000000000000000000002671451700765000250240ustar00rootroot00000000000000SELECT ''; SELECT ''''; SELECT ' '; SELECT '''aaa'''; SELECT ' '' '; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' 'bar'; SELECT 'foo' -- some comment 'bar'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/single_quote.yml000066400000000000000000000040311451700765000250170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 737a716df4c07a67449fbe8f37caac2b4a08b8088c06f6861699dd1b7937ca1e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "''''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'''aaa'''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'\n''\n'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: - quoted_literal: "'foo'" - quoted_literal: "'bar'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/super_data_type.sql000066400000000000000000000030201451700765000255040ustar00rootroot00000000000000-- redshift_super_data_type.sql /* queries that implicitly and explicitly use the Redshift SUPER data type (https://docs.aws.amazon.com/redshift/latest/dg/super-overview.html). */ -- Example from https://github.com/sqlfluff/sqlfluff/issues/1672 SELECT c[0].col, o FROM customer_orders c, c.c_orders o; -- Can use SUPER data types in WHERE clauses SELECT COUNT(*) FROM customer_orders_lineitem WHERE c_orders[0].o_orderkey IS NOT NULL; SELECT c_custkey FROM customer_orders_lineitem WHERE CASE WHEN JSON_TYPEOF(c_orders[0].o_orderstatus) = 'string' THEN c_orders[0].o_orderstatus::VARCHAR <= 'P' ELSE NULL END; -- Can do multiple array accessors with SUPER data types SELECT c[0][1][2][3][4].col, o FROM customer_orders c, c.c_orders o; -- Can use wildcards SELECT c.*, o FROM customer_orders_lineitem c, c.c_orders o; -- Can access a single SUPER data type multiple times in a SELECT statement -- source: https://awscloudfeed.com/whats-new/big-data/work-with-semistructured-data-using-amazon-redshift-super SELECT messages[0].format, messages[0].topic FROM subscription_auto WHERE messages[0].payload.payload."assetId" > 0; -- Can perform functions and operations on SUPER data types. -- Adapted from: https://awscloudfeed.com/whats-new/big-data/work-with-semistructured-data-using-amazon-redshift-super SELECT messages[0].format, COUNT(messages[0].topic) FROM subscription_auto WHERE messages[0].payload.payload."assetId" > 'abc' GROUP BY messages[0].format; sqlfluff-2.3.5/test/fixtures/dialects/redshift/super_data_type.yml000066400000000000000000000260501451700765000255160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b1ad1348e7f28ff11211e11d120f56e880fc3d3b822ef6a6009b6ed0ecefb301 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: c array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: col - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem where_clause: keyword: WHERE expression: - column_reference: naked_identifier: c_orders - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - semi_structured_expression: dot: . naked_identifier: o_orderkey - keyword: IS - keyword: NOT - null_literal: 'NULL' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c_custkey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem where_clause: keyword: WHERE expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: JSON_TYPEOF bracketed: start_bracket: ( expression: column_reference: naked_identifier: c_orders array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: o_orderstatus end_bracket: ) comparison_operator: raw_comparison_operator: '=' quoted_literal: "'string'" - keyword: THEN - expression: cast_expression: column_reference: naked_identifier: c_orders array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: o_orderstatus casting_operator: '::' data_type: keyword: VARCHAR comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' quoted_literal: "'P'" - else_clause: keyword: ELSE expression: null_literal: 'NULL' - keyword: END - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: c array_accessor: - start_square_bracket: '[' - numeric_literal: '0' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '1' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '2' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '3' - end_square_bracket: ']' - start_square_bracket: '[' - numeric_literal: '4' - end_square_bracket: ']' semi_structured_expression: dot: . 
naked_identifier: col - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: c dot: . star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: o from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: customer_orders_lineitem alias_expression: naked_identifier: c - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: c - dot: . - naked_identifier: c_orders alias_expression: naked_identifier: o - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: format - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: topic from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subscription_auto where_clause: keyword: WHERE expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: - dot: . - naked_identifier: payload - dot: . - naked_identifier: payload - dot: . - quoted_identifier: '"assetId"' comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: format - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: topic end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subscription_auto where_clause: keyword: WHERE expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: - dot: . - naked_identifier: payload - dot: . - naked_identifier: payload - dot: . 
- quoted_identifier: '"assetId"' comparison_operator: raw_comparison_operator: '>' quoted_literal: "'abc'" groupby_clause: - keyword: GROUP - keyword: BY - expression: column_reference: naked_identifier: messages array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . naked_identifier: format - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/temporary_tables.sql000066400000000000000000000004051451700765000256740ustar00rootroot00000000000000CREATE TEMPORARY TABLE #temp_table AS SELECT name FROM other_table; CREATE TABLE #other_temp_table (id int); COPY #temp_table FROM 's3://mybucket/path' CREDENTIALS 'aws_access_key_id=SECRET;aws_secret_access_key=ALSO_SECRET' GZIP; SELECT * FROM #temp_table; sqlfluff-2.3.5/test/fixtures/dialects/redshift/temporary_tables.yml000066400000000000000000000041571451700765000257060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7d46b72b36acc34ecad4331b97fa771bc9b96540906dcbfb861b3ad8c79f582 file: - statement: create_table_as_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: TABLE - object_reference: naked_identifier: '#temp_table' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: '#other_temp_table' - bracketed: start_bracket: ( column_reference: naked_identifier: id data_type: keyword: int end_bracket: ) - statement_terminator: ; - statement: copy_statement: - keyword: COPY - table_reference: naked_identifier: '#temp_table' - keyword: FROM - quoted_literal: "'s3://mybucket/path'" - authorization_segment: keyword: CREDENTIALS quoted_literal: "'aws_access_key_id=SECRET;aws_secret_access_key=ALSO_SECRET'" - keyword: GZIP - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: '#temp_table' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/transactions.sql000066400000000000000000000004131451700765000250270ustar00rootroot00000000000000begin; start transaction; begin work; begin transaction isolation level serializable; begin transaction isolation level serializable read only; start transaction read write; commit; end work; commit transaction; rollback; abort work; rollback transaction; sqlfluff-2.3.5/test/fixtures/dialects/redshift/transactions.yml000066400000000000000000000035471451700765000250440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9aac8a199f0aa4375d5da5c433f6ec048d0294798c1a9ab7de0894885939dd1e file: - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: isolation - keyword: level - keyword: serializable - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: isolation - keyword: level - keyword: serializable - keyword: read - keyword: only - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - keyword: read - keyword: write - statement_terminator: ; - statement: transaction_statement: keyword: commit - statement_terminator: ; - statement: transaction_statement: - keyword: end - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: commit - keyword: transaction - statement_terminator: ; - statement: transaction_statement: keyword: rollback - statement_terminator: ; - statement: transaction_statement: - keyword: abort - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: rollback - keyword: transaction - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unload.sql000066400000000000000000000052071451700765000236070ustar00rootroot00000000000000unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; unload ('select * from lineitem') to 's3://mybucket/lineitem/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' PARQUET PARTITION BY (l_shipdate); unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' JSON; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' CSV; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' CSV DELIMITER AS '|'; unload ('select * from venue') to 's3://mybucket/venue_pipe_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest; unload ('select * from venue') to 's3://mybucket/unload_venue_folder/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' manifest verbose; unload ('select * from venue where venueseats > 75000') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' header parallel off; unload ('select * from venue') to 's3://mybucket/unload/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' maxfilesize 1 gb; unload ('select * from venue') to 's3://mybucket/venue_encrypt_kms' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' kms_key_id '1234abcd-12ab-34cd-56ef-1234567890ab' manifest encrypted; unload ('select * from venue') to 's3://mybucket/venue_encrypt_cmk' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' master_symmetric_key 'EXAMPLEMASTERKEYtkbjk/OpCwtYSx/M4/t7DMCDIK722' encrypted; unload ('select * from venue') to 's3://mybucket/venue_fw_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' fixedwidth as 'venueid:3,venuename:39,venuecity:16,venuestate:2,venueseats:6'; unload ('select * from venue') to 's3://mybucket/venue_tab_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter as '\t' gzip; unload ('select id, location from location') to 
's3://mybucket/location_' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' delimiter ',' addquotes; unload ('select venuecity, venuestate, caldate, pricepaid, sum(pricepaid) over(partition by venuecity, venuestate order by caldate rows between 3 preceding and 3 following) as winsum from sales join date on sales.dateid=date.dateid join event on event.eventid=sales.eventid join venue on event.venueid=venue.venueid order by 1,2') to 's3://mybucket/tickit/winsum' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole'; unload ('select * from venue') to 's3://mybucket/nulls/' iam_role 'arn:aws:iam::0123456789012:role/MyRedshiftRole' null as 'fred'; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unload.yml000066400000000000000000000201571451700765000236120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 478ce354cedf63b95a71e94c62bf69f24a44522ad4bec29ecd797140959e4525 file: - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from lineitem'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/lineitem/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: PARQUET - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: l_shipdate end_bracket: ) - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: JSON - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: CSV - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: CSV - keyword: DELIMITER - keyword: AS - quoted_literal: "'|'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_pipe_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from 
venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload_venue_folder/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: manifest - keyword: verbose - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue where venueseats > 75000'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: header - keyword: parallel - keyword: 'off' - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/unload/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: maxfilesize - numeric_literal: '1' - keyword: gb - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_encrypt_kms'" - authorization_segment: - keyword: iam_role - quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: kms_key_id - quoted_literal: "'1234abcd-12ab-34cd-56ef-1234567890ab'" - keyword: manifest - keyword: encrypted - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_encrypt_cmk'" - authorization_segment: - keyword: iam_role - quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: master_symmetric_key - quoted_literal: "'EXAMPLEMASTERKEYtkbjk/OpCwtYSx/M4/t7DMCDIK722'" - keyword: encrypted - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_fw_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: fixedwidth - keyword: as - quoted_literal: "'venueid:3,venuename:39,venuecity:16,venuestate:2,venueseats:6'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/venue_tab_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - keyword: as - quoted_literal: "'\\t'" - keyword: gzip - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select id, location from location'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/location_'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: delimiter - quoted_literal: "','" - keyword: addquotes - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select venuecity, venuestate, caldate, pricepaid,\nsum(pricepaid)\ \ over(partition by venuecity, venuestate\norder by caldate rows between\ \ 3 preceding and 3 following) as winsum\nfrom sales 
join date on sales.dateid=date.dateid\n\ join event on event.eventid=sales.eventid\njoin venue on event.venueid=venue.venueid\n\ order by 1,2'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/tickit/winsum'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - statement_terminator: ; - statement: unload_statement: - keyword: unload - bracketed: start_bracket: ( quoted_literal: "'select * from venue'" end_bracket: ) - keyword: to - quoted_literal: "'s3://mybucket/nulls/'" - authorization_segment: keyword: iam_role quoted_literal: "'arn:aws:iam::0123456789012:role/MyRedshiftRole'" - keyword: 'null' - keyword: as - quoted_literal: "'fred'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unpivot.sql000066400000000000000000000025531451700765000240320ustar00rootroot00000000000000-- redshift_unpivot.sql /* Examples of SELECT statements that include UNPIVOT expressions. */ -- Below examples come from -- https://docs.aws.amazon.com/redshift/latest/dg/r_FROM_clause-pivot-unpivot-examples.html SELECT * FROM (SELECT red, green, blue FROM count_by_color) UNPIVOT ( cnt FOR color IN (red, green, blue) ); SELECT * FROM ( SELECT red, green, blue FROM count_by_color ) UNPIVOT INCLUDE NULLS ( cnt FOR color IN (red, green, blue) ); SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red, green, blue) ); SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red AS r, green AS g, blue AS b) ); -- Examples provided by AWS end here -- Can do EXCLUDE NULLS as well SELECT * FROM ( SELECT red, green, blue FROM count_by_color ) UNPIVOT EXCLUDE NULLS ( cnt FOR color IN (red, green, blue) ); -- Can do this on CTEs WITH subset_color_counts AS ( SELECT red, green, blue FROM count_by_color ) SELECT * FROM subset_color_counts UNPIVOT ( cnt FOR color IN (red, green, blue) ); -- Can do this on tables SELECT * FROM count_by_color UNPIVOT ( cnt FOR color IN (red, green, blue) ); -- Can alias output of unpivot statement SELECT * FROM count_of_bears UNPIVOT ( cnt FOR species IN (giant_panda, moon_bear) ) AS floofy_bears; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unpivot.yml000066400000000000000000000324161451700765000240350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 58f86ae6e875aed843978608324e1d5dd05ac7fa1779722e60e6533aa71cb41f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: - keyword: UNPIVOT - keyword: INCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - 
column_reference: naked_identifier: red - alias_expression: keyword: AS naked_identifier: r - comma: ',' - column_reference: naked_identifier: green - alias_expression: keyword: AS naked_identifier: g - comma: ',' - column_reference: naked_identifier: blue - alias_expression: keyword: AS naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) from_unpivot_expression: - keyword: UNPIVOT - keyword: EXCLUDE - keyword: NULLS - bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: subset_color_counts keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: red - comma: ',' - select_clause_element: column_reference: naked_identifier: green - comma: ',' - select_clause_element: column_reference: naked_identifier: blue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: subset_color_counts from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_by_color from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: color - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: red - comma: ',' - column_reference: naked_identifier: green - comma: ',' - column_reference: naked_identifier: blue - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: count_of_bears from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - column_reference: naked_identifier: cnt - keyword: FOR - column_reference: naked_identifier: species - keyword: IN - bracketed: - start_bracket: ( - column_reference: naked_identifier: giant_panda - comma: ',' - column_reference: naked_identifier: moon_bear - end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: floofy_bears - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unreserved_keywords.sql000066400000000000000000000006621451700765000264360ustar00rootroot00000000000000-- Issue #2299 -- All these columns are unreserved keywords and should parse. SELECT auto, avro, backup, bzip2, case_insensitive, case_sensitive, compound, defaults, deflate, distkey, diststyle, encode, even, excluding, explicit, gzip, including, interleaved, language, lzop, offline, partitioned, sortkey, wallet, zstd FROM foo; sqlfluff-2.3.5/test/fixtures/dialects/redshift/unreserved_keywords.yml000066400000000000000000000070101451700765000264320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9c2855fe7dd06fc4e5c99824e16eb736d7534e70017c2cdf5c83d50ad587896 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: auto - comma: ',' - select_clause_element: column_reference: naked_identifier: avro - comma: ',' - select_clause_element: column_reference: naked_identifier: backup - comma: ',' - select_clause_element: column_reference: naked_identifier: bzip2 - comma: ',' - select_clause_element: column_reference: naked_identifier: case_insensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: case_sensitive - comma: ',' - select_clause_element: column_reference: naked_identifier: compound - comma: ',' - select_clause_element: column_reference: naked_identifier: defaults - comma: ',' - select_clause_element: column_reference: naked_identifier: deflate - comma: ',' - select_clause_element: column_reference: naked_identifier: distkey - comma: ',' - select_clause_element: column_reference: naked_identifier: diststyle - comma: ',' - select_clause_element: column_reference: naked_identifier: encode - comma: ',' - select_clause_element: column_reference: naked_identifier: even - comma: ',' - select_clause_element: column_reference: naked_identifier: excluding - comma: ',' - select_clause_element: column_reference: naked_identifier: explicit - comma: ',' - select_clause_element: column_reference: naked_identifier: gzip - comma: ',' - select_clause_element: column_reference: naked_identifier: including - comma: ',' - select_clause_element: column_reference: naked_identifier: interleaved - comma: ',' - select_clause_element: column_reference: naked_identifier: language - comma: ',' - select_clause_element: column_reference: naked_identifier: lzop - comma: ',' - select_clause_element: column_reference: naked_identifier: offline - comma: ',' - select_clause_element: 
column_reference: naked_identifier: partitioned - comma: ',' - select_clause_element: column_reference: naked_identifier: sortkey - comma: ',' - select_clause_element: column_reference: naked_identifier: wallet - comma: ',' - select_clause_element: column_reference: naked_identifier: zstd from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/update.sql000066400000000000000000000001021451700765000235740ustar00rootroot00000000000000update tbl1 set col1 = col2; update tbl1 as set set col1 = col2; sqlfluff-2.3.5/test/fixtures/dialects/redshift/update.yml000066400000000000000000000023621451700765000236100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cebda76b10f9cf9689af4a8fd8368be70b43dae0da1680e452495304be615610 file: - statement: update_statement: keyword: update table_reference: naked_identifier: tbl1 set_clause_list: keyword: set set_clause: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 - statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: tbl1 alias_expression: keyword: as naked_identifier: set set_clause_list: keyword: set set_clause: - column_reference: naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: col2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/vacuum.sql000066400000000000000000000004031451700765000236160ustar00rootroot00000000000000vacuum; vacuum sales; vacuum sales to 100 percent; vacuum recluster sales; vacuum sort only sales to 75 percent; vacuum delete only sales to 75 percent; vacuum reindex listing; vacuum reindex listing to 75 percent; vacuum listing to 75 percent BOOST; sqlfluff-2.3.5/test/fixtures/dialects/redshift/vacuum.yml000066400000000000000000000040731451700765000236270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
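# Editor's note — a minimal regeneration sketch (assuming a development
# checkout, run from the repository root with test dependencies installed):
#
#   python test/generate_parse_fixture_yml.py
#
# Running it rewrites fixture YAML such as this file and recomputes the
# `_hash` value below; any additional flags or filters are not shown here.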
_hash: ffe0de2c6413a5f57b2ad950d92a11bbfd256c6a19ee29b6db333c5d27eb17f0 file: - statement: vacuum_statement: keyword: vacuum - statement_terminator: ; - statement: vacuum_statement: keyword: vacuum table_reference: naked_identifier: sales - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '100' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: recluster - table_reference: naked_identifier: sales - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: sort - keyword: only - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: delete - keyword: only - table_reference: naked_identifier: sales - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: reindex - table_reference: naked_identifier: listing - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - keyword: reindex - table_reference: naked_identifier: listing - keyword: to - numeric_literal: '75' - keyword: percent - statement_terminator: ; - statement: vacuum_statement: - keyword: vacuum - table_reference: naked_identifier: listing - keyword: to - numeric_literal: '75' - keyword: percent - keyword: BOOST - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/redshift/window_functions.sql000066400000000000000000000001121451700765000257120ustar00rootroot00000000000000select lead(col1, 1) respect nulls over (order by col2 asc) from dual sqlfluff-2.3.5/test/fixtures/dialects/redshift/window_functions.yml000066400000000000000000000027671451700765000257360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
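# Editor's illustrative note (hand-written sketch, not generated output):
# the Redshift null-handling modifier in
#
#   lead(col1, 1) respect nulls over (order by col2 asc)
#
# is parsed as part of the `over_clause`, which is why the keywords
# `respect` and `nulls` appear ahead of `over` in the tree below.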
_hash: 6f798983eba402305df278544024b6293edcbe2aaa287497a2caaca401c0d06d file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: lead bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: col1 - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) over_clause: - keyword: respect - keyword: nulls - keyword: over - bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col2 - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dual sqlfluff-2.3.5/test/fixtures/dialects/snowflake/000077500000000000000000000000001451700765000217615ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/snowflake/.sqlfluff000066400000000000000000000000371451700765000236040ustar00rootroot00000000000000[sqlfluff] dialect = snowflake sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_account.sql000066400000000000000000000012111451700765000253200ustar00rootroot00000000000000ALTER ACCOUNT SET TIMEZONE = 'UTC'; ALTER ACCOUNT SET ALLOW_ID_TOKEN = TRUE, DEFAULT_DDL_COLLATION = 'en-ci', CLIENT_ENCRYPTION_KEY_SIZE = 128, NETWORK_POLICY = mypolicy ; ALTER ACCOUNT UNSET TIMEZONE; ALTER ACCOUNT UNSET DATA_RETENTION_TIME_IN_DAYS, JSON_INDENT; ALTER ACCOUNT SET RESOURCE_MONITOR = VERY_RESTRICTIVE_MONITOR; ALTER ACCOUNT SET PASSWORD POLICY mydb.security.at_least_twelve_characters; ALTER ACCOUNT SET SESSION POLICY mydb.policies.only_one_hour; ALTER ACCOUNT UNSET PASSWORD POLICY; ALTER ACCOUNT UNSET SESSION POLICY; ALTER ACCOUNT SET TAG env = 'prod', domain = 'sales' ; ALTER ACCOUNT UNSET TAG env, domain; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_account.yml000066400000000000000000000074161451700765000253370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
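# Editor's illustrative note (hand-written sketch, not generated output):
# a multi-tag clause such as
#
#   SET TAG env = 'prod', domain = 'sales'
#
# is grouped into a single `tag_equals` segment holding repeated
# `tag_reference` / `=` / `quoted_literal` triples, as in the trees below.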
_hash: 0541ba28df55ad725cd0b3bb6404051512935441cdc442aea34d203b57135d4b file: - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - parameter: TIMEZONE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UTC'" - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - parameter: ALLOW_ID_TOKEN - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en-ci'" - comma: ',' - parameter: CLIENT_ENCRYPTION_KEY_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '128' - comma: ',' - parameter: NETWORK_POLICY - comparison_operator: raw_comparison_operator: '=' - naked_identifier: mypolicy - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - naked_identifier: TIMEZONE - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - naked_identifier: DATA_RETENTION_TIME_IN_DAYS - comma: ',' - naked_identifier: JSON_INDENT - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: RESOURCE_MONITOR - comparison_operator: raw_comparison_operator: '=' - naked_identifier: VERY_RESTRICTIVE_MONITOR - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: PASSWORD - keyword: POLICY - table_reference: - naked_identifier: mydb - dot: . - naked_identifier: security - dot: . - naked_identifier: at_least_twelve_characters - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - keyword: SESSION - keyword: POLICY - table_reference: - naked_identifier: mydb - dot: . - naked_identifier: policies - dot: . 
- naked_identifier: only_one_hour - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: PASSWORD - keyword: POLICY - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: SESSION - keyword: POLICY - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: env - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'prod'" - comma: ',' - tag_reference: naked_identifier: domain - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_account_statement: - keyword: ALTER - keyword: ACCOUNT - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: env - comma: ',' - tag_reference: naked_identifier: domain - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_database.sql000066400000000000000000000010531451700765000254340ustar00rootroot00000000000000ALTER DATABASE mydb RENAME TO mydb2; ALTER DATABASE IF EXISTS mydb RENAME TO mydb2; ALTER DATABASE mydb SWAP WITH yourdb; ALTER DATABASE IF EXISTS mydb SWAP WITH yourdb; ALTER DATABASE mydb SET DATA_RETENTION_TIME_IN_DAYS = 7, MAX_DATA_EXTENSION_TIME_IN_DAYS = 14, DEFAULT_DDL_COLLATION = 'en_ci', COMMENT = 'My most excellent database' ; ALTER DATABASE mydb SET TAG environment = 'test', billed_to = 'sales'; ALTER DATABASE mydb UNSET TAG environment, billed_to; ALTER DATABASE mydb UNSET DATA_RETENTION_TIME_IN_DAYS, DEFAULT_DDL_COLLATION; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_database.yml000066400000000000000000000067131451700765000254460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
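# Editor's illustrative note (hand-written sketch, not generated output):
# each session-style parameter in a clause like
#
#   SET DATA_RETENTION_TIME_IN_DAYS = 7, DEFAULT_DDL_COLLATION = 'en_ci'
#
# parses to a `parameter` segment followed by a comparison operator and a
# literal, rather than to keywords; compare the entries below.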
_hash: abb0ec6bf44f97043de27770d22eeb216c42fccaad5e56f9dadbc9e7312d1dd0 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: RENAME - keyword: TO - object_reference: naked_identifier: mydb2 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mydb - keyword: RENAME - keyword: TO - object_reference: naked_identifier: mydb2 - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SWAP - keyword: WITH - object_reference: naked_identifier: yourdb - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mydb - keyword: SWAP - keyword: WITH - object_reference: naked_identifier: yourdb - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SET - parameter: DATA_RETENTION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - comma: ',' - parameter: MAX_DATA_EXTENSION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '14' - comma: ',' - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en_ci'" - comma: ',' - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'My most excellent database'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: environment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test'" - comma: ',' - tag_reference: naked_identifier: billed_to - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: environment - comma: ',' - tag_reference: naked_identifier: billed_to - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - object_reference: naked_identifier: mydb - keyword: UNSET - keyword: DATA_RETENTION_TIME_IN_DAYS - comma: ',' - keyword: DEFAULT_DDL_COLLATION - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_external_function.sql000066400000000000000000000013261451700765000274220ustar00rootroot00000000000000ALTER FUNCTION IF EXISTS FUNCTION1(NUMBER) RENAME TO FUNCTION2; ALTER FUNCTION FUNCTION2(NUMBER) SET SECURE; ALTER FUNCTION FUNCTION3(NUMBER) RENAME TO FUNCTION3B; ALTER FUNCTION FUNCTION4(NUMBER) SET API_INTEGRATION = API_INTEGRATION_2; ALTER FUNCTION FUNCTION5(NUMBER) SET MAX_BATCH_ROWS = 100; ALTER FUNCTION FUNCTION6(NUMBER) SET COMPRESSION = GZIP; ALTER FUNCTION FUNCTION7(NUMBER) SET REQUEST_TRANSLATOR = TRANSLATOR_FUNCTION; ALTER FUNCTION FUNCTION8(NUMBER) SET RESPONSE_TRANSLATOR = TRANSLATOR_FUNCTION; ALTER FUNCTION FUNCTION9(NUMBER) SET HEADERS = ('abc' = 'def'); ALTER FUNCTION FUNCTION10(NUMBER) SET CONTEXT_HEADERS = (CURRENT_ROLE,CURRENT_TIMESTAMP); ALTER FUNCTION FUNCTION11(NUMBER) SET COMMENT = 
'Woohoo!'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_external_function.yml000066400000000000000000000136201451700765000274240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c6a2bf22f3b84a2339aa80cc957b7f89796f16670cc913876d6fdaeaf53dd83e file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: FUNCTION1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: FUNCTION2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION2 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: SECURE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION3 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: FUNCTION3B - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION4 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: API_INTEGRATION_2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION6 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION7 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: TRANSLATOR_FUNCTION - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION8 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: function_name_identifier: 
TRANSLATOR_FUNCTION - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION9 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'abc'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'def'" - end_bracket: ) - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION10 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: CURRENT_ROLE - comma: ',' - keyword: CURRENT_TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: FUNCTION11 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: NUMBER end_bracket: ) - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Woohoo!'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_external_table.sql000066400000000000000000000010641451700765000266630ustar00rootroot00000000000000alter external table foo refresh; alter external table foo refresh '2018/08/05/'; alter external table foo add files ('foo/bar.json.gz', 'bar/foo.json.gz'); alter external table foo remove files ('foo/bar.json.gz', 'bar/foo.json.gz'); alter external table foo add partition(foo='baz', bar='bar', baz='foo') location '2022/01'; alter external table foo drop partition location '2022/01'; alter external table if exists foo set auto_refresh = true; alter external table if exists foo set tag foo = 'foo', bar = 'bar'; alter external table foo unset tag foo = 'foo'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_external_table.yml000066400000000000000000000102401451700765000266610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
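# Editor's illustrative note (hand-written sketch, not generated output):
# a partition clause such as
#
#   add partition(foo='baz') location '2022/01'
#
# keeps its column/value pairs inside a bracketed segment of
# `column_reference`, `=`, and `quoted_literal`, with the `location` keyword
# and its path following as sibling segments, as the trees below show.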
_hash: c0751db5f967c630dd66c3871d4bef30ce025709822b5b06459e5dc9a9684971 file: - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: refresh - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: refresh - quoted_literal: "'2018/08/05/'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: add - keyword: files - bracketed: - start_bracket: ( - quoted_literal: "'foo/bar.json.gz'" - comma: ',' - quoted_literal: "'bar/foo.json.gz'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: remove - keyword: files - bracketed: - start_bracket: ( - quoted_literal: "'foo/bar.json.gz'" - comma: ',' - quoted_literal: "'bar/foo.json.gz'" - end_bracket: ) - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: add - keyword: partition - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'baz'" - comma: ',' - column_reference: naked_identifier: bar - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - comma: ',' - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - end_bracket: ) - keyword: location - quoted_literal: "'2022/01'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: drop - keyword: partition - keyword: location - quoted_literal: "'2022/01'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: foo - keyword: set - keyword: auto_refresh - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: foo - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comma: ',' - tag_reference: naked_identifier: bar - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - statement_terminator: ; - statement: alter_external_table_statement: - keyword: alter - keyword: external - keyword: table - table_reference: naked_identifier: foo - keyword: unset - tag_equals: keyword: tag tag_reference: naked_identifier: foo comparison_operator: raw_comparison_operator: '=' quoted_literal: "'foo'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_file_format.sql000066400000000000000000000037411451700765000261650ustar00rootroot00000000000000ALTER FILE FORMAT IF EXISTS my_file_format RENAME TO your_file_format ; ALTER FILE FORMAT IF EXISTS my_csv_format SET TYPE = CSV, COMPRESSION = AUTO, RECORD_DELIMITER = NONE, 
FIELD_DELIMITER = NONE, FILE_EXTENSION = 'foobar', SKIP_HEADER = 1, SKIP_BLANK_LINES = TRUE, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FIELD_OPTIONALLY_ENCLOSED_BY = NONE, ERROR_ON_COLUMN_COUNT_MISMATCH = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, VALIDATE_UTF8 = TRUE, EMPTY_FIELD_AS_NULL = TRUE, SKIP_BYTE_ORDER_MARK = TRUE, ENCODING = UTF8 ; ALTER FILE FORMAT IF EXISTS my_json_format SET TYPE = JSON, COMPRESSION = AUTO, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FILE_EXTENSION = 'foobar', ENABLE_OCTAL = TRUE, ALLOW_DUPLICATE = TRUE, STRIP_OUTER_ARRAY = TRUE, STRIP_NULL_VALUES = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, IGNORE_UTF8_ERRORS = TRUE, SKIP_BYTE_ORDER_MARK = TRUE ; ALTER FILE FORMAT IF EXISTS my_avro_format SET TYPE = AVRO COMPRESSION = 'GZIP' TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_orc_format SET TYPE = ORC TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_parquet_format SET TYPE = PARQUET COMPRESSION = AUTO SNAPPY_COMPRESSION = FALSE TRIM_SPACE = FALSE BINARY_AS_TEXT = TRUE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; ALTER FILE FORMAT IF EXISTS my_xml_format SET TYPE = XML COMPRESSION = GZIP IGNORE_UTF8_ERRORS = FALSE PRESERVE_SPACE = FALSE STRIP_OUTER_ELEMENT = FALSE DISABLE_SNOWFLAKE_DATA = FALSE DISABLE_AUTO_CONVERT = FALSE SKIP_BYTE_ORDER_MARK = FALSE COMMENT = 'FOOBAR' ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_file_format.yml000066400000000000000000000267711451700765000261770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
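# Editor's illustrative note (hand-written sketch, not generated output):
# the SET options in these trees are grouped by file type, so
#
#   SET TYPE = CSV, COMPRESSION = AUTO, ...
#
# collects into a single `csv_file_format_type_parameters` segment, and the
# JSON/AVRO/ORC/PARQUET/XML statements collect into the analogous
# *_file_format_type_parameters segments below.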
_hash: 98814d1adc2f97c447fd09cbfa35f3297183f7eb13bd7f8f2c45dc86c97d5aff file: - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_file_format - keyword: RENAME - keyword: TO - object_reference: naked_identifier: your_file_format - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - keyword: SET - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - comma: ',' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_json_format - keyword: SET - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: 
raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_avro_format - keyword: SET - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: "'GZIP'" - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_orc_format - keyword: SET - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_parquet_format - keyword: SET - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - 
compression_type: AUTO - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: BINARY_AS_TEXT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: alter_file_format_segment: - keyword: ALTER - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_xml_format - keyword: SET - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: PRESERVE_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_AUTO_CONVERT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_function.sql000066400000000000000000000002411451700765000255130ustar00rootroot00000000000000ALTER FUNCTION IF EXISTS function1(number) RENAME TO function2; ALTER FUNCTION IF EXISTS function2(number) SET SECURE; ALTER FUNCTION function3() UNSET COMMENT; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_function.yml000066400000000000000000000031371451700765000255240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
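# Editor's illustrative note (hand-written sketch, not generated output):
# a signature such as
#
#   ALTER FUNCTION function1(number)
#
# parses the argument types into a `function_parameter_list` whose bracketed
# body holds bare `data_type` segments with no parameter names, as below.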
_hash: 8a1936b1d3db7a914b04b28e3a9e8d806de7efe93cf96c8b99850520c4c4a2a7 file: - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: function2 - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function2 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: SET - keyword: SECURE - statement_terminator: ; - statement: alter_function_statement: - keyword: ALTER - keyword: FUNCTION - function_name: function_name_identifier: function3 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: COMMENT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_masking_policy.sql000066400000000000000000000010231451700765000266750ustar00rootroot00000000000000ALTER MASKING POLICY IF EXISTS "a quoted policy name" RENAME TO a_sane_name; ALTER MASKING POLICY email_mask SET BODY -> CASE WHEN current_role() IN ('ANALYST') THEN VAL ELSE sha2(VAL, 512) END ; ALTER MASKING POLICY aggressively_mask_pii SET TAG environment = 'silver' , silo = 'sales'; ALTER MASKING POLICY IF EXISTS mask_pii_policy UNSET TAG environment, billing; ALTER MASKING POLICY db.sch.fully_redacted_policy SET COMMENT = 'A super strict policy'; ALTER MASKING POLICY IF EXISTS mask_pii_policy UNSET COMMENT; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_masking_policy.yml000066400000000000000000000074441451700765000267140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
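# Editor's illustrative note (hand-written sketch, not generated output):
# the body-rewrite form
#
#   SET BODY -> CASE WHEN ... END
#
# is parsed with a dedicated `function_assigner` segment for `->` followed
# by an ordinary `case_expression`, as in the second tree below.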
_hash: 586ef71891d2af700f48ec15e2f7b1db821952f7cdb249162cf1dfe465f82e61 file: - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: quoted_identifier: '"a quoted policy name"' - keyword: RENAME - keyword: TO - object_reference: naked_identifier: a_sane_name - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: email_mask - keyword: SET - keyword: BODY - function_assigner: -> - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: IN bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: THEN - expression: column_reference: naked_identifier: VAL - else_clause: keyword: ELSE expression: function: function_name: function_name_identifier: sha2 bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: VAL - comma: ',' - expression: numeric_literal: '512' - end_bracket: ) - keyword: END - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: aggressively_mask_pii - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: environment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'silver'" - comma: ',' - tag_reference: naked_identifier: silo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sales'" - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mask_pii_policy - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: environment - comma: ',' - tag_reference: naked_identifier: billing - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - object_reference: - naked_identifier: db - dot: . - naked_identifier: sch - dot: . 
- naked_identifier: fully_redacted_policy - keyword: SET - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'A super strict policy'" - statement_terminator: ; - statement: alter_masking_policy: - keyword: ALTER - keyword: MASKING - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mask_pii_policy - keyword: UNSET - keyword: COMMENT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_materialized_view.sql000066400000000000000000000010361451700765000273750ustar00rootroot00000000000000alter materialized view table1_mv rename to my_mv; alter materialized view my_mv cluster by(i); alter materialized view my_mv suspend recluster; alter materialized view my_mv resume recluster; alter materialized view my_mv suspend; alter materialized view my_mv resume; alter materialized view my_mv drop clustering key; alter materialized view mv1 set secure; alter materialized view mv1 set comment = 'Sample view'; alter materialized view mv1 set tag my_tag = 'my tag'; alter materialized view mv1 unset tag my_tag = 'not my tag anymore'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_materialized_view.yml000066400000000000000000000073321451700765000274040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 192741ab32ee2d3df2200d491d4b887d936aa68354c14a1c3db809ce07be80f9 file: - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: table1_mv - keyword: rename - keyword: to - table_reference: naked_identifier: my_mv - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: cluster - keyword: by - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: i end_bracket: ) - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: suspend - keyword: recluster - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: resume - keyword: recluster - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: suspend - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: resume - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: my_mv - keyword: drop - keyword: clustering - keyword: key - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - keyword: secure - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: 
alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Sample view'" - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: set - tag_equals: keyword: tag tag_reference: naked_identifier: my_tag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my tag'" - statement_terminator: ; - statement: alter_materialized_view_statement: - keyword: alter - keyword: materialized - keyword: view - table_reference: naked_identifier: mv1 - keyword: unset - tag_equals: keyword: tag tag_reference: naked_identifier: my_tag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'not my tag anymore'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_pipe.sql000066400000000000000000000010011451700765000246160ustar00rootroot00000000000000alter pipe mypipe refresh prefix = 'd1/'; alter pipe mypipe refresh prefix = 'd1/' modified_after = '2018-07-30T13:56:46-07:00'; alter pipe if exists mypipe refresh; alter pipe mypipe set comment = 'Pipe for North American sales data'; alter pipe mypipe set pipe_execution_paused = true comment = 'Pipe for North American sales data'; alter pipe mypipe set tag tag1 = 'value1', tag2 = 'value2'; alter pipe mypipe unset pipe_execution_paused; alter pipe mypipe unset comment; alter pipe mypipe unset tag foo, bar; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_pipe.yml000066400000000000000000000067051451700765000246400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
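# Editor's illustrative note (hand-written sketch, not generated output):
# `SET COMMENT = '...'` parses to a `comment_equals_clause` wrapping the
# keyword, `=`, and the quoted literal, e.g.
#
#   alter pipe mypipe set comment = 'Pipe for North American sales data'
#
# in the trees below, while bare parameters like `pipe_execution_paused`
# remain keyword / operator / literal siblings.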
_hash: e420ce149b11c2ab44311a50476ee4c4f43a319f0b7b6a34155d1edc20d13011 file: - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: refresh - keyword: prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'d1/'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: refresh - keyword: prefix - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'d1/'" - keyword: modified_after - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2018-07-30T13:56:46-07:00'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - keyword: if - keyword: exists - object_reference: naked_identifier: mypipe - keyword: refresh - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Pipe for North American sales data'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - keyword: pipe_execution_paused - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Pipe for North American sales data'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: pipe_execution_paused - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_pipe_segment: - keyword: alter - keyword: pipe - object_reference: naked_identifier: mypipe - keyword: unset - keyword: tag - tag_reference: naked_identifier: foo - comma: ',' - tag_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_procedure.sql000066400000000000000000000007351451700765000256660ustar00rootroot00000000000000ALTER PROCEDURE IF EXISTS procedure1(FLOAT) RENAME TO procedure2; ALTER PROCEDURE IF EXISTS procedure1(FLOAT) EXECUTE AS CALLER; ALTER PROCEDURE IF EXISTS procedure1(FLOAT) EXECUTE AS OWNER; ALTER PROCEDURE procedure1(FLOAT_PARAM1 FLOAT) SET COMMENT = 'a_comment'; ALTER PROCEDURE procedure1(FLOAT_PARAM1 FLOAT) SET TAG TAG1 = 'value1', TAG2 = 'value2', TAG3 = 'value3'; ALTER PROCEDURE procedure1() UNSET COMMENT; ALTER PROCEDURE procedure1() UNSET TAG TAG1, TAG2, TAG3; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_procedure.yml000066400000000000000000000100441451700765000256620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8f841d1d99a03db696647d601c374564c49328096510bb3c377ae1fd25c3769d file: - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: RENAME - keyword: TO - function_name: function_name_identifier: procedure2 - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: CALLER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: EXECUTE - keyword: AS - keyword: OWNER - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'a_comment'" - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG3 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - function_name: function_name_identifier: procedure1 - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - comma: ',' - tag_reference: naked_identifier: TAG2 - comma: ',' - tag_reference: naked_identifier: TAG3 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_resource_monitor.sql000066400000000000000000000006651451700765000272760ustar00rootroot00000000000000alter resource monitor limiter set credit_quota=2000 notify_users = (jdoe, "jane smith", "john doe") FREQUENCY=DAILY start_timestamp = immediately end_timestamp = '2038-01-19 03:14:07' 
triggers on 80 percent do notify on 100 percent do suspend_immediate ; ALTER RESOURCE MONITOR limiter SET CREDIT_QUOTA=2000 TRIGGERS ON 80 PERCENT DO NOTIFY ON 100 PERCENT DO SUSPEND_IMMEDIATE ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_resource_monitor.yml000066400000000000000000000047511451700765000273000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae481af62fbba62a19efa21f4858d4d5607ac334cffa01ed04e4a505539ea3cb file: - statement: alter_resource_monitor_statement: - keyword: alter - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: set - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2000' - keyword: notify_users - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: jdoe - comma: ',' - object_reference: quoted_identifier: '"jane smith"' - comma: ',' - object_reference: quoted_identifier: '"john doe"' - end_bracket: ) - keyword: FREQUENCY - comparison_operator: raw_comparison_operator: '=' - keyword: DAILY - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - keyword: end_timestamp - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2038-01-19 03:14:07'" - keyword: triggers - keyword: 'on' - integer_literal: '80' - keyword: percent - keyword: do - keyword: notify - keyword: 'on' - integer_literal: '100' - keyword: percent - keyword: do - keyword: suspend_immediate - statement_terminator: ; - statement: alter_resource_monitor_statement: - keyword: ALTER - keyword: RESOURCE - keyword: MONITOR - object_reference: naked_identifier: limiter - keyword: SET - resource_monitor_options: - keyword: CREDIT_QUOTA - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2000' - keyword: TRIGGERS - keyword: 'ON' - integer_literal: '80' - keyword: PERCENT - keyword: DO - keyword: NOTIFY - keyword: 'ON' - integer_literal: '100' - keyword: PERCENT - keyword: DO - keyword: SUSPEND_IMMEDIATE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_role.sql000066400000000000000000000010641451700765000246330ustar00rootroot00000000000000ALTER ROLE IF EXISTS "test_role" RENAME TO "prod_role"; ALTER ROLE "test_role" RENAME TO "prod_role"; ALTER ROLE IF EXISTS "test_role" SET COMMENT = 'test_comment'; ALTER ROLE IF EXISTS "test_role" UNSET COMMENT; ALTER ROLE "test_role" SET COMMENT = 'test_comment'; ALTER ROLE "test_role" UNSET COMMENT; ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1'; ALTER ROLE IF EXISTS "test_role" SET TAG TAG1 = 'value1', TAG1 = 'value2', TAG1 = 'value3'; ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1; ALTER ROLE IF EXISTS "test_role" UNSET TAG TAG1, TAG2, TAG3; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_role.yml000066400000000000000000000100361451700765000246340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 72a427cac4910bd00b0e70ccbc8fea04c0d16f25ddc293b83871cfb5d25b05d3 file: - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: RENAME - keyword: TO - role_reference: quoted_identifier: '"prod_role"' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: RENAME - keyword: TO - role_reference: quoted_identifier: '"prod_role"' - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_comment'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - role_reference: naked_identifier: COMMENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: SET - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_comment'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - role_reference: naked_identifier: COMMENT - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - tag_equals: keyword: TAG tag_reference: naked_identifier: TAG1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - comma: ',' - tag_reference: naked_identifier: TAG1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value3'" - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - statement_terminator: ; - statement: alter_role_statement: - keyword: ALTER - keyword: ROLE - keyword: IF - keyword: EXISTS - role_reference: quoted_identifier: '"test_role"' - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: TAG1 - comma: ',' - tag_reference: naked_identifier: TAG2 - comma: ',' - tag_reference: naked_identifier: TAG3 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_schema.sql000066400000000000000000000006711451700765000251350ustar00rootroot00000000000000alter schema if exists schema1 rename to schema2; alter schema schema1 swap with schema2; alter schema schema2 enable managed access; alter schema schema1 set 
data_retention_time_in_days = 3; alter schema schema1 set tag tag1 = 'value1', tag2 = 'value2'; alter schema schema1 unset data_retention_time_in_days; alter schema schema1 unset data_retention_time_in_days, max_data_extension_time_in_days; alter schema schema1 unset tag foo, bar; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_schema.yml000066400000000000000000000056441451700765000251440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 787f8d6df91da83e973c5393b0fa2722db5b8e6b47d6fead71c4bb9eb4149a98 file: - statement: alter_schema_statement: - keyword: alter - keyword: schema - keyword: if - keyword: exists - schema_reference: naked_identifier: schema1 - keyword: rename - keyword: to - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: swap - keyword: with - schema_reference: naked_identifier: schema2 - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema2 - keyword: enable - keyword: managed - keyword: access - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: set - schema_object_properties: keyword: data_retention_time_in_days comparison_operator: raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: data_retention_time_in_days - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: data_retention_time_in_days - comma: ',' - keyword: max_data_extension_time_in_days - statement_terminator: ; - statement: alter_schema_statement: - keyword: alter - keyword: schema - schema_reference: naked_identifier: schema1 - keyword: unset - keyword: tag - tag_reference: naked_identifier: foo - comma: ',' - tag_reference: naked_identifier: bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_sequence.sql000066400000000000000000000007131451700765000255020ustar00rootroot00000000000000--ALTER SEQUENCE IF EXISTS seq RENAME TO seq2; ALTER SEQUENCE seq RENAME TO seq2; ALTER SEQUENCE seq SET INCREMENT BY = 2; ALTER SEQUENCE seq INCREMENT BY = 2; ALTER SEQUENCE seq INCREMENT = 2; ALTER SEQUENCE seq INCREMENT 2; ALTER SEQUENCE seq SET ORDER COMMENT = 'comment'; ALTER SEQUENCE seq SET NOORDER COMMENT = 'comment'; ALTER SEQUENCE seq UNSET COMMENT; ALTER SEQUENCE seq SET INCREMENT BY = 2 ORDER COMMENT = 'comment'; 
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_sequence.yml000066400000000000000000000062621451700765000255110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 235b51a76f9656e0292883f729b442b192b7ec6634b41642b4bff172fdca0529 file: - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: RENAME - keyword: TO - sequence_reference: naked_identifier: seq2 - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - integer_literal: '2' - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: NOORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_sequence_statement: - keyword: ALTER - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: SET - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_session_set_timezone.sql000066400000000000000000000000431451700765000301360ustar00rootroot00000000000000ALTER SESSION SET TIMEZONE = 'UTC' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_session_set_timezone.yml000066400000000000000000000012301451700765000301370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2cd3f296b5baa7b7e76e035a5a900a1cc4e322aec4c4e9f6109d62fb90b6258d file: statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - alter_session_set_statement: keyword: SET parameter: TIMEZONE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'UTC'" sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_session_unset_parameters.sql000066400000000000000000000001011451700765000310050ustar00rootroot00000000000000ALTER SESSION UNSET TIME_OUTPUT_FORMAT, TWO_DIGIT_CENTURY_START; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_session_unset_parameters.yml000066400000000000000000000012271451700765000310210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11408034dc3c2bf4379085bffc0c58769e8760046792bbb44be74840155d483a file: statement: alter_session_statement: - keyword: ALTER - keyword: SESSION - alter_session_unset_clause: - keyword: UNSET - parameter: TIME_OUTPUT_FORMAT - comma: ',' - parameter: TWO_DIGIT_CENTURY_START statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_share.sql000066400000000000000000000015551451700765000250010ustar00rootroot00000000000000ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1; ALTER SHARE IF EXISTS MY_SHARE ADD ACCOUNTS = my_account_1; ALTER SHARE MY_SHARE REMOVE ACCOUNTS = my_account_1; ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2; ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2, my_account_3; ALTER SHARE MY_SHARE SET TAG tag1 = 'value1'; ALTER SHARE IF EXISTS MY_SHARE SET TAG tag1 = 'value1', tag2 = 'value2'; ALTER SHARE MY_SHARE UNSET TAG tag1; ALTER SHARE MY_SHARE UNSET TAG tag1, tag2; ALTER SHARE MY_SHARE UNSET COMMENT; ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1 SHARE_RESTRICTIONS = TRUE; ALTER SHARE MY_SHARE ADD ACCOUNTS = my_account_1, my_account_2 SHARE_RESTRICTIONS = FALSE; ALTER SHARE MY_SHARE SET ACCOUNTS = my_account_1 COMMENT = 'my_comment'; ALTER SHARE IF EXISTS MY_SHARE SET ACCOUNTS = my_account_1, my_account_2 COMMENT = 'my_comment'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_share.yml000066400000000000000000000126511451700765000250020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 599ea38dd18bb92d1a5e4769ba772c97c5928c2251ca5937247bdfb075b8cf1c file: - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - keyword: IF - keyword: EXISTS - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: REMOVE - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - comma: ',' - naked_identifier: my_account_2 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - comma: ',' - naked_identifier: my_account_2 - comma: ',' - naked_identifier: my_account_3 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: SET - tag_equals: keyword: TAG tag_reference: naked_identifier: tag1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - keyword: IF - keyword: EXISTS - naked_identifier: MY_SHARE - keyword: SET - tag_equals: - keyword: TAG - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: tag1 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: tag1 - comma: ',' - tag_reference: naked_identifier: tag2 - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: UNSET - keyword: COMMENT - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - keyword: SHARE_RESTRICTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: ADD - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - comma: ',' - naked_identifier: my_account_2 - keyword: SHARE_RESTRICTIONS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - 
statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - naked_identifier: MY_SHARE - keyword: SET - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my_comment'" - statement_terminator: ; - statement: alter_share_statement: - keyword: ALTER - keyword: SHARE - keyword: IF - keyword: EXISTS - naked_identifier: MY_SHARE - keyword: SET - keyword: ACCOUNTS - comparison_operator: raw_comparison_operator: '=' - naked_identifier: my_account_1 - comma: ',' - naked_identifier: my_account_2 - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my_comment'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_stage.sql000066400000000000000000000007031451700765000247740ustar00rootroot00000000000000ALTER STAGE my_int_stage RENAME TO new_int_stage; ALTER STAGE my_ext_stage SET URL='s3://loading/files/new/' COPY_OPTIONS = (ON_ERROR='skip_file'); ALTER STAGE my_ext_stage SET STORAGE_INTEGRATION = myint; ALTER STAGE my_ext_stage SET CREDENTIALS=(AWS_KEY_ID='d4c3b2a1' AWS_SECRET_KEY='z9y8x7w6'); ALTER STAGE my_ext_stage3 SET ENCRYPTION=(TYPE='AWS_SSE_S3'); ALTER STAGE mystage REFRESH; ALTER STAGE mystage REFRESH SUBPATH = 'data'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_stage.yml000066400000000000000000000064711451700765000250060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 787b6dd605b96e87ef7d82a2dd3510de972577403435636738ceeaa7a45f843b file: - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: my_int_stage - keyword: RENAME - keyword: TO - object_reference: naked_identifier: new_int_stage - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: SET - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://loading/files/new/'" - keyword: COPY_OPTIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( copy_options: keyword: ON_ERROR comparison_operator: raw_comparison_operator: '=' copy_on_error_option: "'skip_file'" end_bracket: ) - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: SET - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: SET - stage_parameters: keyword: CREDENTIALS comparison_operator: raw_comparison_operator: '=' bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'d4c3b2a1'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'z9y8x7w6'" - end_bracket: ) - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: my_ext_stage3 - keyword: SET - stage_parameters: keyword: ENCRYPTION comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: TYPE comparison_operator: raw_comparison_operator: '=' stage_encryption_option: "'AWS_SSE_S3'" end_bracket: ) - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: REFRESH - statement_terminator: ; - statement: alter_stage_statement: - keyword: ALTER - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: REFRESH - keyword: SUBPATH - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'data'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_storage_integration.sql000066400000000000000000000063561451700765000277520ustar00rootroot00000000000000alter storage integration test_integration set tag tag1 = 'value1'; alter storage integration test_integration set tag tag1 = 'value1', tag2 = 'value2'; alter storage integration test_integration set comment = 'test comment'; alter storage integration test_integration unset comment; alter storage integration test_integration unset tag tag1, tag2; alter storage integration if exists test_integration unset tag tag1, tag2; alter storage integration test_integration unset enabled; alter storage integration test_integration unset comment; alter storage integration test_integration unset storage_blocked_locations; alter storage integration test_integration set enabled = true; alter storage integration test_integration set enabled = false comment = 'test comment'; alter storage integration test_integration set comment = 'test comment' enabled = false; alter storage integration test_integration set storage_aws_role_arn = 
'test_role_arn'; alter storage integration test_integration set storage_aws_object_acl = 'test_object_acl'; alter storage integration test_integration set azure_tenant_id = 'test_azure_tenant_id'; alter storage integration s3_int set storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ( 's3://mybucket1', 's3://mybucket2/' ); alter storage integration gcs_int set enabled = true storage_allowed_locations = ( 'gcs://mybucket1/path1/', 'gcs://mybucket2/path2/' ); alter storage integration azure_int set enabled = true azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9' storage_allowed_locations = ( 'azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/' ); alter storage integration s3_int set storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole' enabled = true storage_allowed_locations = ('*') storage_blocked_locations = ( 's3://mybucket3/path3/', 's3://mybucket4/path4/' ); alter storage integration gcs_int set enabled = true storage_allowed_locations = ('*') storage_blocked_locations = ( 'gcs://mybucket3/path3/', 'gcs://mybucket4/path4/' ); alter storage integration azure_int set enabled = true azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9' storage_allowed_locations = ('*') storage_blocked_locations = ( 'azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/' ); alter storage integration azure_int set enabled = true comment = 'test_comment' azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9' storage_allowed_locations = ('*') storage_blocked_locations = ( 'azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/' ); alter storage integration if exists azure_int set enabled = true comment = 'test_comment' azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9' storage_allowed_locations = ('*') storage_blocked_locations = ( 'azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/' ); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_storage_integration.yml000066400000000000000000000325541451700765000277530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9f3c2e27d2d0d69837f87b5cf9864a70a48856d013b05649a9170d67acfde55d file: - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - tag_equals: keyword: tag tag_reference: naked_identifier: tag1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value1'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - tag_equals: - keyword: tag - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value1'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'value2'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: comment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test comment'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: tag - tag_reference: naked_identifier: tag1 - comma: ',' - tag_reference: naked_identifier: tag2 - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - keyword: if - keyword: exists - object_reference: naked_identifier: test_integration - keyword: unset - keyword: tag - tag_reference: naked_identifier: tag1 - comma: ',' - tag_reference: naked_identifier: tag2 - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: enabled - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: unset - keyword: storage_blocked_locations - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - keyword: comment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test comment'" - statement_terminator: ; - statement: 
alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: comment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test comment'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_role_arn'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: storage_aws_object_acl - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_object_acl'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: test_integration - keyword: set - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_azure_tenant_id'" - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: set - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: set - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' 
- quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket3/path3/'" - comma: ',' - bucket_path: "'s3://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket3/path3/'" - comma: ',' - bucket_path: "'gcs://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: comment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_comment'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: alter_storage_integration_statement: - keyword: alter - keyword: storage - keyword: integration - keyword: if - keyword: exists - object_reference: naked_identifier: azure_int - keyword: set - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: comment - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test_comment'" - 
keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_stream.sql000066400000000000000000000006451451700765000251710ustar00rootroot00000000000000alter stream mystream set comment = 'New comment for stream'; alter stream if exists mystream set tag mytag='myvalue'; ALTER STREAM IF EXISTS mystream SET APPEND_ONLY = FALSE TAG mytag1='myvalue1', mytag2 = 'myvalue2' COMMENT = 'amazing comment'; ALTER STREAM IF EXISTS mystream SET INSERT_ONLY = TRUE COMMENT = 'amazing comment'; alter stream mystream unset comment; alter stream mystream unset tag mytag1, mytag2; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_stream.yml000066400000000000000000000061111451700765000251650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 17f8528b83595086c1d083d69e10b12016496e6955fe336060e376b71ee8d3ad file: - statement: alter_stream_statement: - keyword: alter - keyword: stream - object_reference: naked_identifier: mystream - keyword: set - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'New comment for stream'" - statement_terminator: ; - statement: alter_stream_statement: - keyword: alter - keyword: stream - keyword: if - keyword: exists - object_reference: naked_identifier: mystream - keyword: set - tag_equals: keyword: tag tag_reference: naked_identifier: mytag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'myvalue'" - statement_terminator: ; - statement: alter_stream_statement: - keyword: ALTER - keyword: STREAM - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mystream - keyword: SET - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - tag_equals: - keyword: TAG - tag_reference: naked_identifier: mytag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myvalue1'" - comma: ',' - tag_reference: naked_identifier: mytag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myvalue2'" - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: alter_stream_statement: - keyword: ALTER - keyword: STREAM - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: mystream - keyword: SET - keyword: INSERT_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: alter_stream_statement: - keyword: alter - keyword: 
stream - object_reference: naked_identifier: mystream - keyword: unset - keyword: comment - statement_terminator: ; - statement: alter_stream_statement: - keyword: alter - keyword: stream - object_reference: naked_identifier: mystream - keyword: unset - keyword: tag - tag_reference: naked_identifier: mytag1 - comma: ',' - tag_reference: naked_identifier: mytag2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table.sql000066400000000000000000000024541451700765000247650ustar00rootroot00000000000000ALTER TABLE my_old_table RENAME TO my_new_table; ALTER TABLE my_existing_table SWAP WITH my_another_table; ALTER TABLE my_existing_table ADD SEARCH OPTIMIZATION; ALTER TABLE my_existing_table DROP SEARCH OPTIMIZATION; ALTER TABLE my_table SET DATA_RETENTION_TIME_IN_DAYS = 30; ALTER TABLE my_table SET DEFAULT_DDL_COLLATION = 'en-ci'; ALTER TABLE my_table SET COMMENT = 'my table comment'; ALTER TABLE table1 ADD CONSTRAINT constraint1 PRIMARY KEY ( col1 ); ALTER TABLE table1 ADD CONSTRAINT "constraint1" PRIMARY KEY ( col1 ); ALTER TABLE table1 ADD CONSTRAINT "constraint1" PRIMARY KEY ( col1, col2 ); ALTER TABLE table1 ADD CONSTRAINT constraint1 FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 ); ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 ) REFERENCES table2 ( col2 ); ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 ) REFERENCES "schema1"."table1" ("col2"); ALTER TABLE table1 ADD CONSTRAINT "constraint1" FOREIGN KEY ( col1 ) REFERENCES "schema1"."table1" ( col1, col2 ); ALTER TABLE table1 DROP CONSTRAINT constraint1 UNIQUE pk_col, pk_col2; ALTER TABLE table1 RENAME CONSTRAINT constraint1 TO constraint2; ALTER TABLE "ADW_TEMP"."FRUIT_PRICE_SAT" ADD CONSTRAINT "FK_2" FOREIGN KEY ("SPECIAL_OFFER_ID") REFERENCES "ADW_TEMP"."OFFER_SAT" ("SPECIAL_OFFER_ID"); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table.yml000066400000000000000000000205411451700765000247640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a1f163734c1f7921014ea72eb0a17ffb2920056606e41b6337e1bae29722ab8e file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_old_table - keyword: RENAME - keyword: TO - table_reference: naked_identifier: my_new_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: SWAP - keyword: WITH - table_reference: naked_identifier: my_another_table - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: ADD - keyword: SEARCH - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_existing_table - keyword: DROP - keyword: SEARCH - keyword: OPTIMIZATION - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: DATA_RETENTION_TIME_IN_DAYS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '30' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: DEFAULT_DDL_COLLATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'en-ci'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: SET - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my table comment'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: PRIMARY - keyword: KEY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: 
table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: table2 - bracketed: start_bracket: ( column_reference: naked_identifier: col2 end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"schema1"' - dot: . - quoted_identifier: '"table1"' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"col2"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"constraint1"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: col1 end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"schema1"' - dot: . - quoted_identifier: '"table1"' - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: DROP - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: UNIQUE - column_reference: naked_identifier: pk_col - comma: ',' - column_reference: naked_identifier: pk_col2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table1 - alter_table_constraint_action: - keyword: RENAME - keyword: CONSTRAINT - naked_identifier: constraint1 - keyword: TO - naked_identifier: constraint2 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '"ADW_TEMP"' - dot: . - quoted_identifier: '"FRUIT_PRICE_SAT"' - alter_table_constraint_action: - keyword: ADD - keyword: CONSTRAINT - quoted_identifier: '"FK_2"' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '"SPECIAL_OFFER_ID"' end_bracket: ) - keyword: REFERENCES - table_reference: - quoted_identifier: '"ADW_TEMP"' - dot: . 
- quoted_identifier: '"OFFER_SAT"' - bracketed: start_bracket: ( column_reference: quoted_identifier: '"SPECIAL_OFFER_ID"' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table_clustering_action.sql000066400000000000000000000010761451700765000305600ustar00rootroot00000000000000ALTER TABLE my_table CLUSTER BY (c1, c2); ALTER TABLE my_table CLUSTER BY (to_date(c1), substring(c2, 0, 10)); ALTER TABLE my_table CLUSTER BY (v:"Data":id::number); ALTER TABLE my_table RECLUSTER; ALTER TABLE my_table RECLUSTER MAX_SIZE = 100; ALTER TABLE my_table RECLUSTER WHERE create_date BETWEEN ('2016-01-01') AND ('2016-01-07'); ALTER TABLE my_table RECLUSTER MAX_SIZE = 100 WHERE create_date BETWEEN ('2016-01-01') AND ('2016-01-07'); ALTER TABLE my_table SUSPEND RECLUSTER; ALTER TABLE my_table RESUME RECLUSTER; ALTER TABLE my_table DROP CLUSTERING KEY; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table_clustering_action.yml000066400000000000000000000132231451700765000305570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5462f4c98240c76d3b93e026e081026dcbd975fe32c9ed5f249a06470c14dbc5 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: CLUSTER - keyword: BY - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c1 - comma: ',' - expression: column_reference: naked_identifier: c2 - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: CLUSTER - keyword: BY - bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: to_date bracketed: start_bracket: ( expression: column_reference: naked_identifier: c1 end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: substring bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: c2 - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: '10' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: CLUSTER - keyword: BY - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: v semi_structured_expression: - colon: ':' - semi_structured_element: '"Data"' - colon: ':' - semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: keyword: RECLUSTER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: RECLUSTER - keyword: MAX_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - statement_terminator: ; - statement: alter_table_statement: 
- keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: keyword: RECLUSTER where_clause: keyword: WHERE expression: - column_reference: naked_identifier: create_date - keyword: BETWEEN - bracketed: start_bracket: ( expression: quoted_literal: "'2016-01-01'" end_bracket: ) - keyword: AND - bracketed: start_bracket: ( expression: quoted_literal: "'2016-01-07'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: RECLUSTER - keyword: MAX_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: create_date - keyword: BETWEEN - bracketed: start_bracket: ( expression: quoted_literal: "'2016-01-01'" end_bracket: ) - keyword: AND - bracketed: start_bracket: ( expression: quoted_literal: "'2016-01-07'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: SUSPEND - keyword: RECLUSTER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: RESUME - keyword: RECLUSTER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_clustering_action: - keyword: DROP - keyword: CLUSTERING - keyword: KEY - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table_column.sql000066400000000000000000000063231451700765000263410ustar00rootroot00000000000000-- Add column ---- Base cases ALTER TABLE my_table ADD COLUMN my_column INTEGER; ALTER TABLE my_table ADD COLUMN my_column VARCHAR(5000) NOT NULL; ------ Multiple columns ALTER TABLE my_table ADD COLUMN column_1 varchar, column_2 integer; ---- Default, auto-increment & identity ALTER TABLE my_table ADD COLUMN my_column INTEGER DEFAULT 1; ALTER TABLE my_table ADD COLUMN my_column INTEGER AUTOINCREMENT; ALTER TABLE my_table ADD COLUMN my_column INTEGER IDENTITY; ALTER TABLE my_table ADD COLUMN my_column INTEGER AUTOINCREMENT (10000, 1); ALTER TABLE my_table ADD COLUMN my_column INTEGER IDENTITY START 10000 INCREMENT 1; ---- Masking Policy ALTER TABLE my_table ADD COLUMN my_column INTEGER MASKING POLICY my_policy; ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY my_policy; ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY adatabase.aschema.apolicy; ALTER TABLE my_table ADD COLUMN my_column INTEGER WITH MASKING POLICY my_policy USING(my_column, my_column > 10); -- comment ALTER TABLE reporting_tbl ADD COLUMN reporting_group VARCHAR COMMENT 'internal reporting group defined by DE team'; -- without the word COLUMN ALTER TABLE rpt_enc_table ADD encounter_count INTEGER COMMENT 'count of encounters past year' ; -- Rename column ALTER TABLE empl_info RENAME COLUMN old_col_name TO new_col_name; -- Alter-modify column(s) ---- Base cases ------ Single column alter table t1 alter column c1 drop not null; alter table t1 alter c5 comment '50 character column'; ------ Multiple columns/properties alter table t1 modify c2 drop default, c3 set default seq5.nextval ; alter table t1 alter c4 set data type varchar(50), column c4 drop default; 
---- Set Masking Policy ------ Single column ALTER TABLE xxxx.example_table MODIFY COLUMN employeeCode SET MASKING POLICY example_MASKING_POLICY; ALTER TABLE aschema.atable MODIFY COLUMN acolumn SET MASKING POLICY adatabase.aschema.apolicy; alter table empl_info modify column empl_id set masking policy mask_empl_id; alter table empl_info modify column empl_id set masking policy mask_empl_id using(empl_id, empl_id > 10); ------ Multiple columns alter table empl_info modify column empl_id set masking policy mask_empl_id , column empl_dob set masking policy mask_empl_dob ; ---- Unset masking policy ------ Single column alter table empl_info modify column empl_id unset masking policy; ------ Multiple columns alter table empl_info modify column empl_id unset masking policy , column empl_dob unset masking policy ; --- Set Tag ALTER TABLE my_table MODIFY COLUMN my_column SET TAG my_tag = 'tagged'; --- Unset Tag ALTER TABLE my_table MODIFY COLUMN my_column UNSET TAG my_tag; -- Drop column ALTER TABLE empl_info DROP COLUMN my_column; ALTER TABLE some_schema.empl_info DROP COLUMN my_column; ALTER TABLE my_table DROP COLUMN column_1, column_2, column_3; -- IF EXISTS ALTER TABLE IF EXISTS my_table ADD COLUMN my_column INTEGER; ALTER TABLE IF EXISTS empl_info DROP COLUMN my_column; ALTER TABLE IF EXISTS empl_info DROP my_column; ALTER TABLE IF EXISTS empl_info RENAME COLUMN old_col_name TO new_col_name; -- DROP PRIMARY KEY ALTER TABLE my_schema.my_table drop PRIMARY KEY; -- ADD PRIMARY KEY ALTER TABLE my_schema.my_table add PRIMARY KEY(TABLE_ID); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_table_column.yml000066400000000000000000000442431451700765000263460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a661d921f72d807ed26fa97225e83c40b6e705e975a7fe0622e259a0999323e8 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_definition: naked_identifier: my_column data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '5000' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: column_1 - data_type: data_type_identifier: varchar - comma: ',' - column_reference: naked_identifier: column_2 - data_type: data_type_identifier: integer - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: DEFAULT - expression: numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: AUTOINCREMENT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: IDENTITY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: AUTOINCREMENT - bracketed: - start_bracket: ( - numeric_literal: '10000' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: IDENTITY - keyword: START - numeric_literal: '10000' - keyword: INCREMENT - numeric_literal: '1' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - 
table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: - naked_identifier: adatabase - dot: . - naked_identifier: aschema - dot: . - function_name_identifier: apolicy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - keyword: USING - bracketed: start_bracket: ( column_reference: naked_identifier: my_column comma: ',' expression: column_reference: naked_identifier: my_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: reporting_tbl - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: reporting_group - data_type: data_type_identifier: VARCHAR - comment_clause: keyword: COMMENT quoted_literal: "'internal reporting group defined by DE team'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: rpt_enc_table - alter_table_table_column_action: keyword: ADD column_reference: naked_identifier: encounter_count data_type: data_type_identifier: INTEGER comment_clause: keyword: COMMENT quoted_literal: "'count of encounters past year'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_col_name - keyword: TO - column_reference: naked_identifier: new_col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: alter - keyword: column - column_reference: naked_identifier: c1 - keyword: drop - keyword: not - keyword: 'null' - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_table_column_action: keyword: alter column_reference: naked_identifier: c5 comment_clause: keyword: comment quoted_literal: "'50 character column'" - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: modify - column_reference: naked_identifier: c2 - keyword: drop - keyword: default - comma: ',' - column_reference: naked_identifier: c3 - keyword: set - keyword: default - naked_identifier: seq5 - dot: . 
- keyword: nextval - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: t1 - alter_table_table_column_action: - keyword: alter - column_reference: naked_identifier: c4 - keyword: set - keyword: data - keyword: type - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '50' end_bracket: ) - comma: ',' - keyword: column - column_reference: naked_identifier: c4 - keyword: drop - keyword: default - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: xxxx - dot: . - naked_identifier: example_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: employeeCode - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: example_MASKING_POLICY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: aschema - dot: . - naked_identifier: atable - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: acolumn - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: - naked_identifier: adatabase - dot: . - naked_identifier: aschema - dot: . - function_name_identifier: apolicy - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - keyword: using - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id comma: ',' expression: column_reference: naked_identifier: empl_id comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_id - comma: ',' - keyword: column - column_reference: naked_identifier: empl_dob - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: mask_empl_dob - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_table_statement: - keyword: alter - keyword: table - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: modify - keyword: column - column_reference: naked_identifier: empl_id - keyword: 
unset - keyword: masking - keyword: policy - comma: ',' - keyword: column - column_reference: naked_identifier: empl_dob - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: my_column - keyword: SET - keyword: TAG - tag_reference: naked_identifier: my_tag - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tagged'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: my_column - keyword: UNSET - keyword: TAG - tag_reference: naked_identifier: my_tag - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: some_schema - dot: . - naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_1 - comma: ',' - column_reference: naked_identifier: column_2 - comma: ',' - column_reference: naked_identifier: column_3 - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: my_table - alter_table_table_column_action: - keyword: ADD - keyword: COLUMN - column_reference: naked_identifier: my_column - data_type: data_type_identifier: INTEGER - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: keyword: DROP column_reference: naked_identifier: my_column - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: empl_info - alter_table_table_column_action: - keyword: RENAME - keyword: COLUMN - column_reference: naked_identifier: old_col_name - keyword: TO - column_reference: naked_identifier: new_col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: drop - keyword: PRIMARY - keyword: KEY - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - alter_table_constraint_action: - keyword: add - keyword: PRIMARY - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: TABLE_ID end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_add_after.sql000066400000000000000000000000531451700765000266220ustar00rootroot00000000000000ALTER TASK my_task ADD AFTER another_task; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_add_after.yml000066400000000000000000000012251451700765000266260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9b12935d22e7a6412e3df3b71ccb165a44a8946c522db91ac3eeb48231443b43 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: ADD - keyword: AFTER - object_reference: naked_identifier: another_task statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_if_exists_resume.sql000066400000000000000000000000451451700765000302670ustar00rootroot00000000000000ALTER TASK IF EXISTS my_task RESUME; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_if_exists_resume.yml000066400000000000000000000011541451700765000302730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4148b620a6f5f504a0e18d9da6811e3c3245df22761f8d1eaa27facc74eff009 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: my_task - keyword: RESUME statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_modify_as.sql000066400000000000000000000000501451700765000266600ustar00rootroot00000000000000ALTER TASK my_task MODIFY AS SELECT 42; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_modify_as.yml000066400000000000000000000013421451700765000266670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 48346285af95dc42042da4c534119a8c14f9d0c0142c7faef32a83723e84b751 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: MODIFY - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '42' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_modify_when.sql000066400000000000000000000000451451700765000272220ustar00rootroot00000000000000ALTER TASK my_task MODIFY WHEN TRUE; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_modify_when.yml000066400000000000000000000011661451700765000272310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e508027a2af0aa45d938ebd27a1f7422fadabb4d2c0782e3d116985cea689c5 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: MODIFY - keyword: WHEN - boolean_literal: 'TRUE' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_remove_after.sql000066400000000000000000000000561451700765000273720ustar00rootroot00000000000000ALTER TASK my_task REMOVE AFTER another_task; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_remove_after.yml000066400000000000000000000012301451700765000273670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cd3c00d8d446a5315310d8a6d29fbf2357e1a236f271ad803d8c8deacf06c2a9 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: REMOVE - keyword: AFTER - object_reference: naked_identifier: another_task statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_resume.sql000066400000000000000000000000331451700765000262070ustar00rootroot00000000000000ALTER TASK my_task RESUME; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_resume.yml000066400000000000000000000011041451700765000262110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 43448d972ffbbfd89bcd826bfca0caebfe76ba3fc7aecbd664b67bc46cc46dbf file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: RESUME statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_full.sql000066400000000000000000000000611451700765000265250ustar00rootroot00000000000000ALTER TASK my_task SET a = 'b', c = 1, d = TRUE; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_full.yml000066400000000000000000000017541451700765000265410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 42fc7c652b12f6b81590456a053332797aaf08485fd4787029091946f0ddb213 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_set_clause: - keyword: SET - parameter: a - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'b'" - comma: ',' - parameter: c - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - parameter: d - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_simple.sql000066400000000000000000000000401451700765000270510ustar00rootroot00000000000000ALTER TASK my_task SET x = 'y'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_simple.yml000066400000000000000000000013271451700765000270640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fcfe8ff490b0f86d406320c94740e967a924033398f707c950c26d50adb3026f file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_set_clause: keyword: SET parameter: x comparison_operator: raw_comparison_operator: '=' quoted_literal: "'y'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_special_full.sql000066400000000000000000000001521451700765000302260ustar00rootroot00000000000000ALTER TASK my_task SET WAREHOUSE = my_warehouse SCHEDULE = '2 MINUTE' ALLOW_OVERLAPPING_EXECUTION = TRUE; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_special_full.yml000066400000000000000000000020361451700765000302330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b0dafb65360d9c10e9c56627e02ea35220e8528526ebed4bb9d3a47f8053c304 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_special_set_clause: - keyword: SET - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_warehouse - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2 MINUTE'" - keyword: ALLOW_OVERLAPPING_EXECUTION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_special_simple.sql000066400000000000000000000000611451700765000305540ustar00rootroot00000000000000ALTER TASK my_task SET WAREHOUSE = my_warehouse; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_set_special_simple.yml000066400000000000000000000014121451700765000305570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 45fecfe8f9c8243e07accfc63cd05cc8e58ab462c0025be81848ce5b5d8c9e30 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_special_set_clause: - keyword: SET - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_warehouse statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_suspend.sql000066400000000000000000000000341451700765000263710ustar00rootroot00000000000000ALTER TASK my_task SUSPEND; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_suspend.yml000066400000000000000000000011051451700765000263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f6a07613214b28476af972d503b2f1e699bf9ad2329c4dd9ee843b966d16b085 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - keyword: SUSPEND statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_unset_full.sql000066400000000000000000000000421451700765000270670ustar00rootroot00000000000000ALTER TASK my_task UNSET a, b, c; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_unset_full.yml000066400000000000000000000013111451700765000270710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cdb12631349376612d04f1411ce356f215e8b42523d555865b7af18a680e7988 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_unset_clause: - keyword: UNSET - parameter: a - comma: ',' - parameter: b - comma: ',' - parameter: c statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_unset_simple.sql000066400000000000000000000000341451700765000274170ustar00rootroot00000000000000ALTER TASK my_task UNSET a; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_task_unset_simple.yml000066400000000000000000000011711451700765000274240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0ea1a555d52a62111d506c275aea597b599a1cb5766ab7eda4a7df3d7e8e41c0 file: statement: alter_task_statement: - keyword: ALTER - keyword: TASK - object_reference: naked_identifier: my_task - alter_task_unset_clause: keyword: UNSET parameter: a statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_abort_query.sql000066400000000000000000000000601451700765000272570ustar00rootroot00000000000000ALTER USER IF EXISTS my_user ABORT ALL QUERIES; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_abort_query.yml000066400000000000000000000012231451700765000272630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61ed0eeed7391519e17038ab1675e29add5eba4ebb47f01865451d7e41c910c5 file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - keyword: IF - keyword: EXISTS - role_reference: naked_identifier: my_user - keyword: ABORT - keyword: ALL - keyword: QUERIES statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_delegate_auth.sql000066400000000000000000000001251451700765000275200ustar00rootroot00000000000000ALTER USER my_user REMOVE DELEGATED AUTHORIZATIONS FROM SECURITY INTEGRATION my_idp; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_delegate_auth.yml000066400000000000000000000013711451700765000275260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b44dc3e20f2ba9931251e2890b3682c088f5c4a5e638f6bede218a098066e9fd file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: REMOVE - keyword: DELEGATED - keyword: AUTHORIZATIONS - keyword: FROM - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_idp statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_delegate_auth_role.sql000066400000000000000000000001371451700765000305440ustar00rootroot00000000000000ALTER USER my_user ADD DELEGATED AUTHORIZATION OF ROLE my_role TO SECURITY INTEGRATION my_idp; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_delegate_auth_role.yml000066400000000000000000000015231451700765000305460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b03b27b8a541fdff08fbe7541b4affbc54268d993cf46918ca530489978e60eb file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: ADD - keyword: DELEGATED - keyword: AUTHORIZATION - keyword: OF - keyword: ROLE - object_reference: naked_identifier: my_role - keyword: TO - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_idp statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_remove_delegate_auth.sql000066400000000000000000000001441451700765000310760ustar00rootroot00000000000000ALTER USER my_user REMOVE DELEGATED AUTHORIZATION OF ROLE my_role FROM SECURITY INTEGRATION my_idp; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_remove_delegate_auth.yml000066400000000000000000000015301451700765000311000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df2710890f25e3dcc3c48047b4cbe87720cfa2e28d5593b350149d87f7c44801 file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: REMOVE - keyword: DELEGATED - keyword: AUTHORIZATION - keyword: OF - keyword: ROLE - object_reference: naked_identifier: my_role - keyword: FROM - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_idp statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_rename.sql000066400000000000000000000000631451700765000261750ustar00rootroot00000000000000ALTER USER IF EXISTS my_user RENAME TO "new_name"; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_rename.yml000066400000000000000000000012741451700765000262040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f4de6567d814a83219145a5aecb5df2d061cd8cc3df92ae8dcf082b3bfcdde42 file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - keyword: IF - keyword: EXISTS - role_reference: naked_identifier: my_user - keyword: RENAME - keyword: TO - object_reference: quoted_identifier: '"new_name"' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_reset_password.sql000066400000000000000000000000431451700765000277700ustar00rootroot00000000000000ALTER USER my_user RESET PASSWORD; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_reset_password.yml000066400000000000000000000011311451700765000277710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 59a47d9eb3ce59750ffb3e13a913c9cfe476b0c2c9b3f817557c15542ec40f5f file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: RESET - keyword: PASSWORD statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_set_values.sql000066400000000000000000000001061451700765000270760ustar00rootroot00000000000000ALTER USER my_user SET password = 'abc123', DEFAULT_ROLE = user_role; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_set_values.yml000066400000000000000000000015451451700765000271100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 76a544b5e2171a7f7b36125275635082014c8b6e883fec398e9ae091664ec4a2 file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: SET - parameter: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - comma: ',' - parameter: DEFAULT_ROLE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user_role statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_unset_values.sql000066400000000000000000000001021451700765000274350ustar00rootroot00000000000000ALTER USER my_user unset USE_CACHED_RESULT, must_change_password; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_user_unset_values.yml000066400000000000000000000012331451700765000274450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0f94d2bdd6cb9b528654db6db7089ccddaf9cf20433b8060906ebdaa76da603a file: statement: alter_user_statement: - keyword: ALTER - keyword: USER - role_reference: naked_identifier: my_user - keyword: unset - parameter: USE_CACHED_RESULT - comma: ',' - parameter: must_change_password statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_view.sql000066400000000000000000000015341451700765000246460ustar00rootroot00000000000000alter view view1 rename to view2; alter view view1 set secure; alter view view1 unset secure; -- single column alter view user_info_v modify column ssn_number set masking policy ssn_mask_v; -- multiple columns alter view user_info_v modify column ssn_number set masking policy ssn_mask_v , column dob set masking policy dob_mask_v ; -- single column alter view user_info_v modify column ssn_number unset masking policy; -- multiple columns alter view user_info_v modify column ssn_number unset masking policy , column dob unset masking policy ; alter view v1 add row access policy rap_v1 on (empl_id); alter view v1 drop row access policy rap_v1; alter view v1 drop row access policy rap_v1_version_1, add row access policy rap_v1_version_2 on (empl_id); alter view v1 modify column foo set masking policy my.scoped.policy; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_view.yml000066400000000000000000000112471451700765000246520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a420dc4999c43a235f958ae912f431afd817a6b0dd44737ccf8f002256e62eeb file: - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: rename - keyword: to - table_reference: naked_identifier: view2 - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: set - keyword: secure - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: view1 - keyword: unset - keyword: secure - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: ssn_mask_v - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: ssn_mask_v - comma: ',' - keyword: column - column_reference: naked_identifier: dob - keyword: set - keyword: masking - keyword: policy - function_name: function_name_identifier: dob_mask_v - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: 
view - table_reference: naked_identifier: user_info_v - keyword: modify - keyword: column - column_reference: naked_identifier: ssn_number - keyword: unset - keyword: masking - keyword: policy - comma: ',' - keyword: column - column_reference: naked_identifier: dob - keyword: unset - keyword: masking - keyword: policy - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: add - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1 - keyword: 'on' - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: drop - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1 - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: drop - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1_version_1 - comma: ',' - keyword: add - keyword: row - keyword: access - keyword: policy - function_name: function_name_identifier: rap_v1_version_2 - keyword: 'on' - bracketed: start_bracket: ( column_reference: naked_identifier: empl_id end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: alter - keyword: view - table_reference: naked_identifier: v1 - keyword: modify - keyword: column - column_reference: naked_identifier: foo - keyword: set - keyword: masking - keyword: policy - function_name: - naked_identifier: my - dot: . - naked_identifier: scoped - dot: . 
- function_name_identifier: policy - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_warehouse.sql000066400000000000000000000034621451700765000257000ustar00rootroot00000000000000alter warehouse if exists wh1 rename to wh2; alter warehouse my_wh set warehouse_size=medium; alter warehouse LOAD_WH set warehouse_size = XXLARGE; alter warehouse LOAD_WH set WAIT_FOR_COMPLETION = TRUE; alter warehouse LOAD_WH set MAX_CLUSTER_COUNT = 5; alter warehouse LOAD_WH set MIN_CLUSTER_COUNT = 1; alter warehouse LOAD_WH set SCALING_POLICY = STANDARD; alter warehouse LOAD_WH set SCALING_POLICY = 'STANDARD'; alter warehouse LOAD_WH set SCALING_POLICY = ECONOMY; alter warehouse LOAD_WH set SCALING_POLICY = 'ECONOMY'; alter warehouse LOAD_WH set AUTO_SUSPEND = 1; alter warehouse LOAD_WH set AUTO_RESUME = FALSE; alter warehouse LOAD_WH set RESOURCE_MONITOR = monitor_name; alter warehouse LOAD_WH set COMMENT = 'This is a comment'; alter warehouse LOAD_WH set MAX_CONCURRENCY_LEVEL = 1; alter warehouse LOAD_WH set STATEMENT_QUEUED_TIMEOUT_IN_SECONDS = 300; alter warehouse LOAD_WH set STATEMENT_TIMEOUT_IN_SECONDS = 300; alter warehouse LOAD_WH set TAG thetag = 'tag1'; alter warehouse LOAD_WH set TAG thetag1 = 'tag1', thetag2 = 'tag2'; alter warehouse LOAD_WH RESUME IF SUSPENDED; alter warehouse LOAD_WH ABORT ALL QUERIES; alter warehouse LOAD_WH RENAME TO LOAD_WH2; alter warehouse LOAD_WH SET MAX_CONCURRENCY_LEVEL = 1; alter warehouse LOAD_WH UNSET STATEMENT_QUEUED_TIMEOUT_IN_SECONDS; alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE; alter warehouse LOAD_WH UNSET WAREHOUSE_SIZE, WAIT_FOR_COMPLETION; ALTER WAREHOUSE SET WAREHOUSE_SIZE='X-LARGE'; alter warehouse set warehouse_size=medium; alter warehouse LOAD_WH set WAREHOUSE_TYPE = STANDARD; alter warehouse LOAD_WH set WAREHOUSE_TYPE = 'SNOWPARK-OPTIMIZED'; ALTER WAREHOUSE IDENTIFIER($var_wh) SET WAREHOUSE_TYPE = STANDARD; ALTER WAREHOUSE CI_TRANSFORMING SET COMMENT = 'Warehouse for dbt development transformations in CI' , AUTO_RESUME = TRUE , AUTO_SUSPEND=30; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/alter_warehouse.yml000066400000000000000000000272321451700765000257030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4c934979dd5ee1b2407751137f6f8e93ca0c2e020a91674a8c4746ca4e2cb8fd file: - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - keyword: if - keyword: exists - object_reference: naked_identifier: wh1 - keyword: rename - keyword: to - object_reference: naked_identifier: wh2 - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: my_wh - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: medium - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: XXLARGE - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAIT_FOR_COMPLETION comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MAX_CLUSTER_COUNT comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MIN_CLUSTER_COUNT comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: "'STANDARD'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: ECONOMY - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: SCALING_POLICY comparison_operator: raw_comparison_operator: '=' scaling_policy: "'ECONOMY'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: AUTO_SUSPEND comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: AUTO_RESUME comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - 
statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: RESOURCE_MONITOR comparison_operator: raw_comparison_operator: '=' naked_identifier: monitor_name - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'This is a comment'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: MAX_CONCURRENCY_LEVEL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: STATEMENT_QUEUED_TIMEOUT_IN_SECONDS comparison_operator: raw_comparison_operator: '=' numeric_literal: '300' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: STATEMENT_TIMEOUT_IN_SECONDS comparison_operator: raw_comparison_operator: '=' numeric_literal: '300' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - tag_equals: keyword: TAG tag_reference: naked_identifier: thetag comparison_operator: raw_comparison_operator: '=' quoted_literal: "'tag1'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - tag_equals: - keyword: TAG - tag_reference: naked_identifier: thetag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag1'" - comma: ',' - tag_reference: naked_identifier: thetag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'tag2'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: RESUME - keyword: IF - keyword: SUSPENDED - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: ABORT - keyword: ALL - keyword: QUERIES - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: RENAME - keyword: TO - object_reference: naked_identifier: LOAD_WH2 - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: SET - warehouse_object_properties: keyword: MAX_CONCURRENCY_LEVEL comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: STATEMENT_QUEUED_TIMEOUT_IN_SECONDS - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: 
naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: WAREHOUSE_SIZE - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: UNSET - naked_identifier: WAREHOUSE_SIZE - comma: ',' - naked_identifier: WAIT_FOR_COMPLETION - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - keyword: SET - warehouse_object_properties: keyword: WAREHOUSE_SIZE comparison_operator: raw_comparison_operator: '=' warehouse_size: "'X-LARGE'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - keyword: set - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: medium - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: alter - keyword: warehouse - object_reference: naked_identifier: LOAD_WH - keyword: set - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: "'SNOWPARK-OPTIMIZED'" - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - object_reference: keyword: IDENTIFIER bracketed: start_bracket: ( variable: $var_wh end_bracket: ) - keyword: SET - warehouse_object_properties: keyword: WAREHOUSE_TYPE comparison_operator: raw_comparison_operator: '=' warehouse_size: STANDARD - statement_terminator: ; - statement: alter_warehouse_statement: - keyword: ALTER - keyword: WAREHOUSE - object_reference: naked_identifier: CI_TRANSFORMING - keyword: SET - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Warehouse for dbt development transformations in CI'" - comma: ',' - warehouse_object_properties: keyword: AUTO_RESUME comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - comma: ',' - warehouse_object_properties: keyword: AUTO_SUSPEND comparison_operator: raw_comparison_operator: '=' numeric_literal: '30' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/bare_functions.sql000066400000000000000000000001611451700765000255010ustar00rootroot00000000000000SELECT CURRENT_TIMESTAMP , CURRENT_TIME , CURRENT_DATE , CURRENT_USER , LOCALTIME , LOCALTIMESTAMP ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/bare_functions.yml000066400000000000000000000017651451700765000255160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
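#
# As an illustrative aside (not part of the generated fixture), a tree like
# the one below can be reproduced ad hoc with SQLFluff's simple Python API;
# the fixture path used here is just an example:
#
#   import sqlfluff
#
#   sql = open("test/fixtures/dialects/snowflake/bare_functions.sql").read()
#   # Returns a nested dict mirroring the YAML below; raises APIParsingError
#   # if the statement does not parse under the requested dialect.
#   tree = sqlfluff.parse(sql, dialect="snowflake")
#   print(tree)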
_hash: ab003ddfbb797cb27d30cc9c29af599f03eeae608c276a8104bb77ed8b79fc78 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: CURRENT_TIMESTAMP - comma: ',' - select_clause_element: bare_function: CURRENT_TIME - comma: ',' - select_clause_element: bare_function: CURRENT_DATE - comma: ',' - select_clause_element: bare_function: CURRENT_USER - comma: ',' - select_clause_element: bare_function: LOCALTIME - comma: ',' - select_clause_element: bare_function: LOCALTIMESTAMP statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/begin_end.sql000066400000000000000000000001001451700765000244030ustar00rootroot00000000000000begin; select 1; select 2; begin; select 3; select 4; end; end; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/begin_end.yml000066400000000000000000000026121451700765000244170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f6916791b4ecddc4ace203e17b85a320cbabe527a66abe32ba71b53811c14cc5 file: - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '4' - statement_terminator: ; - statement: scripting_block_statement: keyword: end - statement_terminator: ; - statement: scripting_block_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/call.sql000066400000000000000000000001441451700765000234140ustar00rootroot00000000000000CALL sv_proc1('Manitoba', 127.4); SET Variable1 = 49; CALL sv_proc2($Variable1); CALL sv_proc3(); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/call.yml000066400000000000000000000027371451700765000234300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 70acdf1145d808f6fd2a6e54858efba4729c1de5b3da2c9239c62e09a0d2e65a file: - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc1 bracketed: - start_bracket: ( - expression: quoted_literal: "'Manitoba'" - comma: ',' - expression: numeric_literal: '127.4' - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: Variable1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '49' - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc2 bracketed: start_bracket: ( expression: variable: $Variable1 end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc3 bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/call_statement.sql000066400000000000000000000001721451700765000255010ustar00rootroot00000000000000CALL MyStoredProcedure(CURRENT_ROLE()); CALL sv_proc1('Manitoba', 127.4); SET Variable1 = 49; CALL sv_proc2($Variable1); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/call_statement.yml000066400000000000000000000033011451700765000255000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f91e6eab07bce733772f80cd2f1803bb3590c44416987b08609627f2e4153167 file: - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: MyStoredProcedure bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: CURRENT_ROLE bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc1 bracketed: - start_bracket: ( - expression: quoted_literal: "'Manitoba'" - comma: ',' - expression: numeric_literal: '127.4' - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: Variable1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '49' - statement_terminator: ; - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: sv_proc2 bracketed: start_bracket: ( expression: variable: $Variable1 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/cast_datatype_accessor.sql000066400000000000000000000002421451700765000272070ustar00rootroot00000000000000SELECT bar::array[0] AS channel , foo:bar::array[2] AS channel2 , bar::array[0][1] AS channel3 , raw:foo::array[0]::string AS channel4 FROM my_table; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/cast_datatype_accessor.yml000066400000000000000000000062451451700765000272220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b4d0dd3efafa1a377726c160270be1d9204731518fd317a69985974fa30ac1b8 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: bar casting_operator: '::' data_type: data_type_identifier: array array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: channel - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: foo semi_structured_expression: colon: ':' semi_structured_element: bar casting_operator: '::' data_type: data_type_identifier: array array_accessor: start_square_bracket: '[' numeric_literal: '2' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: channel2 - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: bar - casting_operator: '::' - data_type: data_type_identifier: array - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '1' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: channel3 - comma: ',' - select_clause_element: expression: cast_expression: - column_reference: naked_identifier: raw - semi_structured_expression: colon: ':' semi_structured_element: foo - casting_operator: '::' - data_type: data_type_identifier: array - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - casting_operator: '::' - data_type: data_type_identifier: string alias_expression: keyword: AS naked_identifier: channel4 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/changes_clause.sql000066400000000000000000000007611451700765000254520ustar00rootroot00000000000000select * from t1 changes(information => default) at(timestamp => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp); select * from t1 changes(information => append_only) at(offset => -60*5); select c1 from t1 changes(information => append_only) at(timestamp => 'Fri, 01 May 2015 16:20:00 -0700'::timestamp) end(timestamp => 'Fri, 05 May 2015 16:20:00 -0700'::timestamp); select * from t1 changes(information => default) before(statement => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/changes_clause.yml000066400000000000000000000115221451700765000254510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 71297b1cb80e4085221b6c2dba0cd9142e4a1eaa595ecd7645fa6fb1497b717a file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: default - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 01 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: append_only - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: offset parameter_assigner: => expression: - numeric_literal: sign_indicator: '-' numeric_literal: '60' - binary_operator: '*' - numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: append_only - end_bracket: ) - keyword: at - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 01 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - keyword: end - bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: cast_expression: quoted_literal: "'Fri, 05 May 2015 16:20:00 -0700'" casting_operator: '::' data_type: keyword: timestamp end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 changes_clause: - keyword: changes - bracketed: - start_bracket: ( - keyword: information - parameter_assigner: => - keyword: default - end_bracket: ) - keyword: before - bracketed: start_bracket: ( keyword: statement parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/col_position.sql000066400000000000000000000003711451700765000252040ustar00rootroot00000000000000-- In snowflake the column position is denoted with $n syntax (e.g. $1, $2) -- https://docs.snowflake.com/en/sql-reference/sql/select.html#parameters select $1 as type, $2 as price from (values ('toffee', 5), ('starburst', 8), ('flying_saucer', 1)) sqlfluff-2.3.5/test/fixtures/dialects/snowflake/col_position.yml000066400000000000000000000040531451700765000252070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 538a38560aeef8826155d2f8abc3753930e5ade91cffc97aac60f0a024a40c96 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 alias_expression: keyword: as naked_identifier: type - comma: ',' - select_clause_element: column_reference: column_index_identifier_segment: $2 alias_expression: keyword: as naked_identifier: price from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: quoted_literal: "'toffee'" - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'starburst'" - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'flying_saucer'" - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/snowflake/comment_statement.sql000066400000000000000000000020511451700765000262260ustar00rootroot00000000000000comment on column my_table.my_column is 'comment'; comment on table foo is 'comment'; comment on view foo is 'comment'; comment on schema foo is 'comment'; comment on database foo is 'comment'; comment on warehouse foo is 'comment'; comment on user foo is 'comment'; comment on stage foo is 'comment'; comment on function foo is 'comment'; comment on procedure foo is 'comment'; comment on sequence foo is 'comment'; comment on share foo is 'comment'; comment on pipe foo is 'comment'; comment on stream foo is 'comment'; comment on task foo is 'comment'; comment on network policy foo is 'comment'; comment on api integration foo is 'comment'; comment on notification integration foo is 'comment'; comment on security integration foo is 'comment'; comment on storage integration foo is 'comment'; comment on session policy foo is 'comment'; comment on external table foo is 'comment'; comment on materialized view foo is 'comment'; comment on masking policy foo is 'comment'; comment on row access policy foo is 'comment'; comment on file format foo is 'comment'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/comment_statement.yml000066400000000000000000000152221451700765000262340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b6fd985f23a4d09c42eb03d69589b5522249b07828edb281805a559ceabdbdb7 file: - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: column - object_reference: - naked_identifier: my_table - dot: . 
- naked_identifier: my_column - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: table - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: view - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: schema - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: database - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: user - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: stage - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: function - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: procedure - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: sequence - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: share - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: pipe - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: stream - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: task - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: network - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: api - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: notification - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - 
keyword: comment - keyword: 'on' - keyword: security - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: storage - keyword: integration - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: session - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: external - keyword: table - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: materialized - keyword: view - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: masking - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: row - keyword: access - keyword: policy - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; - statement: comment_statement: - keyword: comment - keyword: 'on' - keyword: file - keyword: format - object_reference: naked_identifier: foo - keyword: is - quoted_literal: "'comment'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/connect_by.sql000066400000000000000000000014041451700765000246240ustar00rootroot00000000000000select employee_id, manager_id, title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; select sys_connect_by_path(title, ' -> '), employee_id, manager_id, title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; select description, quantity, component_id, parent_component_id, sys_connect_by_path(component_id, ' -> ') as path from components start with component_id = 1 connect by parent_component_id = prior component_id order by path; select employee_id, manager_id, title, connect_by_root title as root_title from employees start with title = 'President' connect by manager_id = prior employee_id order by employee_id; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/connect_by.yml000066400000000000000000000161311451700765000246310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b2ea3dbc494b78abe541194309260d24ae0a2a77bf63f9da74da0b73cdb74e19 file: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: sys_connect_by_path bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: title - comma: ',' - expression: quoted_literal: "' -> '" - end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: description - comma: ',' - select_clause_element: column_reference: naked_identifier: quantity - comma: ',' - select_clause_element: column_reference: naked_identifier: component_id - comma: ',' - select_clause_element: column_reference: naked_identifier: parent_component_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sys_connect_by_path bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: component_id - comma: ',' - expression: quoted_literal: "' -> '" - end_bracket: ) alias_expression: keyword: as naked_identifier: path from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: components connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: component_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: connect - keyword: by - column_reference: naked_identifier: parent_component_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: component_id orderby_clause: - keyword: order - keyword: by - column_reference: 
naked_identifier: path - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: employee_id - comma: ',' - select_clause_element: column_reference: naked_identifier: manager_id - comma: ',' - select_clause_element: column_reference: naked_identifier: title - comma: ',' - select_clause_element: keyword: connect_by_root column_reference: naked_identifier: title alias_expression: keyword: as naked_identifier: root_title from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees connectby_clause: - keyword: start - keyword: with - expression: column_reference: naked_identifier: title comparison_operator: raw_comparison_operator: '=' quoted_literal: "'President'" - keyword: connect - keyword: by - column_reference: naked_identifier: manager_id - comparison_operator: raw_comparison_operator: '=' - keyword: prior - column_reference: naked_identifier: employee_id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: employee_id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location.sql000066400000000000000000000004051451700765000263740ustar00rootroot00000000000000COPY INTO '@public.dir/airflow-pipelines/' FROM "MODEL"."FCT_ROLLING_ACTIVE_USERS_L28" FILE_FORMAT = (TYPE = PARQUET) SINGLE = FALSE MAX_FILE_SIZE = 1000000000 INCLUDE_QUERY_ID = TRUE HEADER = TRUE sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location.yml000066400000000000000000000031201451700765000263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 64f9c2c91035a486e08c134c1d31e91cd01c594c63aec0ab2e42a56689c6b655 file: statement: copy_into_location_statement: - keyword: COPY - keyword: INTO - storage_location: stage_path: "'@public.dir/airflow-pipelines/'" - keyword: FROM - table_reference: - quoted_identifier: '"MODEL"' - dot: . 
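#
# A typical end-to-end workflow when adding a fixture such as the SQL file
# above is sketched below; the pytest "-k" filter is illustrative, and any
# selector that reaches the dialect tests will do:
#
#   python test/generate_parse_fixture_yml.py
#   pytest test/dialects/ -k snowflake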
- quoted_identifier: '"FCT_ROLLING_ACTIVE_USERS_L28"' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: PARQUET end_bracket: ) - copy_options: - keyword: SINGLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: MAX_FILE_SIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1000000000' - keyword: INCLUDE_QUERY_ID - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: HEADER - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_Amazon_SC3.sql000066400000000000000000000002561451700765000303550ustar00rootroot00000000000000copy into 's3://mybucket/unload/' from mytable credentials = (aws_key_id='xxxx' aws_secret_key='xxxxx' aws_token='xxxxxx') file_format = (format_name = my_csv_format); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_Amazon_SC3.yml000066400000000000000000000031341451700765000303550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2ed14ac70f702add0443d2abba59488f249b2440cce3ef1c7a852157bb9033dd file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'s3://mybucket/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: - start_bracket: ( - keyword: aws_key_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxx'" - keyword: aws_secret_key - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxxx'" - keyword: aws_token - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'xxxxxx'" - end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_Azure.sql000066400000000000000000000002551451700765000275450ustar00rootroot00000000000000copy into 'azure://myaccount.blob.core.windows.net/mycontainer/unload/' from mytable credentials=(azure_sas_token='xxxx') file_format = (format_name = my_csv_format); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_Azure.yml000066400000000000000000000025571451700765000275560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 11c23956a0c2ca1a9bedb96a614c9be3943e06e6da3e5dd604ad56e031512e35 file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: azure_sas_token comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_GoogleCloud.sql000066400000000000000000000002531451700765000306600ustar00rootroot00000000000000copy into 'azure://myaccount.blob.core.windows.net/mycontainer/unload/' from mytable credentials=(azure_sas_token='xxxx') file_format = (format_name = my_csv_format); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_GoogleCloud.yml000066400000000000000000000025571451700765000306730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 11c23956a0c2ca1a9bedb96a614c9be3943e06e6da3e5dd604ad56e031512e35 file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/unload/'" - keyword: from - table_reference: naked_identifier: mytable - stage_parameters: keyword: credentials comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: azure_sas_token comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_csv_format end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_partitionby.sql000066400000000000000000000004211451700765000310160ustar00rootroot00000000000000copy into @%t1 from t1 partition by ('date=' || to_varchar(dt, 'YYYY-MM-DD') || '/hour=' || to_varchar(date_part(hour, ts))) -- Concatenate labels and column values to output meaningful filenames file_format = (type=parquet) max_file_size = 32000000 header=true; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_location_partitionby.yml000066400000000000000000000053471451700765000310340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 53a1d62a5ea066a8ab5f0eca78f91e43ad43c776489d6bf50e9bbf75b968ced6 file: statement: copy_into_location_statement: - keyword: copy - keyword: into - storage_location: stage_path: '@%t1' - keyword: from - table_reference: naked_identifier: t1 - partition_by_segment: - keyword: partition - keyword: by - bracketed: start_bracket: ( expression: - quoted_literal: "'date='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: to_varchar bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: dt - comma: ',' - expression: quoted_literal: "'YYYY-MM-DD'" - end_bracket: ) - binary_operator: - pipe: '|' - pipe: '|' - quoted_literal: "'/hour='" - binary_operator: - pipe: '|' - pipe: '|' - function: function_name: function_name_identifier: to_varchar bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: date_part bracketed: start_bracket: ( date_part: hour comma: ',' expression: column_reference: naked_identifier: ts end_bracket: ) end_bracket: ) end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: parquet end_bracket: ) - copy_options: keyword: max_file_size comparison_operator: raw_comparison_operator: '=' numeric_literal: '32000000' - keyword: header - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_table.sql000066400000000000000000000026431451700765000256610ustar00rootroot00000000000000copy into mytable from @my_int_stage; copy into mytable from @my_int_stage file_format = (type = csv); copy into mytable from @my_int_stage file_format = (format_name = 'mycsv'); copy into mytable from @my_int_stage file_format = (type = 'CSV') pattern='.*/.*/.*[.]csv[.]gz'; copy into mytable from @my_int_stage file_format = (format_name = myformat) pattern='.*sales.*[.]csv'; copy into mytable from @my_int_stage file_format = (format_name = myformat) pattern=$my_var; copy into mytable; copy into mytable from @%mytable; copy into mytable from @~/data_files; copy into mytable from @mydb.myschema.mystage; copy into mytable from @mydatabase.myschema.%mytable; copy into mytable purge = true; copy into mytable validation_mode = 'RETURN_ERRORS'; copy into mytable validation_mode = 'RETURN_2_ROWS'; copy into mytable validation_mode = 'RETURN_3_ROWS'; COPY INTO target_table FROM ( SELECT $1 FROM @source_stage ); copy into mytable1 (column1) from 's3://bucket/source' file_format = (TYPE = JSON); copy into mytable1 from (select column1 from @ext.stage/path1) file_format = (TYPE = JSON); copy into mytable1 from 's3://bucket/source' file_format = (type=csv SKIP_HEADER=1); copy into mytable1 (column1) from @public.stage/sub-folder/myfile-1.csv file_format = (TYPE = JSON); copy into mytable1 (column1) from @public.stage/subfolder/ file_format = (TYPE = JSON); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/copy_into_table.yml000066400000000000000000000256571451700765000256750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8c4d4e7e1e19b88fd5a9c7ea830ff7525d26b585df90d081acfbcf8a23b3a30f file: - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: csv end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' quoted_literal: "'mycsv'" end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: "'CSV'" end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*/.*/.*[.]csv[.]gz'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myformat end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*sales.*[.]csv'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@my_int_stage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: format_name comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myformat end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - variable: $my_var - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@%mytable' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@~/data_files' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: 
naked_identifier: mytable - keyword: from - storage_location: stage_path: '@mydb.myschema.mystage' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: from - storage_location: stage_path: '@mydatabase.myschema.%mytable' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - copy_options: keyword: purge comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_ERRORS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_2_ROWS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable - keyword: validation_mode - comparison_operator: raw_comparison_operator: '=' - validation_mode_option: "'RETURN_3_ROWS'" - statement_terminator: ; - statement: copy_into_table_statement: - keyword: COPY - keyword: INTO - table_reference: naked_identifier: target_table - keyword: FROM - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: column_index_identifier_segment: $1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: stage_path: '@source_stage' end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: bucket_path: "'s3://bucket/source'" - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@ext.stage/path1' end_bracket: ) - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - keyword: from - storage_location: bucket_path: "'s3://bucket/source'" - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: type - comparison_operator: raw_comparison_operator: '=' - 
file_type: csv - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: stage_path: '@public.stage/sub-folder/myfile-1.csv' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; - statement: copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: mytable1 - bracketed: start_bracket: ( column_reference: naked_identifier: column1 end_bracket: ) - keyword: from - storage_location: stage_path: '@public.stage/subfolder/' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: JSON end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_clone.sql000066400000000000000000000007521451700765000251310ustar00rootroot00000000000000CREATE DATABASE mytestdb_clone CLONE mytestdb; CREATE SCHEMA mytestschema_clone CLONE testschema; CREATE TABLE orders_clone CLONE orders; CREATE SCHEMA mytestschema_clone_restore CLONE testschema BEFORE (TIMESTAMP => TO_TIMESTAMP(40*365*86400)); CREATE TABLE orders_clone_restore CLONE orders AT (TIMESTAMP => TO_TIMESTAMP_TZ('04/05/2013 01:02:03', 'mm/dd/yyyy hh24:mi:ss')); CREATE TABLE orders_clone_restore CLONE orders BEFORE (STATEMENT => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_clone.yml000066400000000000000000000066071451700765000251400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 549e4efbfe226096e1bc994ca038c5641128fd54e090debe72634e6436c9cd99 file: - statement: create_clone_statement: - keyword: CREATE - keyword: DATABASE - object_reference: naked_identifier: mytestdb_clone - keyword: CLONE - object_reference: naked_identifier: mytestdb - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: SCHEMA - object_reference: naked_identifier: mytestschema_clone - keyword: CLONE - object_reference: naked_identifier: testschema - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: orders_clone - keyword: CLONE - table_reference: naked_identifier: orders - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: SCHEMA - object_reference: naked_identifier: mytestschema_clone_restore - keyword: CLONE - object_reference: naked_identifier: testschema - from_before_expression: keyword: BEFORE bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: orders_clone_restore - keyword: CLONE - object_reference: naked_identifier: orders - from_at_expression: keyword: AT bracketed: start_bracket: ( keyword: TIMESTAMP parameter_assigner: => expression: function: function_name: function_name_identifier: TO_TIMESTAMP_TZ bracketed: - start_bracket: ( - expression: quoted_literal: "'04/05/2013 01:02:03'" - comma: ',' - expression: quoted_literal: "'mm/dd/yyyy hh24:mi:ss'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_clone_statement: - keyword: CREATE - keyword: TABLE - object_reference: naked_identifier: orders_clone_restore - keyword: CLONE - object_reference: naked_identifier: orders - from_before_expression: keyword: BEFORE bracketed: start_bracket: ( keyword: STATEMENT parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_database.sql000066400000000000000000000002041451700765000255650ustar00rootroot00000000000000CREATE DATABASE MY_DATABASE; CREATE DATABASE IF NOT EXISTS MY_DATABASE; CREATE DATABASE MY_DATABASE FROM SHARE MY_ACCOUNT.MY_SHARE; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_database.yml000066400000000000000000000022021451700765000255670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 58ff3d3126b72210e4fdb67622ebebae1a87031847cfe9384b76d74645447bf6 file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: MY_DATABASE - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: MY_DATABASE - statement_terminator: ; - statement: create_database_from_share_statement: - keyword: CREATE - keyword: DATABASE - object_reference: naked_identifier: MY_DATABASE - keyword: FROM - keyword: SHARE - object_reference: - naked_identifier: MY_ACCOUNT - dot: . - naked_identifier: MY_SHARE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_external_function.sql000066400000000000000000000036471451700765000275660ustar00rootroot00000000000000CREATE OR REPLACE EXTERNAL FUNCTION LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT NOT NULL STRICT VOLATILE API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(STRING_COL VARCHAR) RETURNS VARIANT NOT NULL RETURNS NULL ON NULL INPUT VOLATILE API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; CREATE OR REPLACE EXTERNAL FUNCTION MY_SCHEMA.LOCAL_ECHO(OBJ OBJECT) RETURNS VARCHAR NULL CALLED ON NULL INPUT IMMUTABLE COMMENT = 'SQLFluff rocks!' API_INTEGRATION = DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 HEADERS = ( 'volume-measure' = 'liters', 'distance-measure' = 'kilometers' ) CONTEXT_HEADERS = (CURRENT_TIMESTAMP) MAX_BATCH_ROWS = 50 COMPRESSION = NONE REQUEST_TRANSLATOR = UTILITY.SOME_FUNCTION RESPONSE_TRANSLATOR = UTILITY.SOME_FUNCTION AS 'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo' ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_external_function.yml000066400000000000000000000202561451700765000275630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
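#
# For a quick human-readable view of any of these parse trees, the SQLFluff
# CLI can render them directly from the SQL fixture (the path below is
# illustrative):
#
#   sqlfluff parse test/fixtures/dialects/snowflake/create_external_function.sql --dialect snowflake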
_hash: 30cc5c94b78e31be517e738d44b5c3cd8bec96e130bf1cd285a66ce642433734 file: - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: NOT - keyword: 'NULL' - keyword: STRICT - keyword: VOLATILE - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . 
function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: STRING_COL data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARIANT - keyword: NOT - keyword: 'NULL' - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: VOLATILE - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; - statement: create_external_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: EXTERNAL - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: LOCAL_ECHO - function_parameter_list: bracketed: start_bracket: ( parameter: OBJ data_type: data_type_identifier: OBJECT end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: 'NULL' - keyword: CALLED - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: IMMUTABLE - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'SQLFluff rocks!'" - keyword: API_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - naked_identifier: DEMONSTRATION_EXTERNAL_API_INTEGRATION_01 - keyword: HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_identifier: "'volume-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'liters'" - comma: ',' - quoted_identifier: "'distance-measure'" - comparison_operator: raw_comparison_operator: '=' - quoted_identifier: "'kilometers'" - end_bracket: ) - keyword: CONTEXT_HEADERS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: CURRENT_TIMESTAMP end_bracket: ) - keyword: MAX_BATCH_ROWS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '50' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: NONE - keyword: REQUEST_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . 
function_name_identifier: SOME_FUNCTION - keyword: RESPONSE_TRANSLATOR - comparison_operator: raw_comparison_operator: '=' - function_name: naked_identifier: UTILITY dot: . function_name_identifier: SOME_FUNCTION - keyword: AS - quoted_identifier: "'https://xyz.execute-api.us-west-2.amazonaws.com/prod/remote_echo'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_external_table.sql000066400000000000000000000031301451700765000270130ustar00rootroot00000000000000create or replace external table ext_table with location = @mystage/path1/ file_format = (type = json) aws_sns_topic = 'arn:aws:sns:us-west-2:001234567890:s3_mybucket'; create or replace external table "_p08" with location=@carpe_datastore_commercial/p08 auto_refresh=true file_format = (type=parquet) pattern='.*[.]parquet.*'; CREATE EXTERNAL TABLE EXTERNAL_TABLES.TRIPS( tripduration integer as try_cast(VALUE:c1::varchar as integer) not null, starttime timestamp as try_cast(VALUE:c2::varchar as timestamp), stoptime timestamp as try_cast(VALUE:c3::varchar as timestamp), start_station_id integer as try_cast(VALUE:c4::varchar as integer) null, start_station_name varchar as (VALUE:c5::varchar), start_station_latitude float as try_cast(VALUE:c6::varchar as float), start_station_longitude float as try_cast(VALUE:c7::varchar as float), end_station_id integer as try_cast(VALUE:c8::varchar as integer), end_station_name varchar as (VALUE:c9::varchar), end_station_latitude float as try_cast(VALUE:c10::varchar as float), end_station_longitude float as try_cast(VALUE:c11::varchar as float), bikeid integer as try_cast(VALUE:c12::varchar as integer), membership_type varchar as (VALUE:c13::varchar), usertype varchar as (VALUE:c14::varchar), birth_year integer as try_cast(VALUE:c15::varchar as integer), gender integer as try_cast(VALUE:c16::varchar as integer), year integer as (substr(metadata$filename, 22, 4)::integer) ) PARTITION BY (year) LOCATION = @external_tables.citibike_trips FILE_FORMAT = ( TYPE = 'CSV' FIELD_OPTIONALLY_ENCLOSED_BY = '"' ); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_external_table.yml000066400000000000000000000400011451700765000270130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3fd9d3efb6620e03f0d42e0aac7b4cd42ebb5bbb57618ecf0729e9c12557f981 file: - statement: create_external_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: external - keyword: table - table_reference: naked_identifier: ext_table - keyword: with - keyword: location - comparison_operator: raw_comparison_operator: '=' - stage_path: '@mystage/path1/' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: json end_bracket: ) - keyword: aws_sns_topic - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:sns:us-west-2:001234567890:s3_mybucket'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: external - keyword: table - table_reference: quoted_identifier: '"_p08"' - keyword: with - keyword: location - comparison_operator: raw_comparison_operator: '=' - stage_path: '@carpe_datastore_commercial/p08' - keyword: auto_refresh - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( parquet_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: parquet end_bracket: ) - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*[.]parquet.*'" - statement_terminator: ; - statement: create_external_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: EXTERNAL_TABLES - dot: . 
- naked_identifier: TRIPS - bracketed: - start_bracket: ( - naked_identifier: tripduration - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c1 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - keyword: not - keyword: 'null' - comma: ',' - naked_identifier: starttime - data_type: keyword: timestamp - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c2 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: keyword: timestamp end_bracket: ) - comma: ',' - naked_identifier: stoptime - data_type: keyword: timestamp - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c3 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: keyword: timestamp end_bracket: ) - comma: ',' - naked_identifier: start_station_id - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c4 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - keyword: 'null' - comma: ',' - naked_identifier: start_station_name - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c5 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: start_station_latitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c6 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: start_station_longitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c7 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: end_station_id - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE 
semi_structured_expression: colon: ':' semi_structured_element: c8 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: end_station_name - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c9 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: end_station_latitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c10 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: end_station_longitude - data_type: data_type_identifier: float - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c11 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: float end_bracket: ) - comma: ',' - naked_identifier: bikeid - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c12 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: membership_type - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c13 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: usertype - data_type: data_type_identifier: varchar - keyword: as - bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c14 casting_operator: '::' data_type: data_type_identifier: varchar end_bracket: ) - comma: ',' - naked_identifier: birth_year - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c15 casting_operator: '::' data_type: data_type_identifier: varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: gender - data_type: data_type_identifier: integer - keyword: as - expression: function: function_name: function_name_identifier: try_cast bracketed: start_bracket: ( expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: c16 casting_operator: '::' data_type: data_type_identifier: 
varchar keyword: as data_type: data_type_identifier: integer end_bracket: ) - comma: ',' - naked_identifier: year - data_type: data_type_identifier: integer - keyword: as - bracketed: start_bracket: ( expression: cast_expression: function: function_name: function_name_identifier: substr bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: metadata$filename - comma: ',' - expression: numeric_literal: '22' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: integer end_bracket: ) - end_bracket: ) - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( naked_identifier: year end_bracket: ) - keyword: LOCATION - comparison_operator: raw_comparison_operator: '=' - stage_path: '@external_tables.citibike_trips' - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: "'CSV'" - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\"'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_file_format.sql000066400000000000000000000100411451700765000263100ustar00rootroot00000000000000CREATE FILE FORMAT my_file_format TYPE = CSV ; CREATE OR REPLACE FILE FORMAT my_csv_format TYPE = CSV, COMPRESSION = AUTO, RECORD_DELIMITER = NONE, FIELD_DELIMITER = NONE, FILE_EXTENSION = 'foobar', SKIP_HEADER = 1, SKIP_BLANK_LINES = TRUE, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FIELD_OPTIONALLY_ENCLOSED_BY = NONE, ERROR_ON_COLUMN_COUNT_MISMATCH = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, VALIDATE_UTF8 = TRUE, EMPTY_FIELD_AS_NULL = TRUE, SKIP_BYTE_ORDER_MARK = TRUE, ENCODING = UTF8 ; CREATE FILE FORMAT IF NOT EXISTS my_csv_format TYPE = CSV ESCAPE = '\\' FIELD_OPTIONALLY_ENCLOSED_BY = '\"' COMPRESSION = NONE FIELD_DELIMITER ='|' NULL_IF=() ; CREATE FILE FORMAT IF NOT EXISTS my_csv_format TYPE = CSV COMPRESSION = GZIP RECORD_DELIMITER = 'foo' FIELD_DELIMITER = 'bar' FILE_EXTENSION = 'foobar' SKIP_HEADER = 99 SKIP_BLANK_LINES = FALSE DATE_FORMAT = 'foo' TIME_FORMAT = 'bar' TIMESTAMP_FORMAT = 'foobar' BINARY_FORMAT = UTF8 TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') FIELD_OPTIONALLY_ENCLOSED_BY = 'foo' ERROR_ON_COLUMN_COUNT_MISMATCH = FALSE REPLACE_INVALID_CHARACTERS = FALSE VALIDATE_UTF8 = FALSE EMPTY_FIELD_AS_NULL = FALSE SKIP_BYTE_ORDER_MARK = FALSE ENCODING = 'foo' COMMENT = 'FOOBAR' ; CREATE OR REPLACE FILE FORMAT my_json_format TYPE = JSON, COMPRESSION = AUTO, DATE_FORMAT = AUTO, TIME_FORMAT = AUTO, TIMESTAMP_FORMAT = AUTO, BINARY_FORMAT = HEX, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar'), FILE_EXTENSION = 'foobar', ENABLE_OCTAL = TRUE, ALLOW_DUPLICATE = TRUE, STRIP_OUTER_ARRAY = TRUE, STRIP_NULL_VALUES = TRUE, REPLACE_INVALID_CHARACTERS = TRUE, IGNORE_UTF8_ERRORS = TRUE, SKIP_BYTE_ORDER_MARK = TRUE ; CREATE FILE FORMAT IF NOT EXISTS my_json_format TYPE = JSON COMPRESSION = GZIP DATE_FORMAT = 'foobar' TIME_FORMAT = 'foobar' TIMESTAMP_FORMAT = 'foobar' BINARY_FORMAT = BASE64 TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') FILE_EXTENSION = 'foobar' ENABLE_OCTAL = FALSE ALLOW_DUPLICATE = FALSE STRIP_OUTER_ARRAY = FALSE STRIP_NULL_VALUES = FALSE REPLACE_INVALID_CHARACTERS = TRUE IGNORE_UTF8_ERRORS = FALSE SKIP_BYTE_ORDER_MARK = FALSE COMMENT = 'FOOBAR' ; 
CREATE FILE FORMAT IF NOT EXISTS my_json_format TYPE = JSON NULL_IF = () ; CREATE OR REPLACE FILE FORMAT my_avro_format TYPE = AVRO, COMPRESSION = AUTO, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar') ; CREATE FILE FORMAT IF NOT EXISTS my_avro_format TYPE = AVRO COMPRESSION = 'GZIP' TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; CREATE OR REPLACE FILE FORMAT my_orc_format TYPE = ORC, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar') ; CREATE FILE FORMAT IF NOT EXISTS my_orc_format TYPE = ORC TRIM_SPACE = FALSE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; CREATE OR REPLACE FILE FORMAT my_parquet_format TYPE = PARQUET, COMPRESSION = SNAPPY, SNAPPY_COMPRESSION = TRUE, TRIM_SPACE = TRUE, NULL_IF = ('foo', 'bar') ; CREATE FILE FORMAT IF NOT EXISTS my_parquet_format TYPE = PARQUET COMPRESSION = AUTO SNAPPY_COMPRESSION = FALSE TRIM_SPACE = FALSE BINARY_AS_TEXT = TRUE NULL_IF = ('foo', 'bar') COMMENT = 'FOOBAR' ; CREATE OR REPLACE FILE FORMAT my_xml_format TYPE = XML, COMPRESSION = AUTO, IGNORE_UTF8_ERRORS = TRUE, PRESERVE_SPACE = TRUE, STRIP_OUTER_ELEMENT = TRUE, DISABLE_SNOWFLAKE_DATA = TRUE, DISABLE_AUTO_CONVERT = TRUE, SKIP_BYTE_ORDER_MARK = TRUE ; CREATE FILE FORMAT IF NOT EXISTS my_xml_format TYPE = XML COMPRESSION = GZIP IGNORE_UTF8_ERRORS = FALSE PRESERVE_SPACE = FALSE STRIP_OUTER_ELEMENT = FALSE DISABLE_SNOWFLAKE_DATA = FALSE DISABLE_AUTO_CONVERT = FALSE SKIP_BYTE_ORDER_MARK = FALSE COMMENT = 'FOOBAR' ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_file_format.yml000066400000000000000000000562671451700765000263360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fbe3353e07c57202acff14e3a5d21434fdf7ef8d76994a4e23d5e7440d4c73a6 file: - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - csv_file_format_type_parameters: keyword: TYPE comparison_operator: raw_comparison_operator: '=' file_type: CSV - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - comma: ',' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - comma: ',' - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: ESCAPE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\\\'" - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\\"'" - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - 
compression_type: NONE - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'|'" - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_csv_format - csv_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: CSV - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: RECORD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: FIELD_DELIMITER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: SKIP_HEADER - comparison_operator: raw_comparison_operator: '=' - integer_literal: '99' - keyword: SKIP_BLANK_LINES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: UTF8 - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - keyword: FIELD_OPTIONALLY_ENCLOSED_BY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - keyword: ERROR_ON_COLUMN_COUNT_MISMATCH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: VALIDATE_UTF8 - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: EMPTY_FIELD_AS_NULL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: ENCODING - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - comma: ',' - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: AUTO - 
comma: ',' - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: HEX - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comma: ',' - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - comma: ',' - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: DATE_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: TIME_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: TIMESTAMP_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: BINARY_FORMAT - comparison_operator: raw_comparison_operator: '=' - keyword: BASE64 - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - keyword: FILE_EXTENSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foobar'" - keyword: ENABLE_OCTAL - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: ALLOW_DUPLICATE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ARRAY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_NULL_VALUES - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: REPLACE_INVALID_CHARACTERS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: 
create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_json_format - json_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: JSON - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_avro_format - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_avro_format - avro_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: AVRO - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: "'GZIP'" - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_orc_format - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_orc_format - orc_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: ORC - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - 
object_reference: naked_identifier: my_parquet_format - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: SNAPPY - comma: ',' - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_parquet_format - parquet_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: PARQUET - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - keyword: SNAPPY_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: TRIM_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: BINARY_AS_TEXT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: NULL_IF - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'foo'" - comma: ',' - quoted_literal: "'bar'" - end_bracket: ) - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_xml_format - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - comma: ',' - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: AUTO - comma: ',' - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: PRESERVE_SPACE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: DISABLE_AUTO_CONVERT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comma: ',' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - statement_terminator: ; - statement: create_file_format_segment: - keyword: CREATE - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: my_xml_format - xml_file_format_type_parameters: - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - file_type: XML - keyword: COMPRESSION - comparison_operator: raw_comparison_operator: '=' - compression_type: GZIP - keyword: IGNORE_UTF8_ERRORS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: PRESERVE_SPACE - 
comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: STRIP_OUTER_ELEMENT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_SNOWFLAKE_DATA - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: DISABLE_AUTO_CONVERT - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SKIP_BYTE_ORDER_MARK - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'FOOBAR'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_function.sql000066400000000000000000000044351451700765000256600ustar00rootroot00000000000000CREATE FUNCTION pi_udf() RETURNS FLOAT AS '3.141592654::FLOAT'; CREATE FUNCTION simple_table_function () RETURNS TABLE (x INTEGER, y INTEGER) AS $$ SELECT 1, 2 UNION ALL SELECT 3, 4 $$; CREATE OR REPLACE FUNCTION get_countries_for_user ( id number ) RETURNS TABLE (country_code char, country_name varchar) RETURNS NULL ON NULL INPUT AS 'select distinct c.country_code, c.country_name from user_addresses a, countries c where a.user_id = id and c.country_code = a.country_code'; CREATE SECURE FUNCTION js_factorial(d double) RETURNS double IMMUTABLE LANGUAGE JAVASCRIPT STRICT AS ' if (D <= 0) { return 1; } else { var result = 1; for (var i = 2; i <= D; i++) { result = result * i; } return result; } '; CREATE FUNCTION IF NOT EXISTS simple_table_function () RETURNS TABLE (x INTEGER, y INTEGER) LANGUAGE SQL AS $$ SELECT 1, 2 UNION ALL SELECT 3, 4 $$; create function my_decrement_udf(i numeric(9, 0)) returns numeric language java imports = ('@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar') handler = 'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method' ; create or replace function echo_varchar(x varchar) returns varchar language java called on null input handler='TestFunc.echoVarchar' target_path='@~/testfunc.jar' as 'class TestFunc { public static String echoVarchar(String x) { return x; } }'; create or replace function py_udf() returns variant language python runtime_version = '3.8' packages = ('numpy','pandas','xgboost==1.5.0') handler = 'udf' as $$ import numpy as np import pandas as pd import xgboost as xgb def udf(): return [np.__version__, pd.__version__, xgb.__version__] $$; create or replace function dream(i int) returns variant language python runtime_version = '3.8' handler = 'sleepy.snore' imports = ('@my_stage/sleepy.py') ; create or replace function addone(i int) returns int language python runtime_version = '3.8' handler = 'addone_py' as $$ def addone_py(i): return i+1 $$; CREATE OR REPLACE FUNCTION echo_varchar(x VARCHAR) RETURNS VARCHAR LANGUAGE SCALA RUNTIME_VERSION = '2.12' HANDLER='Echo.echoVarchar' AS $$ class Echo { def echoVarchar(x : String): String = { return x } } $$; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_function.yml000066400000000000000000000243221451700765000256570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c6d5a25a9e969e736853457509d7391cef708fac8f582e55c7c83d85bd6fa5a1 file: - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: pi_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: FLOAT - keyword: AS - udf_body: "'3.141592654::FLOAT'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name: function_name_identifier: simple_table_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INTEGER - end_bracket: ) - keyword: AS - udf_body: "$$\n SELECT 1, 2\n UNION ALL\n SELECT 3, 4\n $$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: get_countries_for_user - function_parameter_list: bracketed: start_bracket: ( parameter: id data_type: data_type_identifier: number end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: country_code data_type: data_type_identifier: char - comma: ',' - column_definition: naked_identifier: country_name data_type: data_type_identifier: varchar - end_bracket: ) - keyword: RETURNS - keyword: 'NULL' - keyword: 'ON' - keyword: 'NULL' - keyword: INPUT - keyword: AS - udf_body: "'select distinct c.country_code, c.country_name\n from user_addresses\ \ a, countries c\n where a.user_id = id\n and c.country_code = a.country_code'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: SECURE - keyword: FUNCTION - function_name: function_name_identifier: js_factorial - function_parameter_list: bracketed: start_bracket: ( parameter: d data_type: data_type_identifier: double end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: double - keyword: IMMUTABLE - keyword: LANGUAGE - keyword: JAVASCRIPT - keyword: STRICT - keyword: AS - udf_body: "'\n if (D <= 0) {\n return 1;\n } else {\n var result = 1;\n\ \ for (var i = 2; i <= D; i++) {\n result = result * i;\n }\n \ \ return result;\n }\n '" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name: function_name_identifier: simple_table_function - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: x data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: y data_type: data_type_identifier: INTEGER - end_bracket: ) - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\n SELECT 1, 2\n UNION ALL\n SELECT 3, 4\n $$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: function - function_name: function_name_identifier: my_decrement_udf - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: numeric bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '9' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: returns - 
data_type: data_type_identifier: numeric - keyword: language - keyword: java - keyword: imports - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@~/my_decrement_udf_package_dir/my_decrement_udf_jar.jar'" end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_decrement_udf_package.my_decrement_udf_class.my_decrement_udf_method'" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: echo_varchar - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: varchar end_bracket: ) - keyword: returns - data_type: data_type_identifier: varchar - keyword: language - keyword: java - keyword: called - keyword: 'on' - keyword: 'null' - keyword: input - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestFunc.echoVarchar'" - keyword: target_path - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'@~/testfunc.jar'" - keyword: as - udf_body: "'class TestFunc {\n public static String echoVarchar(String x) {\n\ \ return x;\n }\n}'" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: py_udf - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: packages - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'numpy'" - comma: ',' - quoted_literal: "'pandas'" - comma: ',' - quoted_literal: "'xgboost==1.5.0'" - end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'udf'" - keyword: as - udf_body: "$$\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\ def udf():\n return [np.__version__, pd.__version__, xgb.__version__]\n\ $$" - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: dream - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'sleepy.snore'" - keyword: imports - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'@my_stage/sleepy.py'" end_bracket: ) - statement_terminator: ; - statement: create_function_statement: - keyword: create - keyword: or - keyword: replace - keyword: function - function_name: function_name_identifier: addone - function_parameter_list: bracketed: start_bracket: ( parameter: i data_type: data_type_identifier: int end_bracket: ) - keyword: returns - data_type: data_type_identifier: int - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: handler - 
comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'addone_py'" - keyword: as - udf_body: "$$\ndef addone_py(i):\n return i+1\n$$" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name: function_name_identifier: echo_varchar - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: VARCHAR end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SCALA - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2.12'" - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Echo.echoVarchar'" - keyword: AS - udf_body: "$$\nclass Echo {\n def echoVarchar(x : String): String = {\n \ \ return x\n }\n}\n$$" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_masking_policy.sql000066400000000000000000000037431451700765000270440ustar00rootroot00000000000000CREATE OR REPLACE MASKING POLICY XXXX.XX.example_MASKING_POLICY AS (val VARCHAR) RETURNS VARCHAR -> CASE WHEN is_role_in_session('SNOWFLAKE_PII') THEN val ELSE '*** masked ***' END COMMENT = 'Applied 2021-07-13T03:12:16+0000'; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else '*********' end; create or replace masking policy email_mask as (val string) returns string -> case when current_account() in ('') then val else '*********' end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() IN ('ANALYST') then val else NULL end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else '********' end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else sha2(val) -- return hash of the column value end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val when current_role() in ('SUPPORT') then regexp_replace(val,'.+\@','*****@') -- leave email domain unmasked else '********' end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('SUPPORT') then val else date_from_parts(0001, 01, 01)::timestamp_ntz -- returns 0001-01-01 00:00:00.000 end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else mask_udf(val) -- custom masking function end; create or replace masking policy email_mask as (val string) returns string -> case when current_role() in ('ANALYST') then val else object_insert(val, 'USER_IPADDRESS', '****', true) end; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_masking_policy.yml000066400000000000000000000407551451700765000270520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a5247086475ea136cf5c23fd44af0812085724d0d316825e5c96452695432526 file: - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: MASKING - keyword: POLICY - object_reference: - naked_identifier: XXXX - dot: . - naked_identifier: XX - dot: . - naked_identifier: example_MASKING_POLICY - keyword: AS - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: VARCHAR - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - function_assigner: -> - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: function: function_name: function_name_identifier: is_role_in_session bracketed: start_bracket: ( expression: quoted_literal: "'SNOWFLAKE_PII'" end_bracket: ) - keyword: THEN - expression: column_reference: naked_identifier: val - else_clause: keyword: ELSE expression: quoted_literal: "'*** masked ***'" - keyword: END - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Applied 2021-07-13T03:12:16+0000'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'*********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_account bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "''" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'*********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) 
keyword: IN bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: null_literal: 'NULL' - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: quoted_literal: "'********'" - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: sha2 bracketed: start_bracket: ( expression: column_reference: naked_identifier: val end_bracket: ) - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'SUPPORT'" end_bracket: ) - keyword: then - expression: function: function_name: function_name_identifier: regexp_replace bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - comma: ',' - expression: quoted_literal: "'.+\\@'" - comma: ',' - expression: quoted_literal: "'*****@'" - end_bracket: ) - else_clause: keyword: else expression: quoted_literal: "'********'" - 
keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'SUPPORT'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: cast_expression: function: function_name: function_name_identifier: date_from_parts bracketed: - start_bracket: ( - expression: numeric_literal: '0001' - comma: ',' - expression: numeric_literal: '01' - comma: ',' - expression: numeric_literal: '01' - end_bracket: ) casting_operator: '::' data_type: data_type_identifier: timestamp_ntz - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: mask_udf bracketed: start_bracket: ( expression: column_reference: naked_identifier: val end_bracket: ) - keyword: end - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: masking - keyword: policy - object_reference: naked_identifier: email_mask - keyword: as - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - expression: column_reference: naked_identifier: string - end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - function_assigner: -> - expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: function: function_name: function_name_identifier: current_role bracketed: start_bracket: ( end_bracket: ) keyword: in bracketed: start_bracket: ( quoted_literal: "'ANALYST'" end_bracket: ) - keyword: then - expression: column_reference: naked_identifier: val - else_clause: keyword: else expression: function: function_name: function_name_identifier: object_insert bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: val - comma: ',' - expression: quoted_literal: "'USER_IPADDRESS'" - comma: ',' - expression: quoted_literal: "'****'" - comma: ',' - expression: boolean_literal: 'true' - end_bracket: ) - keyword: end - statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_network_policy.sql000066400000000000000000000004051451700765000270740ustar00rootroot00000000000000create network policy mypolicy1 allowed_ip_list=('192.168.1.0/24') blocked_ip_list=('192.168.1.99'); CREATE OR REPLACE NETWORK POLICY TEST_NW_POLICY ALLOWED_IP_LIST=('xx.xxx.xxx.xx/xx','xx.xxx.xxx.xx/xx') COMMENT='NW Policy' ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_network_policy.yml000066400000000000000000000031141451700765000270760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9315b4c172e0bf1ae67125034ad97c68730a35c856061ebdd6fe8a0fe9aaaaf7 file: - statement: create_statement: - keyword: create - keyword: network - keyword: policy - object_reference: naked_identifier: mypolicy1 - keyword: allowed_ip_list - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'192.168.1.0/24'" end_bracket: ) - keyword: blocked_ip_list - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'192.168.1.99'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: TEST_NW_POLICY - keyword: ALLOWED_IP_LIST - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'xx.xxx.xxx.xx/xx'" - comma: ',' - quoted_literal: "'xx.xxx.xxx.xx/xx'" - end_bracket: ) - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'NW Policy'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_notification_integration.sql000066400000000000000000000023071451700765000311200ustar00rootroot00000000000000create or replace notification integration if not exists my_notification_int type = queue notification_provider = gcp_pubsub enabled = true gcp_pubsub_subscription_name = 'projects/project-1234/subscriptions/sub2'; create notification integration my_notification_int enabled = true type = queue notification_provider = azure_storage_queue azure_storage_queue_primary_uri = 'https://myqueue.queue.core.windows.net/mystoragequeue' azure_tenant_id = 'a123bcde-1234-5678-abc1-9abc12345678'; create notification integration my_notification_int enabled = true type = queue notification_provider = aws_sns direction = outbound aws_sns_topic_arn = 'arn:aws:sns:us-east-2:111122223333:sns_topic' aws_sns_role_arn = 'arn:aws:iam::111122223333:role/error_sns_role'; create notification integration my_notification_int type = queue direction = outbound notification_provider = gcp_pubsub enabled = true gcp_pubsub_topic_name = 'projects/sdm-prod/topics/mytopic'; create notification integration my_notification_int enabled = true type = queue notification_provider = azure_event_grid direction = outbound azure_event_grid_topic_endpoint = 'https://myaccount.region-1.eventgrid.azure.net/api/events' azure_tenant_id = 'mytenantid'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_notification_integration.yml000066400000000000000000000115611451700765000311240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f7b274e2ac94393f593c7e1f3db70fa7d46001636620255cd34b00d1a3fe98f6 file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: notification - keyword: integration - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: my_notification_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcp_pubsub - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: gcp_pubsub_subscription_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'projects/project-1234/subscriptions/sub2'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure_storage_queue - keyword: azure_storage_queue_primary_uri - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://myqueue.queue.core.windows.net/mystoragequeue'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123bcde-1234-5678-abc1-9abc12345678'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: aws_sns - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: aws_sns_topic_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:sns:us-east-2:111122223333:sns_topic'" - keyword: aws_sns_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::111122223333:role/error_sns_role'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcp_pubsub - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: gcp_pubsub_topic_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'projects/sdm-prod/topics/mytopic'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: notification - keyword: integration - object_reference: naked_identifier: my_notification_int - keyword: enabled - comparison_operator: 
raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: queue - keyword: notification_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure_event_grid - keyword: direction - comparison_operator: raw_comparison_operator: '=' - keyword: outbound - keyword: azure_event_grid_topic_endpoint - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'https://myaccount.region-1.eventgrid.azure.net/api/events'" - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'mytenantid'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_pipe.sql000066400000000000000000000013361451700765000247650ustar00rootroot00000000000000create or replace pipe mypipe_s3 auto_ingest = true error_integration = my_error aws_sns_topic = 'arn:aws:blablabla..0:s3_mybucket' as copy into snowpipe_db.public.mytable from @snowpipe_db.public.mystage file_format = (type = 'JSON'); create or replace pipe test_pipe auto_ingest = true integration = notification_integration as copy into table_name ( column1, column2 ) from (select $1, current_timestamp() as column2 from @stage_name/folder); create or replace pipe test_pipe auto_ingest = true integration = 'notification_integration' as copy into table_name ( column1, column2 ) from (select $1, current_timestamp() as column2 from @stage_name/folder); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_pipe.yml000066400000000000000000000124371451700765000247730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c27bfb97b5ff0780411b77d50a4966bfcf7e0385ce5639ad4c0ae2c496b10f27 file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: mypipe_s3 - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: error_integration - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_error - keyword: aws_sns_topic - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:blablabla..0:s3_mybucket'" - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: - naked_identifier: snowpipe_db - dot: . - naked_identifier: public - dot: . 
- naked_identifier: mytable - keyword: from - storage_location: stage_path: '@snowpipe_db.public.mystage' - keyword: file_format - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( json_file_format_type_parameters: keyword: type comparison_operator: raw_comparison_operator: '=' file_type: "'JSON'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: test_pipe - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: integration - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: notification_integration - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: current_timestamp bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: as naked_identifier: column2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@stage_name/folder' end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: pipe - object_reference: naked_identifier: test_pipe - keyword: auto_ingest - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: integration - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'notification_integration'" - keyword: as - copy_into_table_statement: - keyword: copy - keyword: into - table_reference: naked_identifier: table_name - bracketed: - start_bracket: ( - column_reference: naked_identifier: column1 - comma: ',' - column_reference: naked_identifier: column2 - end_bracket: ) - keyword: from - bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: column_index_identifier_segment: $1 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: current_timestamp bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: as naked_identifier: column2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@stage_name/folder' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_procedure.sql000066400000000000000000000056621451700765000260260ustar00rootroot00000000000000create or replace procedure sp_pi() returns float not null language javascript as $$ return 3.1415926; $$ ; create or replace procedure stproc1(FLOAT_PARAM1 FLOAT) returns string language javascript strict execute as owner as $$ var sql_command = "INSERT INTO stproc_test_table1 (num_col1) VALUES (" + FLOAT_PARAM1 + ")"; try { snowflake.execute ( {sqlText: sql_command} ); return "Succeeded."; // Return a success/error indicator. } catch (err) { return "Failed: " + err; // Return a success/error indicator. 
} $$ ; CREATE OR REPLACE PROCEDURE public.test_procedure (test_table VARCHAR(), test_col VARCHAR()) RETURNS VARCHAR() LANGUAGE JAVASCRIPT AS $$ try { var sql_command = "ALTER TABLE " + test_table + " DROP " + tet_col; snowflake.execute ({sqlText: sql_command}); return "Succeeded."; } catch (err) { return "Failed: execute "+ sql_command +". Error : "+ err; // Return a success/error indicator. } $$ ; CREATE OR REPLACE PROCEDURE UTIL_DB.PUBLIC.PROCEDURE_WITHOUT_EXPLICIT_LANGUAGE() RETURNS INT AS $$ BEGIN RETURN 1; END $$; CREATE OR REPLACE PROCEDURE UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_SQL() RETURNS INT LANGUAGE SQL AS $$ BEGIN RETURN 1; END $$; create or replace procedure UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_PYTHON() returns variant language python runtime_version = '3.8' packages = ('numpy','pandas','xgboost==1.5.0') handler = 'udf' comment = 'hello_world' as $$ import numpy as np import pandas as pd import xgboost as xgb def udf(): return [np.__version__, pd.__version__, xgb.__version__] $$; create or replace procedure UTIL_DB.PUBLIC.PROCEDURE_LANGUAGE_JAVA(x varchar) returns varchar language java called on null input handler='TestFunc.echoVarchar' target_path='@~/testfunc.jar' as 'class TestFunc { public static String echoVarchar(String x) { return x; } }'; CREATE OR REPLACE PROCEDURE filter_by_role(table_name VARCHAR, role VARCHAR) RETURNS INT --TABLE() LANGUAGE SCALA RUNTIME_VERSION = '2.12' PACKAGES = ('com.snowflake:snowpark:latest') HANDLER = 'Filter.filterByRole' AS $$ import com.snowflake.snowpark.functions._ import com.snowflake.snowpark._ object Filter { def filterByRole(session: Session, tableName: String, role: String): DataFrame = { val table = session.table(tableName) val filteredRows = table.filter(col("role") === role) return filteredRows } } $$; CREATE OR REPLACE PROCEDURE myprocedure( "Id" NUMBER(38,0) ) RETURNS VARCHAR LANGUAGE SQL AS $$ -- Snowflake Scripting code DECLARE radius_of_circle FLOAT; area_of_circle FLOAT; BEGIN radius_of_circle := 3; area_of_circle := pi() * radius_of_circle * radius_of_circle; RETURN area_of_circle; END; $$ ; CREATE OR REPLACE PROCEDURE MY_PROCEDURE( "Id" NUMBER(38,0) ) RETURNS VARCHAR LANGUAGE SQL AS BEGIN select 1; select 2; select 3; select 4; return 5; END; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_procedure.yml000066400000000000000000000266051451700765000260300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e18d36bd3cb56aadfae7dff8b253d22092e599a678492d30024a71ccdfe03b30 file: - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: function_name_identifier: sp_pi - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: float - keyword: not - keyword: 'null' - keyword: language - keyword: javascript - keyword: as - udf_body: "$$\n return 3.1415926;\n $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: function_name_identifier: stproc1 - function_parameter_list: bracketed: start_bracket: ( parameter: FLOAT_PARAM1 data_type: data_type_identifier: FLOAT end_bracket: ) - keyword: returns - data_type: data_type_identifier: string - keyword: language - keyword: javascript - keyword: strict - keyword: execute - keyword: as - keyword: owner - keyword: as - udf_body: "$$\n var sql_command =\n \"INSERT INTO stproc_test_table1\ \ (num_col1) VALUES (\" + FLOAT_PARAM1 + \")\";\n try {\n snowflake.execute\ \ (\n {sqlText: sql_command}\n );\n return \"\ Succeeded.\"; // Return a success/error indicator.\n }\n catch\ \ (err) {\n return \"Failed: \" + err; // Return a success/error\ \ indicator.\n }\n $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: naked_identifier: public dot: . function_name_identifier: test_procedure - function_parameter_list: bracketed: - start_bracket: ( - parameter: test_table - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: test_col - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( end_bracket: ) - keyword: LANGUAGE - keyword: JAVASCRIPT - keyword: AS - udf_body: "$$\ntry {\n var sql_command = \"ALTER TABLE \" + test_table +\ \ \" DROP \" + tet_col;\n snowflake.execute ({sqlText: sql_command});\n\ \ return \"Succeeded.\";\n}\ncatch (err) {\n return \"Failed: execute\ \ \"+ sql_command +\". Error : \"+ err; // Return a success/error indicator.\n\ }\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_WITHOUT_EXPLICIT_LANGUAGE - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - udf_body: "$$\nBEGIN\n RETURN 1;\nEND\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . 
- function_name_identifier: PROCEDURE_LANGUAGE_SQL - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\nBEGIN\n RETURN 1;\nEND\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_LANGUAGE_PYTHON - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: returns - data_type: data_type_identifier: variant - keyword: language - keyword: python - keyword: runtime_version - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'3.8'" - keyword: packages - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - quoted_literal: "'numpy'" - comma: ',' - quoted_literal: "'pandas'" - comma: ',' - quoted_literal: "'xgboost==1.5.0'" - end_bracket: ) - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'udf'" - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'hello_world'" - keyword: as - udf_body: "$$\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\n\ def udf():\n return [np.__version__, pd.__version__, xgb.__version__]\n\ $$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: create - keyword: or - keyword: replace - keyword: procedure - function_name: - naked_identifier: UTIL_DB - dot: . - naked_identifier: PUBLIC - dot: . - function_name_identifier: PROCEDURE_LANGUAGE_JAVA - function_parameter_list: bracketed: start_bracket: ( parameter: x data_type: data_type_identifier: varchar end_bracket: ) - keyword: returns - data_type: data_type_identifier: varchar - keyword: language - keyword: java - keyword: called - keyword: 'on' - keyword: 'null' - keyword: input - keyword: handler - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestFunc.echoVarchar'" - keyword: target_path - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'@~/testfunc.jar'" - keyword: as - udf_body: "'class TestFunc {\n public static String echoVarchar(String x) {\n\ \ return x;\n }\n}'" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: filter_by_role - function_parameter_list: bracketed: - start_bracket: ( - parameter: table_name - data_type: data_type_identifier: VARCHAR - comma: ',' - parameter: role - data_type: data_type_identifier: VARCHAR - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: LANGUAGE - keyword: SCALA - keyword: RUNTIME_VERSION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2.12'" - keyword: PACKAGES - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'com.snowflake:snowpark:latest'" end_bracket: ) - keyword: HANDLER - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Filter.filterByRole'" - keyword: AS - udf_body: "$$\nimport com.snowflake.snowpark.functions._\nimport com.snowflake.snowpark._\n\ \nobject Filter {\n def filterByRole(session: Session, tableName: String,\ \ role: String): DataFrame = {\n val table = session.table(tableName)\n\ \ val filteredRows = 
table.filter(col(\"role\") === role)\n \ \ return filteredRows\n }\n}\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: myprocedure - function_parameter_list: bracketed: start_bracket: ( parameter: '"Id"' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SQL - keyword: AS - udf_body: "$$\n-- Snowflake Scripting code\nDECLARE\nradius_of_circle FLOAT;\n\ area_of_circle FLOAT;\nBEGIN\nradius_of_circle := 3;\narea_of_circle := pi()\ \ * radius_of_circle * radius_of_circle;\nRETURN area_of_circle;\nEND;\n$$" - statement_terminator: ; - statement: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: PROCEDURE - function_name: function_name_identifier: MY_PROCEDURE - function_parameter_list: bracketed: start_bracket: ( parameter: '"Id"' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: VARCHAR - keyword: LANGUAGE - keyword: SQL - keyword: AS - scripting_block_statement: keyword: BEGIN statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '4' - statement_terminator: ; - statement: return_statement: keyword: return expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_block_statement: keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_resource_monitor.sql000066400000000000000000000013531451700765000274250ustar00rootroot00000000000000create resource monitor test; create or replace resource monitor limiter with credit_quota = 1; create or replace resource monitor test with frequency = monthly; create or replace resource monitor limiter with start_timestamp = immediately; create or replace resource monitor limiter with start_timestamp= '2038-01-19 03:14:07'; create or replace resource monitor limiter with credit_quota = 100 NOTIFY_USERS = (joe, "sara", "ashlee") start_timestamp = immediately end_timestamp = '2038-01-19 03:14:07' ; create or replace resource monitor limiter with credit_quota=5000 notify_users = (jdoe, "jane smith", "john doe") triggers on 75 percent do notify on 100 percent do suspend on 110 percent do suspend_immediate ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_resource_monitor.yml000066400000000000000000000110641451700765000274270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bf15810de6377ed14ef180cbb051a3f9449ec044d0f45b97932917f5388ae4eb file: - statement: create_statement: - keyword: create - keyword: resource - keyword: monitor - object_reference: naked_identifier: test - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: keyword: credit_quota comparison_operator: raw_comparison_operator: '=' integer_literal: '1' - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: test - keyword: with - resource_monitor_options: - keyword: frequency - comparison_operator: raw_comparison_operator: '=' - keyword: monthly - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: keyword: start_timestamp comparison_operator: raw_comparison_operator: '=' quoted_literal: "'2038-01-19 03:14:07'" - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '100' - keyword: NOTIFY_USERS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: joe - comma: ',' - object_reference: quoted_identifier: '"sara"' - comma: ',' - object_reference: quoted_identifier: '"ashlee"' - end_bracket: ) - keyword: start_timestamp - comparison_operator: raw_comparison_operator: '=' - keyword: immediately - keyword: end_timestamp - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2038-01-19 03:14:07'" - statement_terminator: ; - statement: create_resource_monitor_statement: - keyword: create - keyword: or - keyword: replace - keyword: resource - keyword: monitor - object_reference: naked_identifier: limiter - keyword: with - resource_monitor_options: - keyword: credit_quota - comparison_operator: raw_comparison_operator: '=' - integer_literal: '5000' - keyword: notify_users - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - object_reference: naked_identifier: jdoe - comma: ',' - object_reference: quoted_identifier: '"jane smith"' - comma: ',' - object_reference: quoted_identifier: '"john doe"' - end_bracket: ) - keyword: triggers - keyword: 'on' - integer_literal: '75' - keyword: percent - keyword: do - keyword: notify - keyword: 'on' - integer_literal: '100' - keyword: percent - keyword: do - keyword: suspend - keyword: 'on' - integer_literal: '110' - keyword: percent - keyword: do - keyword: suspend_immediate - statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_role.sql000066400000000000000000000001721451700765000247660ustar00rootroot00000000000000CREATE ROLE MY_ROLE; CREATE ROLE "my_role"; CREATE OR REPLACE ROLE IF NOT EXISTS foo_role COMMENT = 'this is a fake role' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_role.yml000066400000000000000000000021131451700765000247650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b9e46f73078146cac98637bc2868a0f0978302ce24dc58d36793a3baf4a30a9d file: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: quoted_identifier: '"my_role"' - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: ROLE - keyword: IF - keyword: NOT - keyword: EXISTS - role_reference: naked_identifier: foo_role - keyword: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'this is a fake role'" sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_schema.sql000066400000000000000000000007731451700765000252740ustar00rootroot00000000000000create schema mytestschema_clone_restore clone testschema; create schema mytestdatabase1.mytestschema_clone_restore clone mytestdatabase2.testschema; create schema mytestschema_clone_restore clone testschema before (timestamp => to_timestamp(40*365*86400)); create schema mytestschema comment = 'My test schema.'; create schema mytestschema tag (tag1 = 'foo', tag2 = 'bar'); create schema mytestschema with managed access; create transient schema if not exists mytestschema default_ddl_collation = 'de_DE'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_schema.yml000066400000000000000000000071071451700765000252740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c1647483237fdfbd031edde7d795dda66cf0ba600d18a8c7917579a97f8e7daf file: - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: naked_identifier: testschema - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: - naked_identifier: mytestdatabase1 - dot: . - naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: - naked_identifier: mytestdatabase2 - dot: . 
- naked_identifier: testschema - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: schema - object_reference: naked_identifier: mytestschema_clone_restore - keyword: clone - object_reference: naked_identifier: testschema - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: function: function_name: function_name_identifier: to_timestamp bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - schema_object_properties: comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'My test schema.'" - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - tag_bracketed_equals: keyword: tag bracketed: - start_bracket: ( - tag_reference: naked_identifier: tag1 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'foo'" - comma: ',' - tag_reference: naked_identifier: tag2 - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: schema - schema_reference: naked_identifier: mytestschema - keyword: with - keyword: managed - keyword: access - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: transient - keyword: schema - keyword: if - keyword: not - keyword: exists - schema_reference: naked_identifier: mytestschema - schema_object_properties: keyword: default_ddl_collation comparison_operator: raw_comparison_operator: '=' quoted_literal: "'de_DE'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_sequence.sql000066400000000000000000000004671451700765000256440ustar00rootroot00000000000000CREATE SEQUENCE seq; CREATE OR REPLACE SEQUENCE IF NOT EXISTS seq WITH START WITH = 2 INCREMENT BY = 15 ORDER COMMENT = 'this_a_beautiful_sequence'; CREATE OR REPLACE SEQUENCE IF NOT EXISTS seq START = 2 INCREMENT = 15 NOORDER; CREATE SEQUENCE seq START 2; CREATE SEQUENCE seq INCREMENT 2; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_sequence.yml000066400000000000000000000043621451700765000256440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6322eafcc127c45d98a95cf96ccc3ebb53d36248f5a873598221e370bf16c7c1 file: - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SEQUENCE - keyword: IF - keyword: NOT - keyword: EXISTS - sequence_reference: naked_identifier: seq - keyword: WITH - keyword: START - keyword: WITH - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: INCREMENT - keyword: BY - comparison_operator: raw_comparison_operator: '=' - integer_literal: '15' - keyword: ORDER - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'this_a_beautiful_sequence'" - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SEQUENCE - keyword: IF - keyword: NOT - keyword: EXISTS - sequence_reference: naked_identifier: seq - keyword: START - comparison_operator: raw_comparison_operator: '=' - integer_literal: '2' - keyword: INCREMENT - comparison_operator: raw_comparison_operator: '=' - integer_literal: '15' - keyword: NOORDER - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: START - integer_literal: '2' - statement_terminator: ; - statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: seq - keyword: INCREMENT - integer_literal: '2' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_stage.sql000066400000000000000000000053421451700765000251340ustar00rootroot00000000000000CREATE STAGE my_int_stage COPY_OPTIONS = (ON_ERROR='skip_file'); CREATE STAGE my_int_stage ENCRYPTION = (TYPE = 'SNOWFLAKE_SSE') COPY_OPTIONS = (ON_ERROR='skip_file'); CREATE TEMPORARY STAGE my_temp_int_stage; CREATE TEMPORARY STAGE my_int_stage FILE_FORMAT = my_csv_format; CREATE STAGE mystage DIRECTORY = (ENABLE = TRUE) FILE_FORMAT = myformat; CREATE STAGE my_ext_stage URL='s3://load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load/' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage URL='s3://load/files' STORAGE_INTEGRATION = myint; CREATE STAGE my_ext_stage1 URL='s3://load/files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z'); CREATE STAGE my_ext_stage2 URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z') ENCRYPTION=(MASTER_KEY = 'eSxX0jzYfIamtnBKOEOwq80Au6NbSgPH5r4BDDwOaO8='); CREATE STAGE my_ext_stage3 URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_KEY_ID='1a2b3c' AWS_SECRET_KEY='4x5y6z') ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID = 'aws/key'); CREATE STAGE my_ext_stage3 URL='s3://load/encrypted_files/' CREDENTIALS=(AWS_ROLE='arn:aws:iam::001234567890:role/mysnowflakerole') ENCRYPTION=(TYPE='AWS_SSE_KMS' KMS_KEY_ID = 'aws/key'); CREATE STAGE mystage URL='s3://load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true ); CREATE STAGE my_ext_stage URL='gcs://load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE mystage URL='gcs://load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true NOTIFICATION_INTEGRATION = 'MY_NOTIFICATION_INT' ); CREATE STAGE my_ext_stage 
URL='azure://myaccount.blob.core.windows.net/load/files/' STORAGE_INTEGRATION = myint; CREATE STAGE mystage URL='azure://myaccount.blob.core.windows.net/mycontainer/files/' CREDENTIALS=(AZURE_SAS_TOKEN='?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D') ENCRYPTION=(TYPE='AZURE_CSE' MASTER_KEY = 'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8=') FILE_FORMAT = my_csv_format; CREATE STAGE mystage URL='azure://myaccount.blob.core.windows.net/load/files/' STORAGE_INTEGRATION = my_storage_int DIRECTORY = ( ENABLE = true AUTO_REFRESH = true NOTIFICATION_INTEGRATION = 'MY_NOTIFICATION_INT' ); CREATE OR REPLACE STAGE foo.bar URL = 's3://foobar' STORAGE_INTEGRATION = foo FILE_FORMAT = foo.bar.baz ; CREATE OR REPLACE STAGE foo.bar URL = 's3://foobar' STORAGE_INTEGRATION = foo FILE_FORMAT = (FORMAT_NAME = foo.bar.baz) ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_stage.yml000066400000000000000000000401771451700765000251430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 624645bd6d3167a1ded47fab25130bf61c3bc682b56a9cd7651a92836f2802a0 file: - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_int_stage - keyword: COPY_OPTIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( copy_options: keyword: ON_ERROR comparison_operator: raw_comparison_operator: '=' copy_on_error_option: "'skip_file'" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_int_stage - stage_parameters: keyword: ENCRYPTION comparison_operator: raw_comparison_operator: '=' bracketed: start_bracket: ( keyword: TYPE comparison_operator: raw_comparison_operator: '=' stage_encryption_option: "'SNOWFLAKE_SSE'" end_bracket: ) - keyword: COPY_OPTIONS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( copy_options: keyword: ON_ERROR comparison_operator: raw_comparison_operator: '=' copy_on_error_option: "'skip_file'" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STAGE - object_reference: naked_identifier: my_temp_int_stage - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STAGE - object_reference: naked_identifier: my_int_stage - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: my_csv_format - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: ENABLE comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' end_bracket: ) - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: myformat - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: 
naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage1 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: CREDENTIALS comparison_operator: raw_comparison_operator: '=' bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage2 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: MASTER_KEY comparison_operator: raw_comparison_operator: '=' quoted_literal: "'eSxX0jzYfIamtnBKOEOwq80Au6NbSgPH5r4BDDwOaO8='" end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage3 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: AWS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1a2b3c'" - keyword: AWS_SECRET_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'4x5y6z'" - end_bracket: ) - keyword: ENCRYPTION - 
comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'aws/key'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage3 - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/encrypted_files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: AWS_ROLE comparison_operator: raw_comparison_operator: '=' quoted_literal: "'arn:aws:iam::001234567890:role/mysnowflakerole'" end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AWS_SSE_KMS'" - keyword: KMS_KEY_ID - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'aws/key'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'gcs://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'gcs://load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: NOTIFICATION_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MY_NOTIFICATION_INT'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: my_ext_stage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: 
naked_identifier: myint - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/files/'" - stage_parameters: - keyword: CREDENTIALS - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( keyword: AZURE_SAS_TOKEN comparison_operator: raw_comparison_operator: '=' quoted_literal: "'?sv=2016-05-31&ss=b&srt=sco&sp=rwdl&se=2018-06-27T10:05:50Z&st=2017-06-27T02:05:50Z&spr=https,http&sig=bgqQwoXwxzuD2GJfagRg7VOS8hzNr3QLT7rhS8OFRLQ%3D'" end_bracket: ) - keyword: ENCRYPTION - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - stage_encryption_option: "'AZURE_CSE'" - keyword: MASTER_KEY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'kPxX0jzYfIamtnJEUTHwq80Au6NbSgPH5r4BDDwOaO8='" - end_bracket: ) - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: naked_identifier: my_csv_format - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: STAGE - object_reference: naked_identifier: mystage - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'azure://myaccount.blob.core.windows.net/load/files/'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: my_storage_int - keyword: DIRECTORY - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - keyword: ENABLE - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: AUTO_REFRESH - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: NOTIFICATION_INTEGRATION - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'MY_NOTIFICATION_INT'" - end_bracket: ) - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - dot: . - naked_identifier: baz - statement_terminator: ; - statement: create_stage_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STAGE - object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - keyword: URL - comparison_operator: raw_comparison_operator: '=' - bucket_path: "'s3://foobar'" - stage_parameters: keyword: STORAGE_INTEGRATION comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: foo - keyword: FILE_FORMAT - comparison_operator: raw_comparison_operator: '=' - file_format_segment: bracketed: start_bracket: ( keyword: FORMAT_NAME comparison_operator: raw_comparison_operator: '=' object_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - dot: . 
          - naked_identifier: baz
          end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_storage_integration.sql000066400000000000000000000050241451700765000300750ustar00rootroot00000000000000create storage integration s3_int
  type = external_stage
  storage_provider = s3
  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
  enabled = true
  storage_allowed_locations = ('s3://mybucket1/path1/', 's3://mybucket2/path2/');

create storage integration s3_int
  type = external_stage
  storage_provider = s3
  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
  enabled = true
  storage_allowed_locations = ('s3://mybucket1', 's3://mybucket2/');

create storage integration gcs_int
  type = external_stage
  storage_provider = gcs
  enabled = true
  storage_allowed_locations = ('gcs://mybucket1/path1/', 'gcs://mybucket2/path2/');

create storage integration azure_int
  type = external_stage
  storage_provider = azure
  enabled = true
  azure_tenant_id = ''
  storage_allowed_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/');

create or replace storage integration s3_int
  type = external_stage
  storage_provider = s3
  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
  enabled = true
  storage_allowed_locations = ('*')
  storage_blocked_locations = ('s3://mybucket3/path3/', 's3://mybucket4/path4/');

create or replace storage integration gcs_int
  type = external_stage
  storage_provider = gcs
  enabled = true
  storage_allowed_locations = ('*')
  storage_blocked_locations = ('gcs://mybucket3/path3/', 'gcs://mybucket4/path4/');

create or replace storage integration azure_int
  type = external_stage
  storage_provider = azure
  enabled = false
  azure_tenant_id = 'a123b4c5-1234-123a-a12b-1a23b45678c9'
  storage_allowed_locations = ('*')
  storage_blocked_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path3/', 'azure://myaccount.blob.core.windows.net/mycontainer/path4/');

create storage integration s3_int
  type = external_stage
  storage_provider = 's3'
  storage_aws_role_arn = 'arn:aws:iam::001234567890:role/myrole'
  enabled = true
  storage_allowed_locations = ('s3://mybucket1', 's3://mybucket2/');

create storage integration gcs_int
  type = external_stage
  storage_provider = 'gcs'
  enabled = true
  storage_allowed_locations = ('gcs://mybucket1/path1/', 'gcs://mybucket2/path2/');

create storage integration azure_int
  type = external_stage
  storage_provider = 'azure'
  enabled = true
  azure_tenant_id = ''
  storage_allowed_locations = ('azure://myaccount.blob.core.windows.net/mycontainer/path1/', 'azure://myaccount.blob.core.windows.net/mycontainer/path2/');
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_storage_integration.yml000066400000000000000000000246041451700765000301040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
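# For example, after hand-editing the companion create_storage_integration.sql
# fixture above, the parse tree and "_hash" value below would be refreshed by
# re-running the documented generator from the repository root (a sketch;
# it assumes a working dev checkout with SQLFluff's test dependencies
# installed, and shows only the bare command form documented above):
#
#   python test/generate_parse_fixture_yml.py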
_hash: b4e1028d47f6cd4b5148ab06be5ea1d2ccc483c14245c6bfe23c55edee06cb1c file: - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1/path1/'" - comma: ',' - bucket_path: "'s3://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcs - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - 
comparison_operator: raw_comparison_operator: '=' - keyword: s3 - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket3/path3/'" - comma: ',' - bucket_path: "'s3://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: gcs - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket3/path3/'" - comma: ',' - bucket_path: "'gcs://mybucket4/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - keyword: azure - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'a123b4c5-1234-123a-a12b-1a23b45678c9'" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_star: "'*'" end_bracket: ) - keyword: storage_blocked_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path3/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path4/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: s3_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'s3'" - keyword: storage_aws_role_arn - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'arn:aws:iam::001234567890:role/myrole'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'s3://mybucket1'" - comma: ',' - bucket_path: "'s3://mybucket2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: 
storage - keyword: integration - object_reference: naked_identifier: gcs_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'gcs'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'gcs://mybucket1/path1/'" - comma: ',' - bucket_path: "'gcs://mybucket2/path2/'" - end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: storage - keyword: integration - object_reference: naked_identifier: azure_int - keyword: type - comparison_operator: raw_comparison_operator: '=' - keyword: external_stage - keyword: storage_provider - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'azure'" - keyword: enabled - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - keyword: azure_tenant_id - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: storage_allowed_locations - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path1/'" - comma: ',' - bucket_path: "'azure://myaccount.blob.core.windows.net/mycontainer/path2/'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_stream.sql000066400000000000000000000021041451700765000253150ustar00rootroot00000000000000create stream new_stream on table table_name; create stream mystream on table mytable before (timestamp => to_timestamp(40*365*86400)); create stream mystream on table mytable at(offset => -60*5); create stream mystream on table mytable before(statement => '8e5d0ca9-005e-44e6-b858-a8f5b37c5726'); create stream new_stream on external table table_name; create stream new_stream on stage stage_name; create stream new_stream on view view_name; create stream new_stream clone source_stream; create or replace stream new_stream on table table_name; create stream if not exists new_stream on table table_name; CREATE OR REPLACE STREAM new_stream COPY GRANTS ON TABLE table_name APPEND_ONLY = TRUE SHOW_INITIAL_ROWS = TRUE COMMENT = 'amazing comment'; CREATE OR REPLACE STREAM new_stream ON EXTERNAL TABLE table_name INSERT_ONLY = TRUE COMMENT = 'amazing comment'; CREATE STREAM IF NOT EXISTS new_stream ON STAGE stage_name COMMENT = 'amazing comment'; CREATE STREAM IF NOT EXISTS new_stream ON VIEW view_name APPEND_ONLY = FALSE SHOW_INITIAL_ROWS = FALSE COMMENT = 'amazing comment'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_stream.yml000066400000000000000000000157631451700765000253360ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 4231f1ad0afda72d3bf827f7d841e80a423dfec0a772844a467110f18f99cf4d file: - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: timestamp parameter_assigner: => expression: function: function_name: function_name_identifier: to_timestamp bracketed: start_bracket: ( expression: - numeric_literal: '40' - binary_operator: '*' - numeric_literal: '365' - binary_operator: '*' - numeric_literal: '86400' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_at_expression: keyword: at bracketed: start_bracket: ( keyword: offset parameter_assigner: => expression: - numeric_literal: sign_indicator: '-' numeric_literal: '60' - binary_operator: '*' - numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: mystream - keyword: 'on' - keyword: table - object_reference: naked_identifier: mytable - from_before_expression: keyword: before bracketed: start_bracket: ( keyword: statement parameter_assigner: => expression: quoted_literal: "'8e5d0ca9-005e-44e6-b858-a8f5b37c5726'" end_bracket: ) - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: external - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: stage - object_reference: naked_identifier: stage_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: view - object_reference: naked_identifier: view_name - statement_terminator: ; - statement: create_clone_statement: - keyword: create - keyword: stream - object_reference: naked_identifier: new_stream - keyword: clone - object_reference: naked_identifier: source_stream - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: or - keyword: replace - keyword: stream - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: create - keyword: stream - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: new_stream - keyword: 'on' - keyword: table - object_reference: naked_identifier: table_name - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAM - object_reference: naked_identifier: new_stream - keyword: COPY - keyword: GRANTS - keyword: 'ON' - keyword: TABLE - object_reference: naked_identifier: table_name - keyword: 
APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - keyword: SHOW_INITIAL_ROWS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: STREAM - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: EXTERNAL - keyword: TABLE - object_reference: naked_identifier: table_name - keyword: INSERT_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: STREAM - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: STAGE - object_reference: naked_identifier: stage_name - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; - statement: create_stream_statement: - keyword: CREATE - keyword: STREAM - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: new_stream - keyword: 'ON' - keyword: VIEW - object_reference: naked_identifier: view_name - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - keyword: SHOW_INITIAL_ROWS - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'FALSE' - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'amazing comment'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table.sql000066400000000000000000000060711451700765000251200ustar00rootroot00000000000000create table if not exists "p08_base" as select VALUE:id::TEXT id from "_p08"; CREATE TABLE IF NOT EXISTS table_name ( col1 VARCHAR ); create table mytable (amount number); create table mytable (amount number) CLUSTER BY (amount); create table mytable (amount number) CLUSTER BY LINEAR(amount); create table mytable CLUSTER BY (amount) (amount number); create table mytable CLUSTER BY LINEAR(amount) (amount number); create table mytable_copy2 as select b+1 as c from mytable_copy; create table mytable_2 like mytable; create temporary table demo_temporary (i integer); create temp table demo_temp (i integer); create local temporary table demo_local_temporary (i integer); create local temp table demo_local_temp (i integer); create global temporary table demo_global_temporary (i integer); create global temp table demo_global_temp (i integer); create volatile table demo_volatile (i integer); create table example (col1 number comment 'a column comment') comment='a table comment'; create table testtable_summary (name, summary_amount) as select name, amount1 + amount2 from testtable; create table testtable_summary (barry char) as select name, amount1 + amount2 from testtable; create table testtable_summary as select name, amount1 + amount2 from testtable; create or replace table parquet_col ( custkey number default null, orderdate date default null, orderstatus varchar(100) default null, price varchar(255) ) as select $1:o_custkey::number, $1:o_orderdate::date, $1:o_orderstatus::text, $1:o_totalprice::text from 
  @my_stage;

create table collation_demo (
  uncollated_phrase varchar,
  utf8_phrase varchar collate 'utf8',
  english_phrase varchar collate 'en',
  spanish_phrase varchar collate 'sp'
);

create table mytable
using template (
  select array_agg(object_construct(*))
  from table(
    infer_schema(
      location=>'@mystage',
      file_format=>'my_parquet_format'
    )
  ));

create table dollar_sign_table (foo$bar boolean);

create table dollar_sign_schema.dollar_sign_table (foo$bar boolean);

CREATE TABLE timestamp_column_default_value_demo (
  timestamp_col1 TIMESTAMP_TZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
  timestamp_col2 TIMESTAMP_TZ DEFAULT CURRENT_TIMESTAMP(),
  timestamp_col3 TIMESTAMP_TZ DEFAULT CURRENT_TIMESTAMP(2),
  sysdate_col4 TIMESTAMP_TZ DEFAULT SYSDATE()
);

create table test_table (test_column NUMBER autoincrement (0, 1));

create table test_schema.test_table (test_column NUMBER autoincrement (0, 1));

create or replace table test_schema.test_table (test_column NUMBER autoincrement (0, 1));

create table test_schema.test_table (test_column INTEGER AUTOINCREMENT);

CREATE TABLE test_table (test_column NUMBER WITH MASKING POLICY my_policy USING(test_column, test_column > 10));

CREATE OR REPLACE TABLE SCHEMA1.TABLE1 (
  "COL1" varchar(128) NOT NULL,
  "COL2" varchar(128) NOT NULL
)
CHANGE_TRACKING = TRUE
WITH TAG (
  account_objects.tags.IRM = '{"IRM":[{"Primary":"ABC123"}]}'
);

CREATE TABLE my_table (
  foo TIMESTAMP_NTZ DEFAULT CURRENT_TIMESTAMP::TIMESTAMP_NTZ
)
;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table.yml000066400000000000000000000653211451700765000251250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c2e97e0473403cc1af6e0ba8dd54cc56a3e1b5a66b1cde9ac79d160e53a5ef3f file: - statement: create_table_statement: - keyword: create - keyword: table - keyword: if - keyword: not - keyword: exists - table_reference: quoted_identifier: '"p08_base"' - keyword: as - select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: naked_identifier: VALUE semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: TEXT alias_expression: naked_identifier: id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '"_p08"' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_name - bracketed: start_bracket: ( column_definition: naked_identifier: col1 data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - keyword: CLUSTER - keyword: BY - bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - keyword: CLUSTER - keyword: BY - function: function_name: function_name_identifier: LINEAR bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - keyword: CLUSTER - keyword: BY - bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - keyword: CLUSTER - keyword: BY - function: function_name: function_name_identifier: LINEAR bracketed: start_bracket: ( expression: column_reference: naked_identifier: amount end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: amount data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable_copy2 - keyword: as - select_statement: select_clause: keyword: select select_clause_element: expression: column_reference: naked_identifier: b binary_operator: + numeric_literal: '1' alias_expression: keyword: as naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable_copy - 
statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable_2 - keyword: like - table_reference: naked_identifier: mytable - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: temporary - keyword: table - table_reference: naked_identifier: demo_temporary - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: temp - keyword: table - table_reference: naked_identifier: demo_temp - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: local - keyword: temporary - keyword: table - table_reference: naked_identifier: demo_local_temporary - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: local - keyword: temp - keyword: table - table_reference: naked_identifier: demo_local_temp - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: global - keyword: temporary - keyword: table - table_reference: naked_identifier: demo_global_temporary - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: global - keyword: temp - keyword: table - table_reference: naked_identifier: demo_global_temp - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: volatile - keyword: table - table_reference: naked_identifier: demo_volatile - bracketed: start_bracket: ( column_definition: naked_identifier: i data_type: data_type_identifier: integer end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: example - bracketed: start_bracket: ( column_definition: naked_identifier: col1 data_type: data_type_identifier: number comment_clause: keyword: comment quoted_literal: "'a column comment'" end_bracket: ) - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'a table comment'" - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: testtable_summary - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: summary_amount - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: amount1 - binary_operator: + - column_reference: naked_identifier: amount2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable - statement_terminator: ; - statement: 
create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: testtable_summary - bracketed: start_bracket: ( column_definition: naked_identifier: barry data_type: data_type_identifier: char end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: amount1 - binary_operator: + - column_reference: naked_identifier: amount2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: testtable_summary - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: amount1 - binary_operator: + - column_reference: naked_identifier: amount2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: naked_identifier: parquet_col - bracketed: - start_bracket: ( - column_definition: naked_identifier: custkey data_type: data_type_identifier: number column_constraint_segment: keyword: default expression: null_literal: 'null' - comma: ',' - column_definition: naked_identifier: orderdate data_type: data_type_identifier: date column_constraint_segment: keyword: default expression: null_literal: 'null' - comma: ',' - column_definition: naked_identifier: orderstatus data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: default expression: null_literal: 'null' - comma: ',' - column_definition: naked_identifier: price data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - end_bracket: ) - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: column_reference: column_index_identifier_segment: $1 semi_structured_expression: colon: ':' semi_structured_element: o_custkey casting_operator: '::' data_type: data_type_identifier: number - comma: ',' - select_clause_element: expression: cast_expression: column_reference: column_index_identifier_segment: $1 semi_structured_expression: colon: ':' semi_structured_element: o_orderdate casting_operator: '::' data_type: data_type_identifier: date - comma: ',' - select_clause_element: expression: cast_expression: column_reference: column_index_identifier_segment: $1 semi_structured_expression: colon: ':' semi_structured_element: o_orderstatus casting_operator: '::' data_type: data_type_identifier: text - comma: ',' - select_clause_element: expression: cast_expression: column_reference: column_index_identifier_segment: $1 semi_structured_expression: colon: ':' semi_structured_element: o_totalprice casting_operator: '::' data_type: data_type_identifier: text from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@my_stage' - statement_terminator: ; - statement: create_table_statement: 
- keyword: create - keyword: table - table_reference: naked_identifier: collation_demo - bracketed: - start_bracket: ( - column_definition: naked_identifier: uncollated_phrase data_type: data_type_identifier: varchar - comma: ',' - column_definition: naked_identifier: utf8_phrase data_type: data_type_identifier: varchar column_constraint_segment: keyword: collate collation_reference: quoted_literal: "'utf8'" - comma: ',' - column_definition: naked_identifier: english_phrase data_type: data_type_identifier: varchar column_constraint_segment: keyword: collate collation_reference: quoted_literal: "'en'" - comma: ',' - column_definition: naked_identifier: spanish_phrase data_type: data_type_identifier: varchar column_constraint_segment: keyword: collate collation_reference: quoted_literal: "'sp'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: mytable - keyword: using - keyword: template - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: array_agg bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: object_construct bracketed: start_bracket: ( star: '*' end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: table bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: infer_schema bracketed: - start_bracket: ( - snowflake_keyword_expression: parameter: location parameter_assigner: => quoted_literal: "'@mystage'" - comma: ',' - snowflake_keyword_expression: parameter: file_format parameter_assigner: => quoted_literal: "'my_parquet_format'" - end_bracket: ) end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: dollar_sign_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo$bar data_type: data_type_identifier: boolean end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: dollar_sign_schema - dot: . 
- naked_identifier: dollar_sign_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo$bar data_type: data_type_identifier: boolean end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: timestamp_column_default_value_demo - bracketed: - start_bracket: ( - column_definition: naked_identifier: timestamp_col1 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: - keyword: NOT - keyword: 'NULL' - keyword: DEFAULT - expression: bare_function: CURRENT_TIMESTAMP - comma: ',' - column_definition: naked_identifier: timestamp_col2 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: naked_identifier: timestamp_col3 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: CURRENT_TIMESTAMP bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - column_definition: naked_identifier: sysdate_col4 data_type: data_type_identifier: TIMESTAMP_TZ column_constraint_segment: keyword: DEFAULT expression: function: function_name: function_name_identifier: SYSDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: test_schema - dot: . - naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: - naked_identifier: test_schema - dot: . - naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: keyword: autoincrement bracketed: - start_bracket: ( - numeric_literal: '0' - comma: ',' - numeric_literal: '1' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: test_schema - dot: . 
- naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: INTEGER column_constraint_segment: keyword: AUTOINCREMENT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: test_table - bracketed: start_bracket: ( column_definition: naked_identifier: test_column data_type: data_type_identifier: NUMBER column_constraint_segment: - keyword: WITH - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: my_policy - keyword: USING - bracketed: start_bracket: ( column_reference: naked_identifier: test_column comma: ',' expression: column_reference: naked_identifier: test_column comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: SCHEMA1 - dot: . - naked_identifier: TABLE1 - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '"COL1"' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '128' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '"COL2"' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '128' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: CHANGE_TRACKING - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: start_bracket: ( tag_reference: - naked_identifier: account_objects - dot: . - naked_identifier: tags - dot: . - naked_identifier: IRM comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{\"IRM\":[{\"Primary\":\"ABC123\"}]}'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: my_table - bracketed: start_bracket: ( column_definition: naked_identifier: foo data_type: data_type_identifier: TIMESTAMP_NTZ column_constraint_segment: keyword: DEFAULT expression: cast_expression: bare_function: CURRENT_TIMESTAMP casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_NTZ end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table_comments.sql000066400000000000000000000000651451700765000270220ustar00rootroot00000000000000 CREATE TABLE foo_table (bar INTEGER) COMMENT = '1'; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table_comments.yml000066400000000000000000000016061451700765000270260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 48c1bc6fe4a7a82dff45ccd61ff2cda7a04e1f20b279bdda730bf4875fc03ba5
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo_table
    - bracketed:
        start_bracket: (
        column_definition:
          naked_identifier: bar
          data_type:
            data_type_identifier: INTEGER
        end_bracket: )
    - comment_equals_clause:
        keyword: COMMENT
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'1'"
  statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table_with_partition_as_column_name.sql000066400000000000000000000001311451700765000332730ustar00rootroot00000000000000CREATE TABLE foo (
    timestamp_col TIMESTAMP,
    date_col DATE,
    partition INTEGER
);
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_table_with_partition_as_column_name.yml000066400000000000000000000020111451700765000332760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 817c71c35394d857f535d0605e556e64bc82e28c33f5edfe3262a640c54dfeca
file:
  statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
        naked_identifier: foo
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: timestamp_col
          data_type:
            keyword: TIMESTAMP
      - comma: ','
      - column_definition:
          naked_identifier: date_col
          data_type:
            data_type_identifier: DATE
      - comma: ','
      - column_definition:
          naked_identifier: partition
          data_type:
            data_type_identifier: INTEGER
      - end_bracket: )
  statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_tag.sql000066400000000000000000000003131451700765000245750ustar00rootroot00000000000000CREATE OR REPLACE TAG IF NOT EXISTS boo;

CREATE TAG cost_center COMMENT = 'cost_center tag';

CREATE OR REPLACE TAG IF NOT EXISTS DATA_CLASSIFICATION ALLOWED_VALUES 'RESTRICTED', 'CONFIDENTIAL', 'PII';
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_tag.yml000066400000000000000000000025511451700765000246050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
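# Once regenerated, a fixture like this one can be checked by running the test
# suite over the test/ tree (a sketch; the "-k" selection expression is an
# assumption for narrowing to Snowflake cases, not an invocation documented in
# this file):
#
#   pytest test/ -k snowflake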
_hash: 88d5bd2ded5ad8524417efc8f9ec4266186eae436ee5498dabf35c9b969f4813
file:
- statement:
    create_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TAG
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - object_reference:
        naked_identifier: boo
- statement_terminator: ;
- statement:
    create_statement:
    - keyword: CREATE
    - keyword: TAG
    - object_reference:
        naked_identifier: cost_center
    - keyword: COMMENT
    - comparison_operator:
        raw_comparison_operator: '='
    - quoted_literal: "'cost_center tag'"
- statement_terminator: ;
- statement:
    create_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TAG
    - keyword: IF
    - keyword: NOT
    - keyword: EXISTS
    - object_reference:
        naked_identifier: DATA_CLASSIFICATION
    - keyword: ALLOWED_VALUES
    - quoted_literal: "'RESTRICTED'"
    - comma: ','
    - quoted_literal: "'CONFIDENTIAL'"
    - comma: ','
    - quoted_literal: "'PII'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_task.sql000066400000000000000000000045161451700765000247730ustar00rootroot00000000000000-- Examples from the documentation
CREATE TASK t1
  SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles'
  TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24'
  USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL'
  AS
  INSERT INTO mytable(ts) VALUES(1);

CREATE TASK mytask_hour
  WAREHOUSE = mywh
  SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles'
  TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24'
  AS
  INSERT INTO mytable(ts) VALUES(1, 2, 3);

-- All possible optional clauses
CREATE OR REPLACE TASK IF NOT EXISTS t1
  SCHEDULE = 'USING CRON 0 9-17 * * SUN America/Los_Angeles'
  ALLOW_OVERLAPPING_EXECUTION = TRUE
  TIMESTAMP_INPUT_FORMAT = 'YYYY-MM-DD HH24'
  USER_TASK_TIMEOUT_MS = 25
  USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE = 'XSMALL'
  COPY GRANTS
  COMMENT = 'Hello world'
  AFTER dependency_task
  AS
  INSERT INTO mytable(ts) VALUES(1);

-- Only mandatory clauses
CREATE TASK t1
  AS
  INSERT INTO mytable(ts) VALUES(1);

-- Real life examples
CREATE OR REPLACE TASK insert_session
  WAREHOUSE = eng_wh
  SCHEDULE = 'USING CRON 45 6 * * * UTC'
  AS
  INSERT INTO sch.s_session
  SELECT
    *,
    sum(break) OVER (PARTITION BY serial ORDER BY datetime) AS session_id
  FROM (
    SELECT *
    FROM base_table
  )
;

CREATE OR REPLACE TASK update_session
  WAREHOUSE = eng_wh
  AFTER insert_session
  AS
  UPDATE sch.s_session
  SET
    lag_datetime = v.lag_datetime,
    row_number = v.row_number
  FROM (
    SELECT
      *,
      (
        sum(break) OVER (PARTITION BY serial ORDER BY datetime)
      ) AS session_id
    FROM (
      SELECT *
      FROM derived_table
    )
    ORDER BY serial, datetime
  ) AS v
  WHERE sch.s_session.event_id = v.event_id
;

CREATE OR REPLACE TASK sch.truncate_session
  WAREHOUSE = eng_wh
  AFTER sch.update_session
  AS
  CALL sch.session_agg_insert();

CREATE OR REPLACE TASK insert__agg
  WAREHOUSE = eng_wh
  SCHEDULE = 'USING CRON 15 7 2 * * UTC'
  AS
  CALL auto_device_insert();

CREATE OR REPLACE TASK SCH.MY_TASK
  WAREHOUSE = MY_WH
  SCHEDULE = 'USING CRON 15 7 2 * * UTC'
  USER_TASK_TIMEOUT_MS = 10800000
  WHEN
    SYSTEM$STREAM_HAS_DATA('SCH.MY_STREAM')
    AND 1=1
  AS
  CALL SCH.MY_SPROC();
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_task.yml000066400000000000000000000415411451700765000247760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 840a52361e794d5324d9d7859afb34f2a8552f2f8f6038f77fe60cc7169f8316 file: - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: t1 - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'XSMALL'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: mytask_hour - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: mywh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - keyword: IF - keyword: NOT - keyword: EXISTS - object_reference: naked_identifier: t1 - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 0 9-17 * * SUN America/Los_Angeles'" - keyword: ALLOW_OVERLAPPING_EXECUTION - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - parameter: TIMESTAMP_INPUT_FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'YYYY-MM-DD HH24'" - keyword: USER_TASK_TIMEOUT_MS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '25' - keyword: USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'XSMALL'" - keyword: COPY - keyword: GRANTS - parameter: COMMENT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Hello world'" - keyword: AFTER - object_reference: naked_identifier: dependency_task - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: TASK - object_reference: naked_identifier: t1 - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - bracketed: start_bracket: ( column_reference: naked_identifier: ts 
end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: insert_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 45 6 * * * UTC'" - keyword: AS - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: sch - dot: . - naked_identifier: s_session - select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: break end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: serial orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: datetime end_bracket: ) alias_expression: keyword: AS naked_identifier: session_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: base_table end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: update_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: AFTER - object_reference: naked_identifier: insert_session - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: sch - dot: . - naked_identifier: s_session set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: lag_datetime - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . - naked_identifier: lag_datetime - comma: ',' - set_clause: - column_reference: naked_identifier: row_number - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . 
- naked_identifier: row_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: break end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: serial orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: datetime end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: session_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: derived_table end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: serial - comma: ',' - column_reference: naked_identifier: datetime end_bracket: ) alias_expression: keyword: AS naked_identifier: v where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: sch - dot: . - naked_identifier: s_session - dot: . - naked_identifier: event_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: v - dot: . - naked_identifier: event_id - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: - naked_identifier: sch - dot: . - naked_identifier: truncate_session - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: AFTER - object_reference: - naked_identifier: sch - dot: . - naked_identifier: update_session - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: sch dot: . function_name_identifier: session_agg_insert bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: naked_identifier: insert__agg - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: eng_wh - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 15 7 2 * * UTC'" - keyword: AS - statement: call_segment: keyword: CALL function: function_name: function_name_identifier: auto_device_insert bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: create_task_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TASK - object_reference: - naked_identifier: SCH - dot: . 
- naked_identifier: MY_TASK - keyword: WAREHOUSE - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: MY_WH - keyword: SCHEDULE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'USING CRON 15 7 2 * * UTC'" - keyword: USER_TASK_TIMEOUT_MS - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10800000' - keyword: WHEN - snowflake_task_expression_segment: system_function_name: SYSTEM$STREAM_HAS_DATA bracketed: start_bracket: ( quoted_literal: "'SCH.MY_STREAM'" end_bracket: ) binary_operator: AND expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: AS - statement: call_segment: keyword: CALL function: function_name: naked_identifier: SCH dot: . function_name_identifier: MY_SPROC bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_user.sql000066400000000000000000000012521451700765000250030ustar00rootroot00000000000000create user user1 password='abc123' default_role = myrole display_name = user1 login_name = my_login_name first_name = User1 middle_name = abc last_name = Test1 default_warehouse = my_default_warehouse default_namespace = my_default_namespace default_secondary_roles = ('ALL') must_change_password = true; create user user2 password='abc123' default_role = 'myrole' display_name = 'user 2' login_name = 'test login name' first_name = 'User' middle_name = 'abc' last_name = 'test2' default_warehouse = 'my_default_warehouse' default_namespace = 'my_default_namespace' must_change_password = false; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_user.yml000066400000000000000000000071411451700765000250100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 22283b573b8035fbf38494b02509411ee71d9b5493e78f1c4c88b09998ca16e8 file: - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user1 - keyword: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: myrole - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: user1 - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_login_name - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: User1 - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: abc - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: Test1 - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_warehouse - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: my_default_namespace - keyword: default_secondary_roles - comparison_operator: raw_comparison_operator: '=' - bracketed: start_bracket: ( quoted_literal: "'ALL'" end_bracket: ) - keyword: must_change_password - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_user_statement: - keyword: create - keyword: user - object_reference: naked_identifier: user2 - keyword: password - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc123'" - keyword: default_role - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'myrole'" - keyword: display_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'user 2'" - keyword: login_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test login name'" - keyword: first_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'User'" - keyword: middle_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'abc'" - keyword: last_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'test2'" - keyword: default_warehouse - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_default_warehouse'" - keyword: default_namespace - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my_default_namespace'" - keyword: must_change_password - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_view.sql000066400000000000000000000026401451700765000250010ustar00rootroot00000000000000 create view another_view comment = 'a great description' as select col_1, col_2 from other_table; CREATE VIEW basic_view AS SELECT col1, col2 FROM src_table; CREATE TEMPORARY VIEW view_with_comments COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_replace_and_comment COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE SECURE RECURSIVE VIEW IF NOT EXISTS secure_recursive_view_with_comment COMMENT = 'my comment' AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_comment_and_copy_grants COMMENT = 'my comment' 
COPY GRANTS AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_tags_and_copy_grants WITH TAG (foo = 'bar', hello = 'world') COPY GRANTS AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE VIEW view_with_column_comment ( col1, col2 COMMENT 'some comment' ) AS SELECT col1, col2 FROM src_table; CREATE OR REPLACE SECURE RECURSIVE VIEW IF NOT EXISTS view_with_all_implemented_features COMMENT = 'table-level comment' ( col1, col2 COMMENT 'some comment' ) AS WITH cte AS (SELECT col1 FROM table_1) SELECT col1, col2 FROM table_2 INNER JOIN my_cte ON table_1.pk = table_2.pk; CREATE OR REPLACE VIEW vw_appt_latest AS ( WITH most_current as ( SELECT da.* FROM dim_appt da WHERE da.current_appt_id IS NULL ) SELECT * from most_current ); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_view.yml000066400000000000000000000312711451700765000250050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ddbf5f2d24bc7e37e3fec26d6145c5453334948627a94bff776d63464b9d8b08 file: - statement: create_view_statement: - keyword: create - keyword: view - table_reference: naked_identifier: another_view - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'a great description'" - keyword: as - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col_1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col_2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: other_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: basic_view - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: view_with_comments - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_replace_and_comment - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SECURE - keyword: RECURSIVE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: secure_recursive_view_with_comment - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_comment_and_copy_grants - comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'my comment'" - keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_tags_and_copy_grants - tag_bracketed_equals: - keyword: WITH - keyword: TAG - bracketed: - start_bracket: ( - tag_reference: naked_identifier: foo - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'bar'" - comma: ',' - tag_reference: naked_identifier: hello - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'world'" - end_bracket: ) - keyword: COPY - keyword: GRANTS - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: view_with_column_comment - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comment_clause: keyword: COMMENT quoted_literal: "'some comment'" - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src_table - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: SECURE - keyword: RECURSIVE - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: view_with_all_implemented_features 
- comment_equals_clause: keyword: COMMENT comparison_operator: raw_comparison_operator: '=' quoted_literal: "'table-level comment'" - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - comment_clause: keyword: COMMENT quoted_literal: "'some comment'" - end_bracket: ) - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_1 end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: my_cte - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table_1 - dot: . - naked_identifier: pk - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table_2 - dot: . - naked_identifier: pk - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: vw_appt_latest - keyword: AS - bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: most_current keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: da dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dim_appt alias_expression: naked_identifier: da where_clause: keyword: WHERE expression: column_reference: - naked_identifier: da - dot: . - naked_identifier: current_appt_id keyword: IS null_literal: 'NULL' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: most_current end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_warehouse.sql000066400000000000000000000007311451700765000260300ustar00rootroot00000000000000create or replace warehouse my_wh with warehouse_size='X-LARGE'; create or replace warehouse my_wh warehouse_size=large initially_suspended=true; create warehouse if not exists LOAD_WH warehouse_size='medium'; create warehouse if not exists LOAD_WH warehouse_size='medium' warehouse_type = standard; create warehouse my_wh WAREHOUSE_TYPE = 'SNOWPARK-OPTIMIZED' warehouse_size = 'medium' SCALING_POLICY = ECONOMY comment = 'comment' auto_suspend = 60 ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/create_warehouse.yml000066400000000000000000000061221451700765000260320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3194a56a124b29b4e8ccc62a397597b01bbcb39dd0523445316a2e3a89f7a4b8 file: - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: warehouse - object_reference: naked_identifier: my_wh - keyword: with - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: "'X-LARGE'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: warehouse - object_reference: naked_identifier: my_wh - warehouse_object_properties: - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: large - keyword: initially_suspended - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: LOAD_WH - warehouse_object_properties: keyword: warehouse_size comparison_operator: raw_comparison_operator: '=' warehouse_size: "'medium'" - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - keyword: if - keyword: not - keyword: exists - object_reference: naked_identifier: LOAD_WH - warehouse_object_properties: - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'medium'" - keyword: warehouse_type - comparison_operator: raw_comparison_operator: '=' - warehouse_size: standard - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: warehouse - object_reference: naked_identifier: my_wh - warehouse_object_properties: - keyword: WAREHOUSE_TYPE - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'SNOWPARK-OPTIMIZED'" - keyword: warehouse_size - comparison_operator: raw_comparison_operator: '=' - warehouse_size: "'medium'" - keyword: SCALING_POLICY - comparison_operator: raw_comparison_operator: '=' - scaling_policy: ECONOMY - comment_equals_clause: keyword: comment comparison_operator: raw_comparison_operator: '=' quoted_literal: "'comment'" - warehouse_object_properties: keyword: auto_suspend comparison_operator: raw_comparison_operator: '=' numeric_literal: '60' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/datetime_intervals.sql000066400000000000000000000016631451700765000263730ustar00rootroot00000000000000SELECT DATEADD(NANOSECONDS, -1, '2020-01-01'); -- https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#interval-examples select to_date('2018-04-15') + INTERVAL '1 year'; select to_time('04:15:29') + INTERVAL '3 hours, 18 minutes'; select current_timestamp + INTERVAL '1 year, 3 quarters, 4 months, 5 weeks, 6 days, 7 minutes, 8 seconds, 1000 milliseconds, 4000000 microseconds, 5000000001 nanoseconds' as complex_interval1; select to_date('2025-01-17') + INTERVAL '1 y, 3 q, 4 mm, 5 w, 6 d, 7 h, 9 m, 8 s, 1000 ms, 445343232 us, 898498273498 ns' as complex_interval2; select name, hire_date from employees where hire_date > current_date - INTERVAL '2 y, 3 month'; select ts + INTERVAL '4 seconds' from t1 where ts > to_timestamp('2014-04-05 01:02:03'); 
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/datetime_intervals.yml000066400000000000000000000123351451700765000263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 36aac154cc29a8174068e6e2c01eaa791f94c0af65b03ae16a94ff319675d85f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: NANOSECONDS - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'2020-01-01'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_date bracketed: start_bracket: ( expression: quoted_literal: "'2018-04-15'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 year'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_time bracketed: start_bracket: ( expression: quoted_literal: "'04:15:29'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'3 hours, 18 minutes'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: bare_function: current_timestamp binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 year, 3 quarters, 4 months, 5 weeks, 6 days,\ \ 7 minutes, 8 seconds,\n 1000 milliseconds,\ \ 4000000 microseconds, 5000000001 nanoseconds'" alias_expression: keyword: as naked_identifier: complex_interval1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: function: function_name: function_name_identifier: to_date bracketed: start_bracket: ( expression: quoted_literal: "'2025-01-17'" end_bracket: ) binary_operator: + keyword: INTERVAL date_constructor_literal: "'1 y, 3 q, 4 mm, 5 w, 6 d, 7 h, 9 m, 8 s,\n\ \ 1000 ms, 445343232 us, 898498273498\ \ ns'" alias_expression: keyword: as naked_identifier: complex_interval2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: hire_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees where_clause: keyword: where expression: column_reference: naked_identifier: hire_date comparison_operator: raw_comparison_operator: '>' bare_function: current_date binary_operator: '-' keyword: INTERVAL date_constructor_literal: "'2 y, 3 month'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: column_reference: naked_identifier: ts binary_operator: + keyword: INTERVAL date_constructor_literal: "'4 seconds'" from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 where_clause: keyword: where expression: 
column_reference: naked_identifier: ts comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: to_timestamp bracketed: start_bracket: ( expression: quoted_literal: "'2014-04-05 01:02:03'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/datetime_units.sql000066400000000000000000000152511451700765000255240ustar00rootroot00000000000000SELECT t1.field, EXTRACT(year FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(y FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yyy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yyyy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(years FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(month FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mm FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mon FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mons FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(months FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(day FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(d FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dd FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(days FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofmonth FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofweek FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekday FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dow FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dw FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofweekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekday_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dow_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dw_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dayofyear FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearday FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(doy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(dy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(week FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(w FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(wk FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekofyear FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(woy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(wy FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(week_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekofyeariso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(weekofyear_iso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(quarter FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(q FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(qtr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(qtrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(quarters FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearofweek FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(yearofweekiso FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hour FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(h FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hh FROM 
t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hr FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hours FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(hrs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(minute FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(m FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mi FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(min FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(minutes FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(mins FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(second FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(s FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(sec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(seconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(secs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(millisecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ms FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(msec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(milliseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(microsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(us FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(usec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(microseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(ns FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nsec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosec FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanoseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nanosecs FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(nseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_second FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_seconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_millisecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_milliseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_microsecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_microseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_nanosecond FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(epoch_nanoseconds FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(timezone_hour FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(tzh FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(timezone_minute FROM t1.sometime) AS a FROM t1; SELECT t1.field, EXTRACT(tzm FROM t1.sometime) AS a FROM t1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/datetime_units.yml000066400000000000000000002651541451700765000255370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f460493de140f638f4dd9eb1395cbaf4587a1ccb7526ce784a60fcc24dd8b5c2 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: year keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: y keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yyy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yyyy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: years keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: month keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: mm keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: mon keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: mons keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: months keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: day keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: d keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dd keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: days keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dayofmonth keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dayofweek keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekday keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dow keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dw keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dayofweekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekday_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dow_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dw_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dayofyear keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yearday keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: doy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: dy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: week keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: w keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: wk keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekofyear keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: woy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: wy keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: week_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekofyeariso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: weekofyear_iso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: quarter keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: q keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: qtr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: qtrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: quarters keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yearofweek keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: yearofweekiso keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: hour keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: h keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: hh keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: hr keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: hours keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: hrs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: minute keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: m keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: mi keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: min keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: minutes keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: mins keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: second keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: s keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: sec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: seconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: secs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: millisecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ms keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: msec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: milliseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: microsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: us keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: usec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: microseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nanosecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: ns keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nsec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nanosec keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nanoseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nanosecs keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: nseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_second keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_seconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_millisecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_milliseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_microsecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_microseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_nanosecond keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: epoch_nanoseconds keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: timezone_hour keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: tzh keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: timezone_minute keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: field - comma: ',' - select_clause_element: function: function_name: function_name_identifier: EXTRACT bracketed: start_bracket: ( date_part: tzm keyword: FROM expression: column_reference: - naked_identifier: t1 - dot: . 
- naked_identifier: sometime end_bracket: ) alias_expression: keyword: AS naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/delete.sql
delete from leased_bicycles;
delete from leased_bicycles as lb;
delete from x using y, z;
delete from x where 1 = 2;
delete from leased_bicycles using returned_bicycles where leased_bicycles.bicycle_id = returned_bicycles.bicycle_id;
delete from leased_bicycles as lb using returned_bicycles as rb where lb.bicycle_id = rb.bicycle_id;
delete from leased_bicycles lb using returned_bicycles rb where lb.bicycle_id = rb.bicycle_id;
delete from leased_bicycles using returned_bicycles, broken_bicycles where leased_bicycles.bicycle_id = returned_bicycles.bicycle_id and leased_bicycles.bicycle_id = broken_bicycles.bicycle_id;
delete from leased_bicycles as lb using returned_bicycles as rb, broken_bicycles as bb where lb.bicycle_id = rb.bicycle_id and lb.bicycle_id = bb.bicycle_id;
delete from leased_bicycles lb using returned_bicycles rb, broken_bicycles bb where lb.bicycle_id = rb.bicycle_id and lb.bicycle_id = bb.bicycle_id;
delete from leased_bicycles using (select bicycle_id as bicycle_id from returned_bicycles) as returned where leased_bicycles.bicycle_id = returned.bicycle_id;
delete from leased_bicycles using (select bicycle_id as bicycle_id from returned_bicycles where 1=2) as returned where leased_bicycles.bicycle_id = returned.bicycle_id;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/delete.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ebfa67a74f2c11ab6f7f3b464b9046dc119fef9e7938acc264322711dfc5c4d1 file: - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: keyword: as naked_identifier: lb - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: x - keyword: using - table_expression: table_reference: naked_identifier: y - comma: ',' - table_expression: table_reference: naked_identifier: z - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: x - where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_bicycles - dot: .
- naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: keyword: as naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: keyword: as naked_identifier: rb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: naked_identifier: rb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_bicycles - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: broken_bicycles - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: keyword: as naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: keyword: as naked_identifier: rb - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - alias_expression: keyword: as naked_identifier: bb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bb - dot: . 
- naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - alias_expression: naked_identifier: lb - keyword: using - table_expression: table_reference: naked_identifier: returned_bicycles - alias_expression: naked_identifier: rb - comma: ',' - table_expression: table_reference: naked_identifier: broken_bicycles - alias_expression: naked_identifier: bb - where_clause: keyword: where expression: - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: rb - dot: . - naked_identifier: bicycle_id - binary_operator: and - column_reference: - naked_identifier: lb - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bb - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: bicycle_id alias_expression: keyword: as naked_identifier: bicycle_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_bicycles end_bracket: ) - alias_expression: keyword: as naked_identifier: returned - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned - dot: . - naked_identifier: bicycle_id - statement_terminator: ; - statement: delete_statement: - keyword: delete - keyword: from - table_reference: naked_identifier: leased_bicycles - keyword: using - table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: bicycle_id alias_expression: keyword: as naked_identifier: bicycle_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_bicycles where_clause: keyword: where expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' end_bracket: ) - alias_expression: keyword: as naked_identifier: returned - where_clause: keyword: where expression: - column_reference: - naked_identifier: leased_bicycles - dot: . - naked_identifier: bicycle_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned - dot: . 
- naked_identifier: bicycle_id - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/describe_statements.sql
DESCRIBE RESULT 'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6';
DESC RESULT 'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6';
DESCRIBE RESULT LAST_QUERY_ID();
DESC RESULT LAST_QUERY_ID();
DESCRIBE NETWORK POLICY my_policy;
DESC NETWORK POLICY my_policy;
DESCRIBE SHARE sales_s;
DESC SHARE sales_s;
DESCRIBE SHARE ab67890.sales_s;
DESC SHARE ab67890.sales_s;
DESCRIBE USER test_user;
DESC USER test_user;
DESCRIBE WAREHOUSE my_warehouse;
DESC WAREHOUSE my_warehouse;
DESCRIBE WAREHOUSE "my warehouse";
DESC WAREHOUSE "my warehouse";
DESCRIBE DATABASE my_database;
DESC DATABASE my_database;
DESCRIBE API INTEGRATION my_integration;
DESC API INTEGRATION my_integration;
DESCRIBE NOTIFICATION INTEGRATION my_integration;
DESC NOTIFICATION INTEGRATION my_integration;
DESCRIBE SECURITY INTEGRATION my_integration;
DESC SECURITY INTEGRATION my_integration;
DESCRIBE STORAGE INTEGRATION my_integration;
DESC STORAGE INTEGRATION my_integration;
DESCRIBE INTEGRATION my_integration;
DESC INTEGRATION my_integration;
DESCRIBE SESSION POLICY my_session_policy;
DESC SESSION POLICY my_session_policy;
DESCRIBE SCHEMA my_schema;
DESC SCHEMA my_schema;
DESCRIBE SCHEMA my_database.my_schema;
DESC SCHEMA my_database.my_schema;
DESCRIBE TABLE my_table;
DESC TABLE my_table;
DESCRIBE TABLE my_database.my_schema.my_table;
DESC TABLE my_database.my_schema.my_table;
DESCRIBE TABLE my_table TYPE = COLUMNS;
DESC TABLE my_table TYPE = COLUMNS;
DESCRIBE TABLE my_table TYPE = STAGE;
DESC TABLE my_table TYPE = STAGE;
DESCRIBE EXTERNAL TABLE my_table;
DESC EXTERNAL TABLE my_table;
DESCRIBE EXTERNAL TABLE my_table TYPE = COLUMNS;
DESC EXTERNAL TABLE my_table TYPE = COLUMNS;
DESCRIBE EXTERNAL TABLE my_table TYPE = STAGE;
DESC EXTERNAL TABLE my_table TYPE = STAGE;
DESCRIBE VIEW my_view;
DESC VIEW my_view;
DESCRIBE VIEW my_database.my_schema.my_view;
DESC VIEW my_database.my_schema.my_view;
DESCRIBE MATERIALIZED VIEW my_view;
DESC MATERIALIZED VIEW my_view;
DESCRIBE MATERIALIZED VIEW my_database.my_schema.my_view;
DESC MATERIALIZED VIEW my_database.my_schema.my_view;
DESCRIBE SEQUENCE my_sequence;
DESC SEQUENCE my_sequence;
DESCRIBE MASKING POLICY my_masking_policy;
DESC MASKING POLICY my_masking_policy;
DESCRIBE ROW ACCESS POLICY my_row_access_policy;
DESC ROW ACCESS POLICY my_row_access_policy;
DESCRIBE FILE FORMAT my_file_format;
DESC FILE FORMAT my_file_format;
DESCRIBE STAGE my_stage;
DESC STAGE my_stage;
DESCRIBE PIPE my_pipe;
DESC PIPE my_pipe;
DESCRIBE STREAM my_stream;
DESC STREAM my_stream;
DESCRIBE TASK my_task;
DESC TASK my_task;
DESCRIBE FUNCTION multiply(NUMBER, NUMBER);
DESC FUNCTION multiply(NUMBER, NUMBER);
DESCRIBE PROCEDURE my_pi();
DESC PROCEDURE my_pi();
DESCRIBE PROCEDURE area_of_circle(FLOAT);
DESC PROCEDURE area_of_circle(FLOAT);
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/describe_statements.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
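#
# As a quick illustration of the statements above, a parse tree like the one
# below can be reproduced interactively with SQLFluff's Python API. This is a
# minimal sketch, not the generator script itself; the statement comes from
# describe_statements.sql above, the variable names are illustrative, and the
# exact API surface may vary between SQLFluff versions:
#
#     from sqlfluff.core import Linter
#
#     linter = Linter(dialect="snowflake")
#     parsed = linter.parse_string("DESC WAREHOUSE my_warehouse;")
#     # The YAML below is a serialised form of this parse tree.
#     print(parsed.tree.stringify())
#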
_hash: 990d8f15823f8f62dc00f28663b53829fcfb81cd480f1cdccf6ad16b819589e1 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: RESULT - quoted_literal: "'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'" - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: RESULT - quoted_literal: "'f2f07bdb-6a08-4689-9ad8-a1ba968a44b6'" - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: RESULT - keyword: LAST_QUERY_ID - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: RESULT - keyword: LAST_QUERY_ID - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: NETWORK - keyword: POLICY - object_reference: naked_identifier: my_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SHARE - object_reference: naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SHARE - object_reference: naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SHARE - object_reference: - naked_identifier: ab67890 - dot: . - naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SHARE - object_reference: - naked_identifier: ab67890 - dot: . - naked_identifier: sales_s - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: USER - object_reference: naked_identifier: test_user - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: USER - object_reference: naked_identifier: test_user - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: WAREHOUSE - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: WAREHOUSE - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: WAREHOUSE - object_reference: quoted_identifier: '"my warehouse"' - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: WAREHOUSE - object_reference: quoted_identifier: '"my warehouse"' - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: DATABASE - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: API - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: NOTIFICATION - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: NOTIFICATION - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; 
- statement: describe_statement: - keyword: DESCRIBE - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SECURITY - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STORAGE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STORAGE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: INTEGRATION - object_reference: naked_identifier: my_integration - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SESSION - keyword: POLICY - object_reference: naked_identifier: my_session_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SESSION - keyword: POLICY - object_reference: naked_identifier: my_session_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SCHEMA - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - schema_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SCHEMA - schema_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNS - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: TYPE - comparison_operator: raw_comparison_operator: '=' - keyword: STAGE - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MATERIALIZED - keyword: VIEW - table_reference: naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . 
- naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: my_database - dot: . - naked_identifier: my_schema - dot: . - naked_identifier: my_view - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SEQUENCE - sequence_reference: naked_identifier: my_sequence - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: SEQUENCE - sequence_reference: naked_identifier: my_sequence - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: my_masking_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: MASKING - keyword: POLICY - object_reference: naked_identifier: my_masking_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_row_access_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: my_row_access_policy - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FILE - keyword: FORMAT - object_reference: naked_identifier: my_file_format - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STAGE - object_reference: naked_identifier: my_stage - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STAGE - object_reference: naked_identifier: my_stage - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PIPE - object_reference: naked_identifier: my_pipe - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PIPE - object_reference: naked_identifier: my_pipe - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: STREAM - object_reference: naked_identifier: my_stream - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: STREAM - object_reference: naked_identifier: my_stream - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TASK - object_reference: naked_identifier: my_task - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: TASK - object_reference: naked_identifier: my_task - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FUNCTION - function_name: function_name_identifier: multiply - bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: multiply - bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PROCEDURE - function_name: function_name_identifier: my_pi - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - 
statement: describe_statement: - keyword: DESC - keyword: PROCEDURE - function_name: function_name_identifier: my_pi - bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: PROCEDURE - function_name: function_name_identifier: area_of_circle - bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: PROCEDURE - function_name: function_name_identifier: area_of_circle - bracketed: start_bracket: ( data_type: data_type_identifier: FLOAT end_bracket: ) - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/drop_statements.sql
DROP CONNECTION MY_SCHEMA.T1;
DROP CONNECTION IF EXISTS MY_SCHEMA.T1;
DROP DATABASE MYTESTDB2;
DROP DATABASE IF EXISTS MYTESTDB2;
DROP DATABASE IF EXISTS MYTESTDB2 CASCADE;
DROP EXTERNAL TABLE MY_SCHEMA.T1;
DROP EXTERNAL TABLE IF EXISTS MY_SCHEMA.T1;
DROP EXTERNAL TABLE IF EXISTS MY_SCHEMA.T1 RESTRICT;
DROP FILE FORMAT MY_SCHEMA.MY_FORMAT;
DROP FILE FORMAT IF EXISTS MY_SCHEMA.MY_FORMAT;
DROP FUNCTION MY_SCHEMA.MY_FUNCTION(NUMBER, NUMBER);
DROP FUNCTION IF EXISTS MY_SCHEMA.MY_FUNCTION(NUMBER, NUMBER);
DROP INTEGRATION T2;
DROP API INTEGRATION IF EXISTS T2;
DROP MANAGED ACCOUNT READER_ACCT1;
DROP MASKING POLICY SSN_MASK;
DROP MATERIALIZED VIEW MY_SCHEMA.MV1;
DROP MATERIALIZED VIEW IF EXISTS MY_SCHEMA.MV1;
DROP NETWORK POLICY MY_POLICY;
DROP NETWORK POLICY IF EXISTS MY_POLICY;
DROP PIPE MY_SCHEMA.MYPIPE;
DROP PIPE IF EXISTS MY_SCHEMA.MYPIPE;
DROP PROCEDURE MY_SCHEMA.ADD_ACCOUNTING_USER(VARCHAR);
DROP PROCEDURE IF EXISTS MY_SCHEMA.ADD_ACCOUNTING_USER(VARCHAR);
DROP RESOURCE MONITOR MY_MONITOR_RESOURCE;
DROP ROLE MYROLE;
DROP ROLE IF EXISTS MYROLE;
DROP ROW ACCESS POLICY RAP_TABLE_EMPLOYEE_INFO;
DROP ROW ACCESS POLICY IF EXISTS RAP_TABLE_EMPLOYEE_INFO;
DROP SCHEMA MY_SCHEMA;
DROP SCHEMA IF EXISTS MY_SCHEMA;
DROP SCHEMA IF EXISTS MY_SCHEMA CASCADE;
DROP SEQUENCE MY_SCHEMA.INVOICE_SEQUENCE_NUMBER;
DROP SEQUENCE IF EXISTS MY_SCHEMA.INVOICE_SEQUENCE_NUMBER;
DROP SEQUENCE IF EXISTS MY_SCHEMA.INVOICE_SEQUENCE_NUMBER CASCADE;
DROP SESSION POLICY SESSION_POLICY_PRODUCTION_1;
DROP SESSION POLICY IF EXISTS SESSION_POLICY_PRODUCTION_1;
DROP SHARE SALES_S;
DROP STAGE MY_SCHEMA.MY_STAGE;
DROP STAGE IF EXISTS MY_SCHEMA.MY_STAGE;
DROP STREAM MY_SCHEMA.T2;
DROP STREAM IF EXISTS MY_SCHEMA.T2;
DROP TABLE MY_SCHEMA.T2;
DROP TABLE IF EXISTS MY_SCHEMA.T2;
DROP TABLE IF EXISTS MY_SCHEMA.T2 CASCADE;
DROP TAG COST_CENTER;
DROP TAG IF EXISTS COST_CENTER;
DROP TASK MY_SCHEMA.T2;
DROP TASK IF EXISTS MY_SCHEMA.T2;
DROP USER USER1;
DROP USER IF EXISTS USER1;
DROP VIEW MY_SCHEMA.MY_VIEW;
DROP VIEW IF EXISTS MY_SCHEMA.MY_VIEW;
DROP WAREHOUSE MY_WAREHOUSE;
DROP WAREHOUSE IF EXISTS MY_WAREHOUSE;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/drop_statements.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
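#
# The "hash" check described above can be pictured as hashing the fixture with
# its own `_hash:` line excluded. This is only a sketch of the idea, under
# assumed details: the real scheme lives in SQLFluff's test helpers, and the
# function name `fixture_hash` is invented here for illustration:
#
#     import hashlib
#
#     def fixture_hash(yaml_text: str) -> str:
#         # Hash every line except the embedded `_hash:` line itself.
#         kept = [ln for ln in yaml_text.splitlines(keepends=True)
#                 if not ln.startswith("_hash:")]
#         return hashlib.sha256("".join(kept).encode("utf-8")).hexdigest()
#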
_hash: 18990817d9700a4630edc7caf16f00fcaee62f490269d80f93ed1084f69d9f33 file: - statement: drop_object_statement: - keyword: DROP - keyword: CONNECTION - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: CONNECTION - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: MYTESTDB2 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: MYTESTDB2 - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: MYTESTDB2 - keyword: CASCADE - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T1 - keyword: RESTRICT - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: FILE - keyword: FORMAT - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_FORMAT - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: FILE - keyword: FORMAT - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_FORMAT - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: MY_FUNCTION - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: naked_identifier: MY_SCHEMA dot: . 
function_name_identifier: MY_FUNCTION - function_parameter_list: bracketed: - start_bracket: ( - data_type: data_type_identifier: NUMBER - comma: ',' - data_type: data_type_identifier: NUMBER - end_bracket: ) - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: INTEGRATION - object_reference: naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: API - keyword: INTEGRATION - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: MANAGED - keyword: ACCOUNT - naked_identifier: READER_ACCT1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: MASKING - keyword: POLICY - naked_identifier: SSN_MASK - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MV1 - statement_terminator: ; - statement: drop_materialized_view_statement: - keyword: DROP - keyword: MATERIALIZED - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MV1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: NETWORK - keyword: POLICY - naked_identifier: MY_POLICY - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: NETWORK - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: MY_POLICY - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: PIPE - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MYPIPE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: PIPE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MYPIPE - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - function_name: naked_identifier: MY_SCHEMA dot: . function_name_identifier: ADD_ACCOUNTING_USER - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - function_name: naked_identifier: MY_SCHEMA dot: . 
function_name_identifier: ADD_ACCOUNTING_USER - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: RESOURCE - keyword: MONITOR - object_reference: naked_identifier: MY_MONITOR_RESOURCE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - naked_identifier: MYROLE - statement_terminator: ; - statement: drop_role_statement: - keyword: DROP - keyword: ROLE - keyword: IF - keyword: EXISTS - naked_identifier: MYROLE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - object_reference: naked_identifier: RAP_TABLE_EMPLOYEE_INFO - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: ROW - keyword: ACCESS - keyword: POLICY - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: RAP_TABLE_EMPLOYEE_INFO - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: MY_SCHEMA - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: MY_SCHEMA - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: MY_SCHEMA - keyword: CASCADE - statement_terminator: ; - statement: drop_sequence_statement: - keyword: DROP - keyword: SEQUENCE - sequence_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: INVOICE_SEQUENCE_NUMBER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: INVOICE_SEQUENCE_NUMBER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SEQUENCE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: INVOICE_SEQUENCE_NUMBER - keyword: CASCADE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SESSION - keyword: POLICY - naked_identifier: SESSION_POLICY_PRODUCTION_1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SESSION - keyword: POLICY - keyword: IF - keyword: EXISTS - naked_identifier: SESSION_POLICY_PRODUCTION_1 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: SHARE - object_reference: naked_identifier: SALES_S - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STAGE - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_STAGE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STAGE - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_STAGE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STREAM - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: STREAM - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . 
- naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - keyword: CASCADE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TAG - object_reference: naked_identifier: COST_CENTER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TAG - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: COST_CENTER - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TASK - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: TASK - keyword: IF - keyword: EXISTS - object_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: T2 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - role_reference: naked_identifier: USER1 - statement_terminator: ; - statement: drop_user_statement: - keyword: DROP - keyword: USER - keyword: IF - keyword: EXISTS - role_reference: naked_identifier: USER1 - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_VIEW - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: - naked_identifier: MY_SCHEMA - dot: . - naked_identifier: MY_VIEW - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: WAREHOUSE - naked_identifier: MY_WAREHOUSE - statement_terminator: ; - statement: drop_object_statement: - keyword: DROP - keyword: WAREHOUSE - keyword: IF - keyword: EXISTS - naked_identifier: MY_WAREHOUSE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/escape.sql000066400000000000000000000001351451700765000237410ustar00rootroot00000000000000-- Backslash escapes work in Snowflake select 'c\' ' as escaped, 'c\' '' ' as escaped_double sqlfluff-2.3.5/test/fixtures/dialects/snowflake/escape.yml000066400000000000000000000014611451700765000237460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 6e72a3a8c339582009afd1363d51f488cf1de090bbcdeccdf61454c73b35bc9a file: statement: select_statement: select_clause: - keyword: select - select_clause_element: quoted_literal: "'c\\' '" alias_expression: keyword: as naked_identifier: escaped - comma: ',' - select_clause_element: quoted_literal: "'c\\' '' '" alias_expression: keyword: as naked_identifier: escaped_double sqlfluff-2.3.5/test/fixtures/dialects/snowflake/execute_immediate.sql000066400000000000000000000004501451700765000261610ustar00rootroot00000000000000EXECUTE IMMEDIATE 'select 1'; EXECUTE IMMEDIATE $$ SELECT PI(); $$; SET pie = $$ SELECT PI(); $$ ; SET one = 1; SET two = 2; EXECUTE IMMEDIATE $pie; EXECUTE IMMEDIATE $pie USING (one, two); SET three = 'select ? + ?'; EXECUTE IMMEDIATE :three; EXECUTE IMMEDIATE :three USING (one, two); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/execute_immediate.yml000066400000000000000000000046361451700765000261750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e04c349292c858194d9339483207da94a5fdf7483fa466ba1eb05a2eae1efb6 file: - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - quoted_literal: "'select 1'" - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - quoted_literal: "$$\n SELECT PI();\n$$" - statement_terminator: ; - statement: set_statement: keyword: SET variable: pie comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "$$\n SELECT PI();\n$$" - statement_terminator: ; - statement: set_statement: keyword: SET variable: one comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' - statement_terminator: ; - statement: set_statement: keyword: SET variable: two comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '2' - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - variable: $pie - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - variable: $pie - keyword: USING - bracketed: - start_bracket: ( - variable: one - comma: ',' - variable: two - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: three comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'select ? + ?'" - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - colon: ':' - variable: three - statement_terminator: ; - statement: execute_immediate_clause: - keyword: EXECUTE - keyword: IMMEDIATE - colon: ':' - variable: three - keyword: USING - bracketed: - start_bracket: ( - variable: one - comma: ',' - variable: two - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/execute_task.sql000066400000000000000000000000661451700765000251700ustar00rootroot00000000000000EXECUTE TASK my_task; EXECUTE TASK myschema.my_task; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/execute_task.yml000066400000000000000000000014061451700765000251710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dd10bd2ab1e3898dc8e3124691d2007bbdc93d92d1a00b098cbfd5e3e10430a2 file: - statement: execute_task_clause: - keyword: EXECUTE - keyword: TASK - object_reference: naked_identifier: my_task - statement_terminator: ; - statement: execute_task_clause: - keyword: EXECUTE - keyword: TASK - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: my_task - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/explain.sql000066400000000000000000000001571451700765000241450ustar00rootroot00000000000000explain using tabular select 1; explain using json select 1; explain using text select 1; explain select 1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/explain.yml000066400000000000000000000026101451700765000241430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bca49ca75c902d69398ecb6af0c0692a1d9fa6d181a81349205f544d0e5091d7 file: - statement: explain_statement: - keyword: explain - keyword: using - keyword: tabular - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: using - keyword: json - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: - keyword: explain - keyword: using - keyword: text - select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: explain_statement: keyword: explain select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/first_value_ignore_nulls.sql000066400000000000000000000001741451700765000276070ustar00rootroot00000000000000select a, coalesce(first_value(case when a then b else null end) ignore nulls over (order by e), false) as c from d sqlfluff-2.3.5/test/fixtures/dialects/snowflake/first_value_ignore_nulls.yml000066400000000000000000000050511451700765000276100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1780bc1b0a80daf11a8cf042139cb566504f310f930cc6dce88febf2470b40b4 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: function_name: function_name_identifier: coalesce bracketed: - start_bracket: ( - expression: function: - function_name: function_name_identifier: first_value - bracketed: start_bracket: ( expression: case_expression: - keyword: case - when_clause: - keyword: when - expression: column_reference: naked_identifier: a - keyword: then - expression: column_reference: naked_identifier: b - else_clause: keyword: else expression: null_literal: 'null' - keyword: end end_bracket: ) - keyword: ignore - keyword: nulls - over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: e end_bracket: ) - comma: ',' - expression: boolean_literal: 'false' - end_bracket: ) alias_expression: keyword: as naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: d sqlfluff-2.3.5/test/fixtures/dialects/snowflake/frame_clause.sql000066400000000000000000000003061451700765000251270ustar00rootroot00000000000000SELECT a, LAST_VALUE(foo) IGNORE NULLS OVER ( PARTITION BY bar ORDER BY baz ASC ROWS BETWEEN $my_var PRECEDING AND CURRENT ROW ) AS vehicle_type_id_last_value FROM foo ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/frame_clause.yml000066400000000000000000000042201451700765000251300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0e1728a3d30a3a0255a6e7c3669cb5d0b83b21017da070cd60e71bc62ffb179c file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: LAST_VALUE - bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) - keyword: IGNORE - keyword: NULLS - over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: bar orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: baz - keyword: ASC frame_clause: - keyword: ROWS - keyword: BETWEEN - variable: $my_var - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: AS naked_identifier: vehicle_type_id_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/get_statement.sql000066400000000000000000000003331451700765000253440ustar00rootroot00000000000000get @%mytable file://C:\temp\load; get @~/myfiles file:///tmp/data/; get @~/myfiles file:///tmp/data/ PATTERN = '.*foo.*'; get @~/myfiles file:///tmp/data/ PATTERN = $foo; get @~/myfiles file:///tmp/data/ PARALLEL = 1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/get_statement.yml000066400000000000000000000027331451700765000253540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d52b5bbb775e0a3d534c87ef38be635e991e1e69028687f1e5e5bc461731e624 file: - statement: get_statement: keyword: get stage_path: '@%mytable' unquoted_file_path: file://C:\temp\load - statement_terminator: ; - statement: get_statement: keyword: get stage_path: '@~/myfiles' unquoted_file_path: file:///tmp/data/ - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*foo.*'" - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - variable: $foo - statement_terminator: ; - statement: get_statement: - keyword: get - stage_path: '@~/myfiles' - unquoted_file_path: file:///tmp/data/ - keyword: PARALLEL - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/grant_revoke.sql000066400000000000000000000111561451700765000251740ustar00rootroot00000000000000GRANT OWNERSHIP ON SCHEMA MY_DATABASE.MY_SCHEMA TO ROLE MY_ROLE; GRANT ROLE MY_ROLE TO ROLE MY_OTHER_ROLE; grant use_any_role on integration external_oauth_1 to role1; grant ownership on table myschema.mytable to role analyst; grant ownership on all tables in schema public to role analyst; grant ownership on all tables in schema mydb.public to role analyst; grant ownership on all tables in schema mydb.public to role analyst copy current grants; GRANT ROLE ROLENAME TO ROLE IDENTIFIER($THIS_ROLE); GRANT OWNERSHIP ON ROLE TEST_ROLE TO ROLE DIFFERENT_ROLE; grant all on all materialized views in database my_db to role analyst; grant all on all file formats in database my_db to role analyst; grant create temporary table on schema my_db.my_schema to role analyst; grant all on future pipes in database my_db to role analyst; grant all on future file formats in database my_db to role analyst; grant all on future materialized views in database my_db to role analyst; grant all on future pipes in database my_db to role analyst; grant usage on all sequences in database my_db to role analyst; grant all on all materialized views in database my_db to role analyst; grant all on all sequences in database my_db to role analyst; grant all on all functions in database my_db to role analyst; grant all on all file formats in database my_db to role analyst; grant all on all stages in database my_db to role analyst; grant select on all views in database my_db to role analyst; revoke role analyst from role sysadmin; revoke select,insert on future tables in schema mydb.myschema from role role1; revoke all privileges on function add5(number) from role analyst; revoke grant option for operate on warehouse report_wh from role analyst; revoke select on all tables in schema mydb.myschema from role analyst; revoke operate on warehouse report_wh from role analyst; revoke reference_usage on database database2 from share share1; REVOKE OWNERSHIP ON ROLE TEST_ROLE FROM ROLE DIFFERENT_ROLE; grant operate on warehouse report_wh to role analyst; grant operate on warehouse report_wh to role analyst with grant option; grant select on all tables in schema mydb.myschema to role analyst; grant all privileges on function mydb.myschema.add5(number) to role analyst; grant all privileges on function mydb.myschema.add5(string) to role analyst; grant usage on procedure 
mydb.myschema.myprocedure(number) to role analyst; grant create materialized view on schema mydb.myschema to role myrole; grant select,insert on future tables in schema mydb.myschema to role role1; grant usage on future schemas in database mydb to role role1; grant usage on database database1 to share share1; grant usage on schema database1.schema1 to share share1; grant reference_usage on database database2 to share share1; grant select on view view2 to share share1; grant usage on database mydb to share share1; grant usage on schema mydb.public to share share1; grant usage on function mydb.shared_schema.function1 to share share1; grant select on all tables in schema mydb.public to share share1; grant usage on schema mydb.shared_schema to share share1; grant select on view mydb.shared_schema.view1 to share share1; grant select on view mydb.shared_schema.view3 to share share1; grant role analyst to user user1; revoke all privileges on procedure clean_schema(string) from role analyst; revoke all privileges on function add5(string) from role analyst; revoke select on view mydb.shared_schema.view1 from share share1; revoke usage on schema mydb.shared_schema from share share1; revoke select on all tables in schema mydb.public from share share1; revoke usage on schema mydb.public from share share1; revoke usage on database mydb from share share1; grant apply masking policy on account to role my_role; grant apply row access policy on account to role my_role; grant apply session policy on account to role my_role; grant apply tag on account to role my_role; grant attach policy on account to role my_role; grant execute task on account to role my_role; grant import share on account to role my_role; grant manage grants on account to role my_role; grant monitor execution on account to role my_role; grant monitor usage on account to role my_role; grant override share restrictions on account to role my_role; grant create account on account to role my_role; grant create share on account to role my_role; grant create network policy on account to role my_role; grant create data exchange listing on account to role my_role; GRANT MANAGE ACCOUNT SUPPORT CASES ON ACCOUNT TO ROLE my_role; GRANT MANAGE ORGANIZATION SUPPORT CASES ON ACCOUNT TO ROLE my_role; GRANT MANAGE USER SUPPORT CASES ON ACCOUNT TO ROLE my_role; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/grant_revoke.yml000066400000000000000000000656761451700765000252160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: aeee2f396458b254c167e80035f3da42c44feeb3cb7ace72d3ccb39f9357113e file: - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: SCHEMA - object_reference: - naked_identifier: MY_DATABASE - dot: . 
- naked_identifier: MY_SCHEMA - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_ROLE - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ROLE - object_reference: naked_identifier: MY_ROLE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: MY_OTHER_ROLE - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: use_any_role - keyword: 'on' - keyword: integration - object_reference: naked_identifier: external_oauth_1 - keyword: to - role_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: ownership - keyword: 'on' - keyword: table - object_reference: - naked_identifier: myschema - dot: . - naked_identifier: mytable - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: ownership - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: naked_identifier: public - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: ownership - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: public - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: ownership - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: public - keyword: to - keyword: role - role_reference: naked_identifier: analyst - keyword: copy - keyword: current - keyword: grants - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: ROLE - object_reference: naked_identifier: ROLENAME - keyword: TO - keyword: ROLE - role_reference: keyword: IDENTIFIER bracketed: start_bracket: ( variable: $THIS_ROLE end_bracket: ) - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: OWNERSHIP - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: TEST_ROLE - keyword: TO - keyword: ROLE - role_reference: naked_identifier: DIFFERENT_ROLE - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: materialized - keyword: views - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: file - keyword: formats - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: temporary - keyword: table - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: my_db - dot: . 
- naked_identifier: my_schema - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: future - keyword: pipes - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: future - keyword: file - keyword: formats - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: future - keyword: materialized - keyword: views - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: future - keyword: pipes - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: all - keyword: sequences - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: materialized - keyword: views - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: sequences - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: functions - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: file - keyword: formats - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: 'on' - keyword: all - keyword: stages - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: all - keyword: views - keyword: in - keyword: database - object_reference: naked_identifier: my_db - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: role - object_reference: naked_identifier: analyst - keyword: from - 
keyword: role - object_reference: naked_identifier: sysadmin - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: select - comma: ',' - keyword: insert - keyword: 'on' - keyword: future - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - keyword: from - keyword: role - object_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: all - keyword: privileges - keyword: 'on' - keyword: function - function_name: function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: grant - keyword: option - keyword: for - keyword: operate - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: report_wh - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: select - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: operate - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: report_wh - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: reference_usage - keyword: 'on' - keyword: database - object_reference: naked_identifier: database2 - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: REVOKE - keyword: OWNERSHIP - keyword: 'ON' - keyword: ROLE - object_reference: naked_identifier: TEST_ROLE - keyword: FROM - keyword: ROLE - object_reference: naked_identifier: DIFFERENT_ROLE - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: operate - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: report_wh - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: operate - keyword: 'on' - keyword: warehouse - object_reference: naked_identifier: report_wh - keyword: to - keyword: role - role_reference: naked_identifier: analyst - keyword: with - keyword: grant - keyword: option - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: privileges - keyword: 'on' - keyword: function - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . 
- function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: all - keyword: privileges - keyword: 'on' - keyword: function - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: procedure - function_name: - naked_identifier: mydb - dot: . - naked_identifier: myschema - dot: . - function_name_identifier: myprocedure - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: number end_bracket: ) - keyword: to - keyword: role - role_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: materialized - keyword: view - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - keyword: to - keyword: role - role_reference: naked_identifier: myrole - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - comma: ',' - keyword: insert - keyword: 'on' - keyword: future - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: myschema - keyword: to - keyword: role - role_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: future - keyword: schemas - keyword: in - keyword: database - object_reference: naked_identifier: mydb - keyword: to - keyword: role - role_reference: naked_identifier: role1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: database - object_reference: naked_identifier: database1 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: database1 - dot: . - naked_identifier: schema1 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: reference_usage - keyword: 'on' - keyword: database - object_reference: naked_identifier: database2 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: view - object_reference: naked_identifier: view2 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: database - object_reference: naked_identifier: mydb - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: mydb - dot: . 
- naked_identifier: public - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: function - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - dot: . - naked_identifier: function1 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: public - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: usage - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: view - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - dot: . - naked_identifier: view1 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: select - keyword: 'on' - keyword: view - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - dot: . - naked_identifier: view3 - keyword: to - keyword: share - role_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: role - object_reference: naked_identifier: analyst - keyword: to - keyword: user - role_reference: naked_identifier: user1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: all - keyword: privileges - keyword: 'on' - keyword: procedure - function_name: function_name_identifier: clean_schema - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: all - keyword: privileges - keyword: 'on' - keyword: function - function_name: function_name_identifier: add5 - function_parameter_list: bracketed: start_bracket: ( data_type: data_type_identifier: string end_bracket: ) - keyword: from - keyword: role - object_reference: naked_identifier: analyst - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: select - keyword: 'on' - keyword: view - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - dot: . - naked_identifier: view1 - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: usage - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: shared_schema - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: select - keyword: 'on' - keyword: all - keyword: tables - keyword: in - keyword: schema - object_reference: - naked_identifier: mydb - dot: . 
- naked_identifier: public - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: usage - keyword: 'on' - keyword: schema - object_reference: - naked_identifier: mydb - dot: . - naked_identifier: public - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: revoke - keyword: usage - keyword: 'on' - keyword: database - object_reference: naked_identifier: mydb - keyword: from - keyword: share - object_reference: naked_identifier: share1 - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: apply - keyword: masking - keyword: policy - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: apply - keyword: row - keyword: access - keyword: policy - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: apply - keyword: session - keyword: policy - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: apply - keyword: tag - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: attach - keyword: policy - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: execute - keyword: task - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: import - keyword: share - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: manage - keyword: grants - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: monitor - keyword: execution - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: monitor - keyword: usage - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: override - keyword: share - keyword: restrictions - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: account - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: share - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: 
naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: network - keyword: policy - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: grant - keyword: create - keyword: data - keyword: exchange - keyword: listing - keyword: 'on' - keyword: account - keyword: to - keyword: role - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: ACCOUNT - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: ORGANIZATION - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: my_role - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: MANAGE - keyword: USER - keyword: SUPPORT - keyword: CASES - keyword: 'ON' - keyword: ACCOUNT - keyword: TO - keyword: ROLE - role_reference: naked_identifier: my_role - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/group_by_all.sql000066400000000000000000000001471451700765000251620ustar00rootroot00000000000000select state, city, sum(retail_price * quantity) as gross_revenue from sales group by all; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/group_by_all.yml000066400000000000000000000030051451700765000251600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 12f9880b53f72cb92330ed22b593972385f50a472deb2febd34721e86676ad29 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: state - comma: ',' - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: - column_reference: naked_identifier: retail_price - binary_operator: '*' - column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: as naked_identifier: gross_revenue from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sales groupby_clause: - keyword: group - keyword: by - keyword: all statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/identifier_pseudo_function.sql000066400000000000000000000016601451700765000301130ustar00rootroot00000000000000-- https://docs.snowflake.com/en/sql-reference/identifier-literal.html -- Although IDENTIFIER(...) uses the syntax of a function, it is not a true function and is not returned by commands such as SHOW FUNCTIONS. 
USE SCHEMA identifier('my_schema'); USE SCHEMA identifier('{{ params.schema_name }}'); create or replace database identifier('my_db'); create or replace schema identifier('my_schema'); create or replace table identifier('my_db.my_schema.my_table') (c1 number); create or replace table identifier('"my_table"') (c1 number); show tables in schema identifier('my_schema'); use schema identifier($schema_name); insert into identifier($table_name) values (1), (2), (3); select * from identifier($table_name) order by 1; select * from identifier('my_table') order by 1; select speed_of_light(); select identifier($my_function_name)(); select identifier('my_function_name')(); select identifier('my_function_name')(1, 2, 3); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/identifier_pseudo_function.yml000066400000000000000000000156351451700765000301240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bdd7d356ea2c7ef3bc8091e3cc967c8129b2ef875e37027755b9529a777828c4 file: - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'{{ params.schema_name }}'" end_bracket: ) - statement_terminator: ; - statement: create_statement: - keyword: create - keyword: or - keyword: replace - keyword: database - object_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_db'" end_bracket: ) - statement_terminator: ; - statement: create_schema_statement: - keyword: create - keyword: or - keyword: replace - keyword: schema - schema_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_db.my_schema.my_table'" end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: create - keyword: or - keyword: replace - keyword: table - table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'\"my_table\"'" end_bracket: ) - bracketed: start_bracket: ( column_definition: naked_identifier: c1 data_type: data_type_identifier: number end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: show - keyword: tables - keyword: in - keyword: schema - object_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_schema'" end_bracket: ) - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: schema - schema_reference: keyword: identifier bracketed: start_bracket: ( variable: $schema_name end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: into - table_reference: keyword: identifier bracketed: start_bracket: ( variable: $table_name end_bracket: ) - values_clause: 
- keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: keyword: identifier bracketed: start_bracket: ( variable: $table_name end_bracket: ) orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_table'" end_bracket: ) orderby_clause: - keyword: order - keyword: by - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: speed_of_light bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( variable: $my_function_name end_bracket: ) bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_function_name'" end_bracket: ) bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: identifier bracketed: start_bracket: ( quoted_identifier: "'my_function_name'" end_bracket: ) bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/inline_comment.sql000066400000000000000000000004101451700765000254750ustar00rootroot00000000000000# Classic Inline Comment SELECT 1; -- Classic Inline Comment SELECT 1; # Classic Inline Comment SELECT 1; //Snowflake Inline Comment SELECT 1;-- Classic Inline Comment No Space SELECT 1;# Classic Inline Comment No Space SELECT 1//Snowflake Inline Comment No Space sqlfluff-2.3.5/test/fixtures/dialects/snowflake/inline_comment.yml000066400000000000000000000025401451700765000255050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 64034404eb64dded43239909619cb8fc6801998aac47c42eeb646d053fb5c467 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1'
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/insert.sql000066400000000000000000000024751451700765000240140ustar00rootroot00000000000000
-- Single table INSERT INTO
INSERT INTO foo (bar) VALUES(current_timestamp);
INSERT OVERWRITE INTO foo (bar) VALUES(current_timestamp);
INSERT INTO foo (bar, baz) VALUES(1, 2), (3, 4);
INSERT INTO foo (bar) VALUES(DEFAULT);
INSERT INTO foo (bar) VALUES(NULL);
INSERT INTO films SELECT * FROM tmp_films WHERE date_prod < '2004-05-07';

-- Unconditional multi-table INSERT INTO
insert all
    into t1
    into t1 (c1, c2, c3) values (n2, n1, default)
    into t2 (c1, c2, c3)
    into t2 values (n3, n2, n1)
select n1, n2, n3 from src;

insert overwrite all
    into t1
    into t1 (c1, c2, c3) values (n2, n1, default)
    into t2 (c1, c2, c3)
    into t2 values (n3, n2, n1)
select n1, n2, n3 from src;

insert all
    into t1 values ($1, an_alias, "10 + 20")
select 1, 50 as an_alias, 10 + 20;

insert all
    into t1 values (key, a)
select src1.key as key, src1.a as a
from src1, src2
where src1.key = src2.key;

-- Conditional multi-table INSERT INTO
insert all
    when n1 > 100 then
        into t1
    when n1 > 10 then
        into t1
        into t2
    else
        into t2
select n1 from src;

insert first
    when n1 > 100 then
        into t1
    when n1 > 10 then
        into t1
        into t2
    else
        into t2
select n1 from src;

insert all
    when c > 10 then
        into t1 (col1, col2) values (a, b)
select a, b, c from src;

INSERT INTO foo.bar (
    SELECT foo.bar
);
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/insert.yml000066400000000000000000000372321451700765000240170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: a6ea753bffd94b5fd9c7c5829eca5a08b7a926134b1cf9510805c219aae77f38 file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( expression: bare_function: current_timestamp end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: - start_bracket: ( - column_reference: naked_identifier: bar - comma: ',' - column_reference: naked_identifier: baz - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: DEFAULT end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_reference: naked_identifier: bar end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( keyword: 'NULL' end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: films - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tmp_films where_clause: keyword: WHERE expression: column_reference: naked_identifier: date_prod comparison_operator: raw_comparison_operator: < quoted_literal: "'2004-05-07'" - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - comma: ',' - keyword: default - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: 
naked_identifier: n3 - comma: ',' - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: n1 - comma: ',' - select_clause_element: column_reference: naked_identifier: n2 - comma: ',' - select_clause_element: column_reference: naked_identifier: n3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: overwrite - keyword: all - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - comma: ',' - keyword: default - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - bracketed: - start_bracket: ( - column_reference: naked_identifier: c1 - comma: ',' - column_reference: naked_identifier: c2 - comma: ',' - column_reference: naked_identifier: c3 - end_bracket: ) - keyword: into - table_reference: naked_identifier: t2 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: n3 - comma: ',' - expression: column_reference: naked_identifier: n2 - comma: ',' - expression: column_reference: naked_identifier: n1 - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: n1 - comma: ',' - select_clause_element: column_reference: naked_identifier: n2 - comma: ',' - select_clause_element: column_reference: naked_identifier: n3 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: column_index_identifier_segment: $1 - comma: ',' - expression: column_reference: naked_identifier: an_alias - comma: ',' - expression: column_reference: quoted_identifier: '"10 + 20"' - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '50' alias_expression: keyword: as naked_identifier: an_alias - comma: ',' - select_clause_element: expression: - numeric_literal: '10' - binary_operator: + - numeric_literal: '20' - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: into - table_reference: naked_identifier: t1 - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: key - comma: ',' - expression: column_reference: naked_identifier: a - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: src1 - dot: . 
- naked_identifier: key alias_expression: keyword: as naked_identifier: key - comma: ',' - select_clause_element: column_reference: - naked_identifier: src1 - dot: . - naked_identifier: a alias_expression: keyword: as naked_identifier: a from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src1 - comma: ',' - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src2 where_clause: keyword: where expression: - column_reference: - naked_identifier: src1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t2 - keyword: else - keyword: into - table_reference: naked_identifier: t2 - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: n1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: first - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: when - expression: column_reference: naked_identifier: n1 comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - keyword: into - table_reference: naked_identifier: t2 - keyword: else - keyword: into - table_reference: naked_identifier: t2 - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: n1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - statement_terminator: ; - statement: insert_statement: - keyword: insert - keyword: all - keyword: when - expression: column_reference: naked_identifier: c comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - keyword: then - keyword: into - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - values_clause: keyword: values bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: src - 
statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: foo - dot: . - naked_identifier: bar - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: foo - dot: . - naked_identifier: bar end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/json_underscore_key.sql000066400000000000000000000000351451700765000265520ustar00rootroot00000000000000select x.y:_z::string from x sqlfluff-2.3.5/test/fixtures/dialects/snowflake/json_underscore_key.yml000066400000000000000000000021151451700765000265550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8a5489149e51eeeb71a5fed628312d609c54af92e23a45bd9c812272df2568f5 file: statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: column_reference: - naked_identifier: x - dot: . - naked_identifier: y semi_structured_expression: colon: ':' semi_structured_element: _z casting_operator: '::' data_type: data_type_identifier: string from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: x sqlfluff-2.3.5/test/fixtures/dialects/snowflake/lateral_flatten_after_join.sql000066400000000000000000000004151451700765000300430ustar00rootroot00000000000000select value as p_id, name, iff( rank() over ( partition by id order by t_id desc ) = 1 , true, false ) as most_recent from a inner join b on (b.c_id = a.c_id) , lateral flatten (input => b.cool_ids) sqlfluff-2.3.5/test/fixtures/dialects/snowflake/lateral_flatten_after_join.yml000066400000000000000000000075641451700765000300610ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 2afc5679e2f6222a23b44a89c5bbce53d71f5869fd4b0e4f4eb791dc3bcdbd61 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: value alias_expression: keyword: as naked_identifier: p_id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: iff bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: rank bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: id orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: t_id - keyword: desc end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - comma: ',' - expression: boolean_literal: 'true' - comma: ',' - expression: boolean_literal: 'false' - end_bracket: ) alias_expression: keyword: as naked_identifier: most_recent from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: a join_clause: - keyword: inner - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'on' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: b - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: c_id end_bracket: ) - comma: ',' - from_expression: from_expression_element: keyword: lateral table_expression: function: function_name: function_name_identifier: flatten bracketed: start_bracket: ( snowflake_keyword_expression: parameter: input parameter_assigner: => column_reference: - naked_identifier: b - dot: . - naked_identifier: cool_ids end_bracket: )
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/let.sql000066400000000000000000000010501451700765000232620ustar00rootroot00000000000000
begin
    -- variable based
    let somevariable := 5;
    let somevariable number(38, 0) := 5;
    let somevariable number(38, 0) default 5;
    let somevariable default 5;
    -- variable reassignment
    somevariable := 5;
    -- cursor based
    let somevariable cursor for select some_col from some_database.schema.some_table;
    let somevariable cursor for somevariable;
    let someresult resultset := (select some_col from some_database.schema.some_table);
    -- resultset reassignment
    someresult := (select SOME_COL from some_database.schema.some_table);
end;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/let.yml000066400000000000000000000111661451700765000232750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: cebbdec1262f1c649e6a473626c05bb44a8f96d50daf31c8b46e03e24213a7d5 file: - statement: scripting_block_statement: keyword: begin statement: scripting_let_statement: keyword: let variable: somevariable assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: keyword: let variable: somevariable data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - data_type: data_type_identifier: number bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '38' - comma: ',' - numeric_literal: '0' - end_bracket: ) - keyword: default - expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: default - expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: variable: somevariable assignment_operator: := expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: cursor - keyword: for - select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: some_col from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . - naked_identifier: some_table - statement_terminator: ; - statement: scripting_let_statement: - keyword: let - variable: somevariable - keyword: cursor - keyword: for - variable: somevariable - statement_terminator: ; - statement: scripting_let_statement: keyword: let variable: someresult data_type: data_type_identifier: resultset assignment_operator: := expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: some_col from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . - naked_identifier: some_table end_bracket: ) - statement_terminator: ; - statement: scripting_let_statement: variable: someresult assignment_operator: := expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: SOME_COL from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_database - dot: . - naked_identifier: schema - dot: . 
- naked_identifier: some_table end_bracket: ) - statement_terminator: ; - statement: scripting_block_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/limit.sql000066400000000000000000000014431451700765000236220ustar00rootroot00000000000000select c1 from testtable order by c1 limit 3; select c1 from testtable order by c1 limit 3 offset 3; select * from demo1 order by i limit null offset null; select * from demo1 order by i limit '' offset ''; select * from demo1 order by i limit $$$$ offset $$$$; select c1 from testtable order by c1 fetch 3; select c1 from testtable order by c1 fetch first 3; select c1 from testtable order by c1 fetch next 3; select c1 from testtable order by c1 fetch 1 row; select c1 from testtable order by c1 fetch 3 rows; select c1 from testtable order by c1 fetch 3 only; select c1 from testtable order by c1 offset 3 fetch 3; select c1 from testtable order by c1 offset 1 row fetch 1 row; select c1 from testtable order by c1 offset 3 rows fetch 3 rows; select c1 from testtable offset 3 fetch 3; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/limit.yml000066400000000000000000000232761451700765000236340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6847a028964c21715a4a2028dda8526e55c1486313a7de70e74b84c54a015d59 file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: keyword: limit numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: limit - numeric_literal: '3' - keyword: offset - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - keyword: 'null' - keyword: offset - keyword: 'null' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - quoted_literal: "''" - keyword: offset - quoted_literal: "''" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: 
star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: demo1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: i limit_clause: - keyword: limit - quoted_literal: $$$$ - keyword: offset - quoted_literal: $$$$ - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: keyword: fetch numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - keyword: first - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - keyword: next - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '1' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '3' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: fetch - numeric_literal: '3' - keyword: only - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: offset - numeric_literal: '3' - keyword: fetch - numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 
from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: offset - numeric_literal: '1' - keyword: row - keyword: fetch - numeric_literal: '1' - keyword: row - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: c1 limit_clause: - keyword: offset - numeric_literal: '3' - keyword: rows - keyword: fetch - numeric_literal: '3' - keyword: rows - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: c1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: testtable limit_clause: - keyword: offset - numeric_literal: '3' - keyword: fetch - numeric_literal: '3' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/list_statement.sql000066400000000000000000000002101451700765000255320ustar00rootroot00000000000000list @%mytable; list @mystage/path1; list @%mytable pattern='.*data_0.*'; list @my_csv_stage/analysis/ pattern='.*data_0.*'; ls @~; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/list_statement.yml000066400000000000000000000022551451700765000255470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8c2304cccb04982c6abb2941c7c46fdda339d134893c5970f249015f75f04b34 file: - statement: list_statement: keyword: list stage_path: '@%mytable' - statement_terminator: ; - statement: list_statement: keyword: list stage_path: '@mystage/path1' - statement_terminator: ; - statement: list_statement: - keyword: list - stage_path: '@%mytable' - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*data_0.*'" - statement_terminator: ; - statement: list_statement: - keyword: list - stage_path: '@my_csv_stage/analysis/' - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*data_0.*'" - statement_terminator: ; - statement: list_statement: keyword: ls stage_path: '@~' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/match_recognize.sql000066400000000000000000000121251451700765000256440ustar00rootroot00000000000000-- Examples from snowflake docs. 
select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as match_number,
        first(price_date) as start_date,
        last(price_date) as end_date,
        count(*) as rows_in_sequence,
        count(row_with_price_decrease.*) as num_decreases,
        count(row_with_price_increase.*) as num_increases
    one row per match
    after match skip to last row_with_price_increase
    pattern(row_before_decrease row_with_price_decrease+ row_with_price_increase+)
    define
        row_with_price_decrease as price < lag(price),
        row_with_price_increase as price > lag(price)
)
order by company, match_number;

select price_date, match_number, msq, price, cl
from (select * from stock_price_history where company='ABCD')
match_recognize(
    order by price_date
    measures
        match_number() as "MATCH_NUMBER",
        match_sequence_number() as msq,
        classifier() as cl
    all rows per match
    pattern(any_row up+)
    define
        any_row as true,
        up as price > lag(price)
)
order by match_number, msq;

select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures match_number() as "MATCH_NUMBER"
    all rows per match
    omit empty matches
    pattern(overavg*)
    define overavg as price > avg(price) over (rows between unbounded preceding and unbounded following)
)
order by company, price_date;

select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as "MATCH_NUMBER",
        classifier() as cl
    all rows per match
    with unmatched rows
    pattern(overavg+)
    define overavg as price > avg(price) over (rows between unbounded preceding and unbounded following)
)
order by company, price_date;

select company, price_date, price, "FINAL FIRST(LT45.price)", "FINAL LAST(LT45.price)"
from stock_price_history
match_recognize (
    partition by company
    order by price_date
    measures
        final first(lt45.price) as "FINAL FIRST(LT45.price)",
        final last(lt45.price) as "FINAL LAST(LT45.price)"
    all rows per match
    after match skip past last row
    pattern (lt45 lt45)
    define lt45 as price < 45.00
)
where company = 'ABCD'
order by price_date;

-- Testing all quantifiers.
select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as match_number,
        first(price_date) as start_date,
        last(price_date) as end_date,
        count(*) as rows_in_sequence,
        count(row_with_price_decrease.*) as num_decreases,
        count(row_with_price_increase.*) as num_increases
    one row per match
    after match skip to last row_with_price_increase
    pattern(^ S1+ S2* S3? S4{1} S5{1,} S6{,1} S7{1,1} S8*? $)
    define
        row_with_price_decrease as price < lag(price),
        row_with_price_increase as price > lag(price)
)
order by company, match_number;

-- Testing operators.
select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as match_number,
        first(price_date) as start_date,
        last(price_date) as end_date,
        count(*) as rows_in_sequence,
        count(row_with_price_decrease.*) as num_decreases,
        count(row_with_price_increase.*) as num_increases
    one row per match
    after match skip to last row_with_price_increase
    pattern(^ ( S1 | S2* )? S3 PERMUTE(S4+, S5*?) {- S6 -}+ $)
    define
        row_with_price_decrease as price < lag(price),
        row_with_price_increase as price > lag(price)
)
order by company, match_number;

select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as match_number,
        first(price_date) as start_date,
        last(price_date) as end_date,
        count(*) as rows_in_sequence,
        count(row_with_price_decrease.*) as num_decreases,
        count(row_with_price_increase.*) as num_increases
    one row per match
    after match skip to last row_with_price_increase
    pattern((A {- B+ C+ -} D+))
    define
        row_with_price_decrease as price < lag(price),
        row_with_price_increase as price > lag(price)
)
order by company, match_number;

select * from stock_price_history
match_recognize(
    partition by company
    order by price_date
    measures
        match_number() as match_number,
        first(price_date) as start_date,
        last(price_date) as end_date,
        count(*) as rows_in_sequence,
        count(row_with_price_decrease.*) as num_decreases,
        count(row_with_price_increase.*) as num_increases
    one row per match
    after match skip to last row_with_price_increase
    pattern((A | B){5} C+)
    define
        row_with_price_decrease as price < lag(price),
        row_with_price_increase as price > lag(price)
)
order by company, match_number;
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/match_recognize.yml000066400000000000000000001346511451700765000256550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - naked_identifier: row_before_decrease - naked_identifier: row_with_price_decrease - sign_indicator: + - naked_identifier: row_with_price_increase - sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: price_date - comma: ',' - select_clause_element: column_reference: naked_identifier: match_number - comma: ',' - select_clause_element: column_reference: naked_identifier: msq - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: naked_identifier: cl from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history where_clause: keyword: where expression: column_reference: naked_identifier: company comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ABCD'" end_bracket: ) match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as quoted_identifier: '"MATCH_NUMBER"' - comma: ',' - expression: function: function_name: function_name_identifier: match_sequence_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: msq - comma: ',' - expression: function: function_name: function_name_identifier: classifier bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: cl - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: pattern - bracketed: start_bracket: ( 
pattern_expression: - naked_identifier: any_row - naked_identifier: up - sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: any_row - keyword: as - expression: boolean_literal: 'true' - comma: ',' - naked_identifier: up - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: match_number - comma: ',' - column_reference: naked_identifier: msq - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as quoted_identifier: '"MATCH_NUMBER"' - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: omit - keyword: empty - keyword: matches - keyword: pattern - bracketed: start_bracket: ( pattern_expression: naked_identifier: overavg star: '*' end_bracket: ) - keyword: define - naked_identifier: overavg - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as quoted_identifier: '"MATCH_NUMBER"' - comma: ',' - expression: function: function_name: function_name_identifier: classifier bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: cl - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: with - keyword: unmatched - keyword: 
rows - keyword: pattern - bracketed: start_bracket: ( pattern_expression: naked_identifier: overavg sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: overavg - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: frame_clause: - keyword: rows - keyword: between - keyword: unbounded - keyword: preceding - keyword: and - keyword: unbounded - keyword: following end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: company - comma: ',' - select_clause_element: column_reference: naked_identifier: price_date - comma: ',' - select_clause_element: column_reference: naked_identifier: price - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"FINAL FIRST(LT45.price)"' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '"FINAL LAST(LT45.price)"' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - keyword: final - expression: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: - naked_identifier: lt45 - dot: . - naked_identifier: price end_bracket: ) - alias_expression: keyword: as quoted_identifier: '"FINAL FIRST(LT45.price)"' - comma: ',' - keyword: final - expression: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: - naked_identifier: lt45 - dot: . 
- naked_identifier: price end_bracket: ) - alias_expression: keyword: as quoted_identifier: '"FINAL LAST(LT45.price)"' - keyword: all - keyword: rows - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: past - keyword: last - keyword: row - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - naked_identifier: lt45 - naked_identifier: lt45 end_bracket: ) - keyword: define - naked_identifier: lt45 - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < numeric_literal: '45.00' - end_bracket: ) where_clause: keyword: where expression: column_reference: naked_identifier: company comparison_operator: raw_comparison_operator: '=' quoted_literal: "'ABCD'" orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - caret: ^ - naked_identifier: S1 - sign_indicator: + - naked_identifier: S2 - star: '*' - naked_identifier: S3 - question_mark: '?' 
- naked_identifier: S4 - start_curly_bracket: '{' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S5 - start_curly_bracket: '{' - numeric_literal: '1' - comma: ',' - end_curly_bracket: '}' - naked_identifier: S6 - start_curly_bracket: '{' - comma: ',' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S7 - start_curly_bracket: '{' - numeric_literal: '1' - comma: ',' - numeric_literal: '1' - end_curly_bracket: '}' - naked_identifier: S8 - star: '*' - question_mark: '?' - dollar: $ end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . 
star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: - caret: ^ - bracketed: - start_bracket: ( - naked_identifier: S1 - binary_operator: pipe: '|' - naked_identifier: S2 - star: '*' - end_bracket: ) - question_mark: '?' - naked_identifier: S3 - keyword: PERMUTE - bracketed: - start_bracket: ( - naked_identifier: S4 - sign_indicator: + - comma: ',' - naked_identifier: S5 - star: '*' - question_mark: '?' - end_bracket: ) - bracketed: start_exclude_bracket: '{-' naked_identifier: S6 end_exclude_bracket: -} - sign_indicator: + - dollar: $ end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . 
star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: bracketed: - start_bracket: ( - naked_identifier: A - bracketed: - start_exclude_bracket: '{-' - naked_identifier: B - sign_indicator: + - naked_identifier: C - sign_indicator: + - end_exclude_bracket: -} - naked_identifier: D - sign_indicator: + - end_bracket: ) end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: stock_price_history match_recognize_clause: keyword: match_recognize bracketed: - start_bracket: ( - partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: company - orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: price_date - keyword: measures - expression: function: function_name: function_name_identifier: match_number bracketed: start_bracket: ( end_bracket: ) - alias_expression: keyword: as naked_identifier: match_number - comma: ',' - expression: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: start_date - comma: ',' - expression: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: naked_identifier: price_date end_bracket: ) - alias_expression: keyword: as naked_identifier: end_date - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: rows_in_sequence - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_decrease dot: . 
star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_decreases - comma: ',' - expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( expression: naked_identifier: row_with_price_increase dot: . star: '*' end_bracket: ) - alias_expression: keyword: as naked_identifier: num_increases - keyword: one - keyword: row - keyword: per - keyword: match - keyword: after - keyword: match - keyword: skip - keyword: to - keyword: last - naked_identifier: row_with_price_increase - keyword: pattern - bracketed: start_bracket: ( pattern_expression: bracketed: - start_bracket: ( - naked_identifier: A - binary_operator: pipe: '|' - naked_identifier: B - end_bracket: ) start_curly_bracket: '{' numeric_literal: '5' end_curly_bracket: '}' naked_identifier: C sign_indicator: + end_bracket: ) - keyword: define - naked_identifier: row_with_price_decrease - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: < function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - comma: ',' - naked_identifier: row_with_price_increase - keyword: as - expression: column_reference: naked_identifier: price comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: lag bracketed: start_bracket: ( expression: column_reference: naked_identifier: price end_bracket: ) - end_bracket: ) orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: company - comma: ',' - column_reference: naked_identifier: match_number - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/merge_into.sql000066400000000000000000000007561451700765000246420ustar00rootroot00000000000000ALTER TABLE xxxx.example_table MODIFY COLUMN employeeCode SET MASKING POLICY example_MASKING_POLICY; merge into target_table using source_table on target_table.id = source_table.id when matched then update set target_table.description = source_table.description; merge into t1 using t2 on t1.t1key = t2.t2key when matched and t2.marked = 1 then delete; merge into t1 using t2 on t1.t1key = t2.t2key when not matched and t2.marked = 1 then insert (marked) values (1); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/merge_into.yml000066400000000000000000000112021451700765000246300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4fc9381cf19925291a0cb4b9a028ac88f1518d8d63395677b1d2a1cbfdd6da1a file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: xxxx - dot: . 
- naked_identifier: example_table - alter_table_table_column_action: - keyword: MODIFY - keyword: COLUMN - column_reference: naked_identifier: employeeCode - keyword: SET - keyword: MASKING - keyword: POLICY - function_name: function_name_identifier: example_MASKING_POLICY - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: target_table - keyword: using - table_reference: naked_identifier: source_table - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: target_table - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source_table - dot: . - naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: keyword: set set_clause: - column_reference: - naked_identifier: target_table - dot: . - naked_identifier: description - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: source_table - dot: . - naked_identifier: description - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: t1 - keyword: using - table_reference: naked_identifier: t2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: t1key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: t2key - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: and - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: marked comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: then - merge_delete_clause: keyword: delete - statement_terminator: ; - statement: merge_statement: - keyword: merge - keyword: into - table_reference: naked_identifier: t1 - keyword: using - table_reference: naked_identifier: t2 - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: t1key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: t2key - merge_match: merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: and - expression: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: marked comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: then - merge_insert_clause: keyword: insert bracketed: start_bracket: ( column_reference: naked_identifier: marked end_bracket: ) values_clause: keyword: values bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/multiple_shorthand_casts.sql000066400000000000000000000002041451700765000276000ustar00rootroot00000000000000select '1'::int::boolean as bool; update table_name set col1 = CURRENT_TIMESTAMP::TIMESTAMP_TZ, col2 = '1'::int::boolean ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/multiple_shorthand_casts.yml000066400000000000000000000036271451700765000276160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d4da930e99a348f0e3aedda22c084498cab4794b1c3d2620880d13ea47cfe5b4 file: - statement: select_statement: select_clause: keyword: select select_clause_element: expression: cast_expression: - quoted_literal: "'1'" - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: boolean alias_expression: keyword: as naked_identifier: bool - statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: table_name set_clause_list: - keyword: set - set_clause: column_reference: naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' expression: cast_expression: bare_function: CURRENT_TIMESTAMP casting_operator: '::' data_type: data_type_identifier: TIMESTAMP_TZ - comma: ',' - set_clause: column_reference: naked_identifier: col2 comparison_operator: raw_comparison_operator: '=' expression: cast_expression: - quoted_literal: "'1'" - casting_operator: '::' - data_type: data_type_identifier: int - casting_operator: '::' - data_type: data_type_identifier: boolean - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/non_reserved_keywords.sql000066400000000000000000000002141451700765000271170ustar00rootroot00000000000000SELECT account FROM foo; CREATE TABLE IF NOT EXISTS table_name( organization VARCHAR ); with pivot as (select 1) select * from pivot; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/non_reserved_keywords.yml000066400000000000000000000037671451700765000271410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 239f6fcece51a5dbd7ce49cb7468efeb74f8a4d75d4daf47bd322281e85db407 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: account from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_name - bracketed: start_bracket: ( column_definition: naked_identifier: organization data_type: data_type_identifier: VARCHAR end_bracket: ) - statement_terminator: ; - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: pivot keyword: as bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pivot - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/object_literals.sql000066400000000000000000000001071451700765000256450ustar00rootroot00000000000000SELECT {'a': 1, 'b': 'foo', 'c': 4 + 5, 'd': some_column_ref} FROM foo sqlfluff-2.3.5/test/fixtures/dialects/snowflake/object_literals.yml000066400000000000000000000030161451700765000256510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 400fa940a29b0a3210e974e81d6373cb8dd89c4bbb6ff6b5ad8549c269279a90 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: object_literal: - start_curly_bracket: '{' - object_literal_element: quoted_literal: "'a'" colon: ':' numeric_literal: '1' - comma: ',' - object_literal_element: - quoted_literal: "'b'" - colon: ':' - quoted_literal: "'foo'" - comma: ',' - object_literal_element: quoted_literal: "'c'" colon: ':' expression: - numeric_literal: '4' - binary_operator: + - numeric_literal: '5' - comma: ',' - object_literal_element: quoted_literal: "'d'" colon: ':' column_reference: naked_identifier: some_column_ref - end_curly_bracket: '}' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo sqlfluff-2.3.5/test/fixtures/dialects/snowflake/pivot.sql000066400000000000000000000005301451700765000236410ustar00rootroot00000000000000-- NB This is a pivot expression with an alias. The alias should be parsed separately from the pivot. SELECT * FROM my_tbl PIVOT (min(f_val) FOR f_id IN (1, 2)) AS f (a, b); SELECT * FROM my_tbl UNPIVOT (val FOR col_name IN (a, b)); select * from table_a unpivot (a for b in (col_1, col_2, col_3)) unpivot (c for d in (col_a, col_b, col_c)) ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/pivot.yml000066400000000000000000000102161451700765000236450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests.
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 57ff3afe4fe78cf0a36a3a3a561a8ebd0578fbb29eb444fdb7f84cfcd389cca3 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_pivot_expression: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: min bracketed: start_bracket: ( expression: column_reference: naked_identifier: f_val end_bracket: ) - keyword: FOR - naked_identifier: f_id - keyword: IN - bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: f bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_tbl from_unpivot_expression: keyword: UNPIVOT bracketed: - start_bracket: ( - naked_identifier: val - keyword: FOR - naked_identifier: col_name - keyword: IN - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table_a - from_unpivot_expression: keyword: unpivot bracketed: - start_bracket: ( - naked_identifier: a - keyword: for - naked_identifier: b - keyword: in - bracketed: - start_bracket: ( - naked_identifier: col_1 - comma: ',' - naked_identifier: col_2 - comma: ',' - naked_identifier: col_3 - end_bracket: ) - end_bracket: ) - from_unpivot_expression: keyword: unpivot bracketed: - start_bracket: ( - naked_identifier: c - keyword: for - naked_identifier: d - keyword: in - bracketed: - start_bracket: ( - naked_identifier: col_a - comma: ',' - naked_identifier: col_b - comma: ',' - naked_identifier: col_c - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/put_statement.sql000066400000000000000000000006551451700765000254040ustar00rootroot00000000000000put file:///tmp/data/mydata.csv @my_int_stage; put file:///tmp/data/orders_001.csv @%orderstiny_ext auto_compress=false; put file:///tmp/data/orders_*01.csv @%orderstiny_ext auto_compress=false; put file://c:\temp\data\mydata.csv @~ auto_compress=true; put file://c:\temp\data\mydata.csv @~ parallel=1; put file://c:\temp\data\mydata.csv @~ source_compression='auto_detect'; put file://c:\temp\data\mydata.csv @~ overwrite=true; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/put_statement.yml000066400000000000000000000043421451700765000254030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1bad013dedbfea5e554ea3f923c67d68c17ace2d864143f106729147b9a966e5 file: - statement: put_statement: keyword: put unquoted_file_path: file:///tmp/data/mydata.csv stage_path: '@my_int_stage' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file:///tmp/data/orders_001.csv - stage_path: '@%orderstiny_ext' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file:///tmp/data/orders_*01.csv - stage_path: '@%orderstiny_ext' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'false' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: auto_compress - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: parallel - comparison_operator: raw_comparison_operator: '=' - integer_literal: '1' - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: source_compression - comparison_operator: raw_comparison_operator: '=' - compression_type: "'auto_detect'" - statement_terminator: ; - statement: put_statement: - keyword: put - unquoted_file_path: file://c:\temp\data\mydata.csv - stage_path: '@~' - keyword: overwrite - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'true' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/qualify.sql000066400000000000000000000001521451700765000241520ustar00rootroot00000000000000select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 sqlfluff-2.3.5/test/fixtures/dialects/snowflake/qualify.yml000066400000000000000000000034471451700765000241660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d3c6c019479ddab3b31950d7d036b1a4d9694dd8eb73d5c55673508369f4852d file: statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/qualify_union.sql000066400000000000000000000003361451700765000253660ustar00rootroot00000000000000select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 union all select col1, col2 from some_table qualify row_number() over (partition by col1 order by col1) = 1 sqlfluff-2.3.5/test/fixtures/dialects/snowflake/qualify_union.yml000066400000000000000000000067031451700765000253740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b6c296b4ef1b5c85ed1f47862ae387fe5da2ce0695f429a9e9509155f5766fe8 file: statement: set_expression: - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - set_operator: - keyword: union - keyword: all - select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: row_number bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: col1 orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: col1 end_bracket: ) comparison_operator: 
raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/remove_statement.sql000066400000000000000000000002631451700765000260640ustar00rootroot00000000000000rm @%mytable/myobject; rm @%mytable/myobject/; remove @mystage/path1/subpath2; remove @%orders; rm @~ pattern='.*jun.*'; REMOVE @foo.bar PATTERN = '\w'; RM @foo.foo PATTERN=$bar; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/remove_statement.yml000066400000000000000000000030021451700765000260600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 473c65321c316f23d9a239e9fb8073e1434e4e3e6068a33fc762c5efb3756ad8 file: - statement: remove_statement: keyword: rm stage_path: '@%mytable/myobject' - statement_terminator: ; - statement: remove_statement: keyword: rm stage_path: '@%mytable/myobject/' - statement_terminator: ; - statement: remove_statement: keyword: remove stage_path: '@mystage/path1/subpath2' - statement_terminator: ; - statement: remove_statement: keyword: remove stage_path: '@%orders' - statement_terminator: ; - statement: remove_statement: - keyword: rm - stage_path: '@~' - keyword: pattern - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'.*jun.*'" - statement_terminator: ; - statement: remove_statement: - keyword: REMOVE - stage_path: '@foo.bar' - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'\\w'" - statement_terminator: ; - statement: remove_statement: - keyword: RM - stage_path: '@foo.foo' - keyword: PATTERN - comparison_operator: raw_comparison_operator: '=' - variable: $bar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/return.sql000066400000000000000000000000511451700765000240150ustar00rootroot00000000000000begin select 1; select 2; return 5; end; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/return.yml000066400000000000000000000020161451700765000240220ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9a088da7bbc56ac35ac5889d6b880afbd85daea734461fe88353e97860cb5599 file: - statement: scripting_block_statement: keyword: begin statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' - statement_terminator: ; - statement: return_statement: keyword: return expression: numeric_literal: '5' - statement_terminator: ; - statement: scripting_block_statement: keyword: end - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/sample.sql000066400000000000000000000004561451700765000237700ustar00rootroot00000000000000-- https://github.com/sqlfluff/sqlfluff/issues/547 select * -- 20% sample from real_data sample (20) ; SET sample_size = 10; WITH dummy_data AS ( SELECT SEQ4() AS row_number FROM TABLE(GENERATOR(rowcount => 1000)) ORDER BY row_number ) SELECT * FROM dummy_data SAMPLE ($sample_size ROWS); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/sample.yml000066400000000000000000000071231451700765000237700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8fbecdbf9ac58a9c95d293d289de060182d5029064ef777bf3ef52f139d59d91 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: real_data sample_expression: keyword: sample bracketed: start_bracket: ( numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: sample_size comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '10' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: dummy_data keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SEQ4 bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: AS naked_identifier: row_number from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: TABLE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GENERATOR bracketed: start_bracket: ( snowflake_keyword_expression: parameter: rowcount parameter_assigner: => numeric_literal: '1000' end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: row_number end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dummy_data sample_expression: keyword: SAMPLE bracketed: start_bracket: ( variable: $sample_size keyword: ROWS end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select.sql000066400000000000000000000003351451700765000237620ustar00rootroot00000000000000SELECT a FROM b; SELECT 
view FROM foo; SELECT view FROM case; SELECT issue FROM issue; SELECT customer_id, TRIM(value:cross) AS cross FROM my_table; SELECT customer_id FROM my_table cross join my_table2; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select.yml000066400000000000000000000067721451700765000237770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d10a05e1154523702380484f2ad9a65b7e909134045efeb252c034eee4560446 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: b - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: view from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: view from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: case - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: issue from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: issue - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: customer_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: TRIM bracketed: start_bracket: ( expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: cross end_bracket: ) alias_expression: keyword: AS naked_identifier: cross from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: customer_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table join_clause: - keyword: cross - keyword: join - from_expression_element: table_expression: table_reference: naked_identifier: my_table2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_clause_modifiers.sql000066400000000000000000000007301451700765000273560ustar00rootroot00000000000000SELECT *, col1, col2, my_table.col1, my_table.* FROM my_table; SELECT DISTINCT * FROM my_table; SELECT DISTINCT col1 FROM my_table; SELECT ALL my_table.* FROM my_table; SELECT TOP 1 * FROM my_table; SELECT TOP 2 col1 FROM my_table; SELECT TOP 3 col1, my_table.* FROM my_table; SELECT ALL TOP 10 col1 FROM my_table; SELECT DISTINCT TOP 20 my_table.col1 FROM my_table; SELECT DISTINCT TOP 30 * FROM my_table; SELECT DISTINCT TOP 40 col1, my_table.* FROM my_table; 
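# NOTE: The auto-generated .yml headers in these fixtures all describe the
# same regeneration workflow. Below is a minimal Python sketch of that
# workflow (run from the repository root); the hash inspection at the end is
# purely illustrative and assumed for this sketch, since the real check is
# implemented inside SQLFluff's own test suite.
import subprocess

# Regenerate all parse fixtures after adding or altering a .sql fixture.
# This is the exact command named in each auto-generated .yml header.
subprocess.run(["python", "test/generate_parse_fixture_yml.py"], check=True)

# Illustrative only: confirm a regenerated fixture carries a `_hash` field
# shaped like a 64-character hex digest, as seen in the files above.
with open("test/fixtures/dialects/snowflake/pivot.yml", encoding="utf-8") as fixture:
    for line in fixture:
        if line.startswith("_hash:"):
            digest = line.split(":", 1)[1].strip()
            assert len(digest) == 64
            int(digest, 16)  # raises ValueError if not valid hexadecimal
            break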
sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_clause_modifiers.yml000066400000000000000000000155031451700765000273640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8258a3eb788c43beddbbe82c824dffe3e0ea9a0d6bd048ea3f6760a9ade2d1fe file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: wildcard_expression: wildcard_identifier: star: '*' - comma: ',' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: ALL select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '1' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP numeric_literal: '2' select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP numeric_literal: '3' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . 
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: ALL - keyword: TOP - numeric_literal: '10' select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '20' select_clause_element: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '30' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: DISTINCT - keyword: TOP - numeric_literal: '40' - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: my_table dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_except.sql000066400000000000000000000000621451700765000253270ustar00rootroot00000000000000select * from table1 EXCEPT (select * from table1)sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_except.yml000066400000000000000000000025661451700765000253440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1168e16f50bc0b751155f77f5d7ab6479913404503c8886bd22653cf4fa2daf1 file: statement: set_expression: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 set_operator: keyword: EXCEPT bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_exclude.sql000066400000000000000000000002471451700765000254750ustar00rootroot00000000000000select * exclude col1 from table1; select * exclude (col1) from table1; select * exclude (col1, col2) from table1; select * exclude (col1, col2, coln) from table1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_exclude.yml000066400000000000000000000056551451700765000255070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 715ebad5a3924bf92c80d7bcc4d1a2116ab81a6115381e685b077c9b22f2a302 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude naked_identifier: col1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: start_bracket: ( naked_identifier: col1 end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: coln - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_exclude_rename.sql000066400000000000000000000003411451700765000270170ustar00rootroot00000000000000select * exclude col1 rename (col1 as alias1, col2 as alias2) from table1; select * exclude (col1, col2) rename col1 as alias1 from table1; select * exclude (col1, col2) rename (col1 as alias1, 
col2 as alias2) from table1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_exclude_rename.yml000066400000000000000000000062731451700765000270330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ce51f7f9bc723be4c5e395ec41a9b20c7d1a314b704226037000c5e3693618cb file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude naked_identifier: col1 select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) select_rename_clause: - keyword: rename - naked_identifier: col1 - keyword: as - naked_identifier: alias1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_exclude_clause: keyword: exclude bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - end_bracket: ) select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_group_by_cube_rollup.sql000066400000000000000000000003771451700765000302710ustar00rootroot00000000000000-- CUBE within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY CUBE (name, age); -- ROLLUP within GROUP BY clause SELECT name, age, count(*) AS record_count FROM people GROUP BY ROLLUP (name, age); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_group_by_cube_rollup.yml000066400000000000000000000052441451700765000302710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f87666ca56011ac296f9d5713d781ea4d3579f504e59fcb6a1e6a3aa8ad5aec7 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - keyword: CUBE - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - keyword: ROLLUP - bracketed: - start_bracket: ( - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_grouping_sets.sql000066400000000000000000000004601451700765000267310ustar00rootroot00000000000000SELECT foo, bar FROM baz GROUP BY GROUPING SETS (foo, bar); select count(*), medical_license, radio_license from nurses group by grouping sets (medical_license, radio_license); select count(*), medical_license, radio_license from nurses group by grouping sets (medical_license, radio_license); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_grouping_sets.yml000066400000000000000000000066641451700765000267470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0cc7539cfacaea779991399a74316f23911acc1c6ea79a02f493c3a4a26fa793 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: foo - comma: ',' - select_clause_element: column_reference: naked_identifier: bar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: baz groupby_clause: - keyword: GROUP - keyword: BY - keyword: GROUPING - keyword: SETS - bracketed: - start_bracket: ( - column_reference: naked_identifier: foo - comma: ',' - column_reference: naked_identifier: bar - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: medical_license - comma: ',' - select_clause_element: column_reference: naked_identifier: radio_license from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nurses groupby_clause: - keyword: group - keyword: by - keyword: grouping - keyword: sets - bracketed: - start_bracket: ( - column_reference: naked_identifier: medical_license - comma: ',' - column_reference: naked_identifier: radio_license - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) - comma: ',' - select_clause_element: column_reference: naked_identifier: medical_license - comma: ',' - select_clause_element: column_reference: naked_identifier: radio_license from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: nurses groupby_clause: - keyword: group - keyword: by - keyword: grouping - keyword: sets - bracketed: - start_bracket: ( - column_reference: naked_identifier: medical_license - comma: ',' - column_reference: naked_identifier: radio_license - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_like_clause.sql000066400000000000000000000013341451700765000263220ustar00rootroot00000000000000SELECT a, b FROM person WHERE name LIKE 'M%'; SELECT a, b FROM person WHERE name NOT ILIKE 'M_ry'; SELECT a, b FROM person WHERE name RLIKE 'M+'; SELECT a, b FROM person WHERE name REGEXP 'M+'; SELECT a, b FROM person WHERE name LIKE '%$_%' ESCAPE '$'; SELECT a, b FROM person WHERE name LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name ILIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT ILIKE ANY ('%an%', '%an'); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_like_clause.yml000066400000000000000000000217031451700765000263260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3f9c8b6970ebb41409ae39e02a303e32ba94bf4794e675c3e17f2f42d27a4884 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: ILIKE - quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: RLIKE quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: REGEXP quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - quoted_literal: "'%$_%'" - keyword: ESCAPE - quoted_literal: "'$'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: 
NOT - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - quoted_literal: "'%an%'" - comma: ',' - quoted_literal: "'%an'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_rename.sql000066400000000000000000000002301451700765000253030ustar00rootroot00000000000000select * rename col1 as alias from table1; select * rename (col1 as alias) from table1; select * rename (col1 as alias1, col2 as alias2) from table1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_rename.yml000066400000000000000000000047031451700765000253160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 87de1a02545a55820d8cdf2dbcd84308a29eb689f10d14667be31c8c55f41872 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: - keyword: rename - naked_identifier: col1 - keyword: as - naked_identifier: alias from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_rename_clause: keyword: rename bracketed: - start_bracket: ( - naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_replace.sql000066400000000000000000000002441451700765000254540ustar00rootroot00000000000000select * replace ('DEPT-' || department_id as department_id) from table1; select * replace ('prefix1' || col1 as alias1, 'prefix2' || col2 as alias2) from table1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_replace.yml000066400000000000000000000047701451700765000254660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
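# REPLACE accepts any expression for the substituted column, so the same
# pattern works with function calls as well as concatenation (illustrative;
# the alias must name the column being replaced):
#   select * replace (upper(col1) as col1) from table1;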
_hash: 986831b7a097a626e6a78ed075c5f1be3f4e76518894334a47a2b1bbaeda9a22 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: start_bracket: ( expression: quoted_literal: "'DEPT-'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: department_id keyword: as naked_identifier: department_id end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_replace_clause: keyword: replace bracketed: - start_bracket: ( - expression: quoted_literal: "'prefix1'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: col1 - keyword: as - naked_identifier: alias1 - comma: ',' - expression: quoted_literal: "'prefix2'" binary_operator: - pipe: '|' - pipe: '|' column_reference: naked_identifier: col2 - keyword: as - naked_identifier: alias2 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_stages_files.sql000066400000000000000000000005501451700765000265110ustar00rootroot00000000000000SELECT t.$1, t.$2 FROM @mystage1 (file_format => myformat) t ; select t.$1, t.$2 from @mystage1 (file_format => 'myformat', pattern=>'.*data.*[.]csv.gz') t; select t.$1, t.$2 from @mystage1 (pattern=>'.*data.*[.]csv.gz', file_format => 'myformat') t; select t.$1, t.$2 from @mystage1 (pattern=>'.*data.*[.]csv.gz') t; select t.$1, t.$2 from @mystage1 t; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_stages_files.yml000066400000000000000000000117551451700765000265240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2411842892cde7247ba74e0ab69635007361306e0338e02178cde8bdcf303808 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: start_bracket: ( keyword: file_format parameter_assigner: => file_format_segment: object_reference: naked_identifier: myformat end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . 
column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: - start_bracket: ( - keyword: file_format - parameter_assigner: => - file_format_segment: quoted_literal: "'myformat'" - comma: ',' - keyword: pattern - parameter_assigner: => - quoted_literal: "'.*data.*[.]csv.gz'" - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: - start_bracket: ( - keyword: pattern - parameter_assigner: => - quoted_literal: "'.*data.*[.]csv.gz'" - comma: ',' - keyword: file_format - parameter_assigner: => - file_format_segment: quoted_literal: "'myformat'" - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: stage_path: '@mystage1' bracketed: start_bracket: ( keyword: pattern parameter_assigner: => quoted_literal: "'.*data.*[.]csv.gz'" end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $1 - comma: ',' - select_clause_element: column_reference: naked_identifier: t dot: . column_index_identifier_segment: $2 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: stage_path: '@mystage1' alias_expression: naked_identifier: t - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_system_function.sql000066400000000000000000000001561451700765000272740ustar00rootroot00000000000000SELECT SYSTEM$STREAM_HAS_DATA('SCH.MY_STREAM'); SELECT SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS('MY_TASK'); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_system_function.yml000066400000000000000000000020161451700765000272730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
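# Other SYSTEM$ functions take the same quoted-name call shape (illustrative;
# assumes a pipe named MY_PIPE exists):
#   SELECT SYSTEM$PIPE_STATUS('MY_PIPE');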
_hash: 00fe9093281e1147d50bf758ca517889e42697e9d726d5a48ca6cf91c025e59e file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_function_name: SYSTEM$STREAM_HAS_DATA bracketed: start_bracket: ( quoted_literal: "'SCH.MY_STREAM'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_function_name: SYSTEM$USER_TASK_CANCEL_ONGOING_EXECUTIONS bracketed: start_bracket: ( quoted_literal: "'MY_TASK'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_transient_table.sql000066400000000000000000000000771451700765000272230ustar00rootroot00000000000000CREATE OR REPLACE TRANSIENT TABLE new_tab AS SELECT * FROM tab sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_transient_table.yml000066400000000000000000000020111451700765000272130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6ff289a22d04fe83a31450eace665ffa9db6ea0bdb1e5f3fc987512a00b33296 file: statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TRANSIENT - keyword: TABLE - table_reference: naked_identifier: new_tab - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_union.sql000066400000000000000000000000431451700765000251660ustar00rootroot00000000000000SELECT 1 UNION SELECT 2 ORDER BY 1 sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_union.yml000066400000000000000000000015121451700765000251720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c0de8d8aa9756ca2fc964d76a0fd7bda3b733e0c83020bafafc843249f959708 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' - orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_values.sql000066400000000000000000000001711451700765000253370ustar00rootroot00000000000000select * from (values (1, 'one'), (2, 'two'), (3, 'three')); select * from values (1, 'one'), (2, 'two'), (3, 'three'); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_values.yml000066400000000000000000000060231451700765000253430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
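# A bracketed in-line VALUES clause can also be aliased with column names
# (illustrative sketch of the aliased form):
#   select v.num, v.word from (values (1, 'one'), (2, 'two')) as v (num, word);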
_hash: a21d2a2be65d777a38db0653b7a1d99cb6c782626e30f330f17c291194e939ad file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'one'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'two'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: quoted_literal: "'three'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_where_is_distinct_from.sql000066400000000000000000000001751451700765000305750ustar00rootroot00000000000000SELECT a, b FROM person where a IS DISTINCT FROM b; SELECT a, b FROM person where a IS NOT DISTINCT FROM b; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/select_where_is_distinct_from.yml000066400000000000000000000036371451700765000306050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
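# IS NOT DISTINCT FROM is null-safe equality: unlike `=`, it returns TRUE
# when both operands are NULL. Snowflake exposes the same comparison as the
# EQUAL_NULL function (illustrative):
#   SELECT a, b FROM person WHERE EQUAL_NULL(a, b);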
_hash: 41fdee8d74dfd8b584cfc398396a214dbcc8f09be2edf68e0fdfe2332a0ebfa3 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: where expression: - column_reference: naked_identifier: a - keyword: IS - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: where expression: - column_reference: naked_identifier: a - keyword: IS - keyword: NOT - keyword: DISTINCT - keyword: FROM - column_reference: naked_identifier: b - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured.sql000066400000000000000000000010301451700765000257150ustar00rootroot00000000000000-- tests parsing of table functions and semi structured accessing. SELECT ticket_id, value:value AS uncasted, value:id::bigint AS field_id, value:value::STRING AS field_val, value:thing[4].foo AS another_val, value:thing[4].bar.baz[0].foo::bigint AS another_val, array_field[0].array_element_property as test_array_access FROM raw_tickets, lateral flatten(INPUT => custom_fields); SELECT value:point:from:latitude::NUMBER(10, 6) AS lat, value:point:from:longitude::NUMBER(10, 6) AS lng FROM table1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured.yml000066400000000000000000000152361451700765000257340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bb6046b43b0d0fc5451fc777357451cd2ebbce591a7cc1d418c8e453eb99d68d file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ticket_id - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: value alias_expression: keyword: AS naked_identifier: uncasted - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: id casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: keyword: AS naked_identifier: field_id - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: colon: ':' semi_structured_element: value casting_operator: '::' data_type: data_type_identifier: STRING alias_expression: keyword: AS naked_identifier: field_val - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: thing - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - dot: . 
- semi_structured_element: foo alias_expression: keyword: AS naked_identifier: another_val - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: thing - array_accessor: start_square_bracket: '[' numeric_literal: '4' end_square_bracket: ']' - dot: . - semi_structured_element: bar - dot: . - semi_structured_element: baz - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' - dot: . - semi_structured_element: foo casting_operator: '::' data_type: data_type_identifier: bigint alias_expression: keyword: AS naked_identifier: another_val - comma: ',' - select_clause_element: expression: column_reference: naked_identifier: array_field array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' semi_structured_expression: dot: . semi_structured_element: array_element_property alias_expression: keyword: as naked_identifier: test_array_access from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: raw_tickets - comma: ',' - from_expression: from_expression_element: keyword: lateral table_expression: function: function_name: function_name_identifier: flatten bracketed: start_bracket: ( snowflake_keyword_expression: parameter: INPUT parameter_assigner: => column_reference: naked_identifier: custom_fields end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: point - colon: ':' - semi_structured_element: from - colon: ':' - semi_structured_element: latitude casting_operator: '::' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '6' - end_bracket: ) alias_expression: keyword: AS naked_identifier: lat - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: point - colon: ':' - semi_structured_element: from - colon: ':' - semi_structured_element: longitude casting_operator: '::' data_type: data_type_identifier: NUMBER bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '10' - comma: ',' - numeric_literal: '6' - end_bracket: ) alias_expression: keyword: AS naked_identifier: lng from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured_2.sql000066400000000000000000000001751451700765000261470ustar00rootroot00000000000000select value:data:to::string AS TO_PHONE_NUMBER, value:data:from::string AS FROM_PHONE_NUMBER FROM a.b.ticket_audits sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured_2.yml000066400000000000000000000035731451700765000261560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
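# Bracket notation traverses the same semi-structured paths and avoids
# clashes with keywords such as `from` (illustrative rewrite of the query
# above):
#   SELECT value['data']['from']::string AS from_phone_number
#   FROM a.b.ticket_audits;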
_hash: a720837903ff876f4ee7516d52de72abaa89209df48cac70b765bb2ab8a73073 file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: data - colon: ':' - semi_structured_element: to casting_operator: '::' data_type: data_type_identifier: string alias_expression: keyword: AS naked_identifier: TO_PHONE_NUMBER - comma: ',' - select_clause_element: expression: cast_expression: column_reference: naked_identifier: value semi_structured_expression: - colon: ':' - semi_structured_element: data - colon: ':' - semi_structured_element: from casting_operator: '::' data_type: data_type_identifier: string alias_expression: keyword: AS naked_identifier: FROM_PHONE_NUMBER from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: a - dot: . - naked_identifier: b - dot: . - naked_identifier: ticket_audits sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured_3.sql000066400000000000000000000002051451700765000261420ustar00rootroot00000000000000SELECT PARSE_JSON(t.metadata)['names'][0] AS first_name, PARSE_JSON(t.metadata):customer_id AS customer_id FROM tickets AS t sqlfluff-2.3.5/test/fixtures/dialects/snowflake/semi_structured_3.yml000066400000000000000000000043151451700765000261520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8d87ced2cb76bdc46947437b43aeacc28944904f5747df8f6bde8556a59c7527 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: - function: function_name: function_name_identifier: PARSE_JSON bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: metadata end_bracket: ) - array_accessor: start_square_bracket: '[' expression: quoted_literal: "'names'" end_square_bracket: ']' - array_accessor: start_square_bracket: '[' numeric_literal: '0' end_square_bracket: ']' alias_expression: keyword: AS naked_identifier: first_name - comma: ',' - select_clause_element: expression: function: function_name: function_name_identifier: PARSE_JSON bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: metadata end_bracket: ) semi_structured_expression: colon: ':' semi_structured_element: customer_id alias_expression: keyword: AS naked_identifier: customer_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tickets alias_expression: keyword: AS naked_identifier: t sqlfluff-2.3.5/test/fixtures/dialects/snowflake/set_call_variable.sql000066400000000000000000000000651451700765000261360ustar00rootroot00000000000000SET _VARIABLE1 = 'Hello World'; SELECT $_VARIABLE1; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/set_call_variable.yml000066400000000000000000000014251451700765000261410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f3cb485972fb6c848837633114e0e19a37f062a1b47d1540c782986f5a70996f file: - statement: set_statement: keyword: SET variable: _VARIABLE1 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Hello World'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: variable: $_VARIABLE1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/set_command.sql000066400000000000000000000003271451700765000247750ustar00rootroot00000000000000set v1 = 10; set v2 = 'example'; set (v1, v2) = (10, 'example'); set id_threshold = (select count(*) from table1) / 2; set (min, max) = (40, 70); set (min, max) = (50, 2 * $min); SET THIS_ROLE=CURRENT_ROLE(); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/set_command.yml000066400000000000000000000070001451700765000247720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b6a01ccb10ee6afc5a2fd0c802d14c70616fa9cfb74a0541f04ea5e4034e88f1 file: - statement: set_statement: keyword: set variable: v1 comparison_operator: raw_comparison_operator: '=' expression: numeric_literal: '10' - statement_terminator: ; - statement: set_statement: keyword: set variable: v2 comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'example'" - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: v1 - comma: ',' - variable: v2 - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: quoted_literal: "'example'" - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: set variable: id_threshold comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 end_bracket: ) binary_operator: / numeric_literal: '2' - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: min - comma: ',' - variable: max - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '70' - end_bracket: ) - statement_terminator: ; - statement: set_statement: - keyword: set - bracketed: - start_bracket: ( - variable: min - comma: ',' - variable: max - end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - bracketed: - start_bracket: ( - expression: numeric_literal: '50' - comma: ',' - expression: numeric_literal: '2' binary_operator: '*' variable: $min - end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET variable: THIS_ROLE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: 
CURRENT_ROLE bracketed: start_bracket: ( end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/string_literal.sql000066400000000000000000000004061451700765000255240ustar00rootroot00000000000000-- In snowflake, a double single quote resolves as a single quote in the string. -- https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants SELECT '['']'; -- Snowflake allows dollar quoted string literals select $$abc$$; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/string_literal.yml000066400000000000000000000013411451700765000255250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8026d140cc21fee5684e63cbd743c482aa50f18cbb0d789f3ac95176dd50d254 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'['']'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: quoted_literal: $$abc$$ - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/transactions.sql000066400000000000000000000002611451700765000252110ustar00rootroot00000000000000begin; begin work; begin transaction; begin name t4; begin work name t4; begin transaction name t4; start transaction; start transaction name t4; rollback; commit; commit work; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/transactions.yml000066400000000000000000000034301451700765000252140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
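# The transaction fragments above combine into a complete unit of work
# (illustrative; `temp` is a hypothetical table):
#   begin transaction name t4; truncate table if exists temp; commit;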
_hash: 6550f6f5929d6d869bf393fbef0c42b2a484fb882175303e39c534b4ac54b510 file: - statement: transaction_statement: keyword: begin - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: work - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: begin - keyword: transaction - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - statement_terminator: ; - statement: transaction_statement: - keyword: start - keyword: transaction - keyword: name - object_reference: naked_identifier: t4 - statement_terminator: ; - statement: transaction_statement: keyword: rollback - statement_terminator: ; - statement: transaction_statement: keyword: commit - statement_terminator: ; - statement: transaction_statement: - keyword: commit - keyword: work - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/truncate_table.sql000066400000000000000000000001231451700765000254720ustar00rootroot00000000000000truncate table temp; truncate table if exists temp; truncate table something.temp; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/truncate_table.yml000066400000000000000000000016771451700765000255130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4efc78fdb1e3d21be500ec3c7eb1efb94708abf5cd62c35683a2395f46d8a766 file: - statement: truncate_table: - keyword: truncate - keyword: table - table_reference: naked_identifier: temp - statement_terminator: ; - statement: truncate_table: - keyword: truncate - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: temp - statement_terminator: ; - statement: truncate_table: - keyword: truncate - keyword: table - table_reference: - naked_identifier: something - dot: . - naked_identifier: temp - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/undrop.sql000066400000000000000000000001041451700765000240040ustar00rootroot00000000000000UNDROP database mytestdb2; undrop schema myschema; undrop table t2; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/undrop.yml000066400000000000000000000015631451700765000240200ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
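# UNDROP restores the most recently dropped version of the object from
# Time Travel, so a drop can be reversed in place (illustrative):
#   drop table t2;
#   undrop table t2;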
_hash: 838d75cabfaa26669ef8ee2837f2c286b8b34c3cb9a06cd10612193b6ed2e1dc file: - statement: undrop_statement: - keyword: UNDROP - keyword: database - database_reference: naked_identifier: mytestdb2 - statement_terminator: ; - statement: undrop_statement: - keyword: undrop - keyword: schema - schema_reference: naked_identifier: myschema - statement_terminator: ; - statement: undrop_statement: - keyword: undrop - keyword: table - table_reference: naked_identifier: t2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/unset.sql000066400000000000000000000000461451700765000236400ustar00rootroot00000000000000unset v1; unset v2; unset (v1, v2); sqlfluff-2.3.5/test/fixtures/dialects/snowflake/unset.yml000066400000000000000000000014411451700765000236420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 17c30e3033979c450023ffe9cb916954587005749f7d32c4841c4b91bd1adde0 file: - statement: unset_statement: keyword: unset variable: v1 - statement_terminator: ; - statement: unset_statement: keyword: unset variable: v2 - statement_terminator: ; - statement: unset_statement: keyword: unset bracketed: - start_bracket: ( - variable: v1 - comma: ',' - variable: v2 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/use.sql000066400000000000000000000005331451700765000232770ustar00rootroot00000000000000use role my_role; use warehouse my_warehouse; use database my_database; use schema my_schema; USE ROLE "MY_ROLE"; USE WAREHOUSE "MY_WAREHOUSE"; USE DATABASE "MY_DATABASE"; USE "MY_DATABASE"; USE SCHEMA "MY_DATABASE"."MY_SCHEMA"; USE SCHEMA "MY_SCHEMA"; USE "MY_DATABASE"."MY_SCHEMA"; USE SECONDARY ROLES ALL; USE SECONDARY ROLES NONE; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/use.yml000066400000000000000000000047751451700765000233150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
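# A fully qualified USE SCHEMA sets the database and schema in one statement,
# with or without quoting (illustrative unquoted form of the fixture above):
#   use schema my_database.my_schema;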
_hash: ccec964f5d1d6a5487cf768b3f963f6a47fe9b86abda53240e605d8e8580a8e1 file: - statement: use_statement: - keyword: use - keyword: role - object_reference: naked_identifier: my_role - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: warehouse - object_reference: naked_identifier: my_warehouse - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: database - database_reference: naked_identifier: my_database - statement_terminator: ; - statement: use_statement: - keyword: use - keyword: schema - schema_reference: naked_identifier: my_schema - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: ROLE - object_reference: quoted_identifier: '"MY_ROLE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: WAREHOUSE - object_reference: quoted_identifier: '"MY_WAREHOUSE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: DATABASE - database_reference: quoted_identifier: '"MY_DATABASE"' - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: quoted_identifier: '"MY_DATABASE"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: - quoted_identifier: '"MY_DATABASE"' - dot: . - quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SCHEMA - schema_reference: quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: keyword: USE database_reference: - quoted_identifier: '"MY_DATABASE"' - dot: . - quoted_identifier: '"MY_SCHEMA"' - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SECONDARY - keyword: ROLES - keyword: ALL - statement_terminator: ; - statement: use_statement: - keyword: USE - keyword: SECONDARY - keyword: ROLES - keyword: NONE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/window_function_ignore_nulls.sql000066400000000000000000000003321451700765000304740ustar00rootroot00000000000000 SELECT FIRST_VALUE(foo) IGNORE NULLS over ( PARTITION BY buzz ORDER BY bar ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS bat from some_table sqlfluff-2.3.5/test/fixtures/dialects/snowflake/window_function_ignore_nulls.yml000066400000000000000000000037321451700765000305050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
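# RESPECT NULLS is the explicit counterpart of IGNORE NULLS in the same
# syntactic slot (illustrative variant of the query above):
#   SELECT LAST_VALUE(foo) RESPECT NULLS
#       OVER (PARTITION BY buzz ORDER BY bar) AS bat
#   FROM some_table;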
_hash: 24b3b3cbfd9990418a4b456c5c443f94639d0ec00d3e329e739e28150c62a2c4 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: function_name_identifier: FIRST_VALUE - bracketed: start_bracket: ( expression: column_reference: naked_identifier: foo end_bracket: ) - keyword: IGNORE - keyword: NULLS - over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: buzz orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: bar frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: AS naked_identifier: bat from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table sqlfluff-2.3.5/test/fixtures/dialects/snowflake/within_group.sql000066400000000000000000000031411451700765000252170ustar00rootroot00000000000000-- Snowflake style WITHIN GROUP window functions with favourite_fruits as ( select column1 as name, column2 as colour from (values ('apple', 'green'), ('unripe banana', 'green'), ('kiwi', 'green'), ('blueberry', 'blue'), ('strawberry', 'red'), ('grape', 'red') ) ) select colour, listagg(name, ', ') within group (order by name) as fruits from favourite_fruits group by colour; SELECT ARRAY_AGG(o_orderkey) WITHIN GROUP (ORDER BY o_orderkey ASC) FROM orders; select array_agg(o_orderkey) within group (order by o_orderkey asc) from orders where o_totalprice > 450000; select array_agg(distinct o_orderstatus) within group (order by o_orderstatus asc) from orders where o_totalprice > 450000 order by o_orderstatus asc; select o_orderstatus, array_agg(o_clerk) within group (order by o_totalprice desc) from orders where o_totalprice > 450000 group by o_orderstatus order by o_orderstatus desc; select listagg(o_orderkey, ' ') from orders where o_totalprice > 450000; select listagg(distinct o_orderstatus, '|') from orders where o_totalprice > 450000; select o_orderstatus, listagg(o_clerk, ', ') within group (order by o_totalprice desc) from orders where o_totalprice > 450000 group by o_orderstatus; select listagg(spanish_phrase, '|') within group (order by collate(spanish_phrase, 'sp')) from collation_demo group by english_phrase; select listagg(spanish_phrase, '|') within group (order by collate(spanish_phrase, 'utf8')) from collation_demo group by english_phrase; sqlfluff-2.3.5/test/fixtures/dialects/snowflake/within_group.yml000066400000000000000000000421321451700765000252240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
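# DISTINCT and WITHIN GROUP combine in a single LISTAGG call (illustrative
# merge of two forms that appear separately in the fixture above):
#   select listagg(distinct o_orderstatus, '|')
#       within group (order by o_orderstatus)
#   from orders;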
_hash: c1d29888808dc13339ff7d776df74d2f9c43d7f6fcf81f2a7b604134c1cf1f30 file: - statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: favourite_fruits keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: column1 alias_expression: keyword: as naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: column2 alias_expression: keyword: as naked_identifier: colour from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: quoted_literal: "'apple'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'unripe banana'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'kiwi'" - comma: ',' - expression: quoted_literal: "'green'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'blueberry'" - comma: ',' - expression: quoted_literal: "'blue'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'strawberry'" - comma: ',' - expression: quoted_literal: "'red'" - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'grape'" - comma: ',' - expression: quoted_literal: "'red'" - end_bracket: ) end_bracket: ) end_bracket: ) select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: colour - comma: ',' - select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: name end_bracket: ) alias_expression: keyword: as naked_identifier: fruits from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: favourite_fruits groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: colour - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: ARRAY_AGG bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: o_orderkey - keyword: ASC end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: array_agg bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_orderkey end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: 
o_orderkey - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: array_agg bracketed: start_bracket: ( keyword: distinct expression: column_reference: naked_identifier: o_orderstatus end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: asc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: asc - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: o_orderstatus - comma: ',' - select_clause_element: function: function_name: function_name_identifier: array_agg bracketed: start_bracket: ( expression: column_reference: naked_identifier: o_clerk end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_totalprice - keyword: desc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: o_orderstatus orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_orderstatus - keyword: desc - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: o_orderkey - comma: ',' - expression: quoted_literal: "' '" - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - keyword: distinct - expression: column_reference: naked_identifier: o_orderstatus - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: 
raw_comparison_operator: '>' numeric_literal: '450000' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: o_orderstatus - comma: ',' - select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: o_clerk - comma: ',' - expression: quoted_literal: "', '" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: o_totalprice - keyword: desc end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders where_clause: keyword: where expression: column_reference: naked_identifier: o_totalprice comparison_operator: raw_comparison_operator: '>' numeric_literal: '450000' groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: o_orderstatus - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - expression: function: function_name: function_name_identifier: collate bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'sp'" - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: collation_demo groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: english_phrase - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: listagg bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'|'" - end_bracket: ) withingroup_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - expression: function: function_name: function_name_identifier: collate bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: spanish_phrase - comma: ',' - expression: quoted_literal: "'utf8'" - end_bracket: ) end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: collation_demo groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: english_phrase - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/soql/000077500000000000000000000000001451700765000207465ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/soql/.sqlfluff000066400000000000000000000000321451700765000225640ustar00rootroot00000000000000[sqlfluff] dialect = soql sqlfluff-2.3.5/test/fixtures/dialects/soql/date_literals.sql000066400000000000000000000001661451700765000243060ustar00rootroot00000000000000SELECT * FROM LiveChatTranscript WHERE LastModifiedDate >= 2022-08-28T00:00:00Z AND LastModifiedDate < 2023-01-01 
sqlfluff-2.3.5/test/fixtures/dialects/soql/date_literals.yml000066400000000000000000000024531451700765000243110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a2a975041ccad2721f7111224ee87a04580072232caaebb4419c3e79557ba39c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: LiveChatTranscript where_clause: keyword: WHERE expression: - column_reference: naked_identifier: LastModifiedDate - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - datetime_literal: '2022-08-28T00:00:00Z' - binary_operator: AND - column_reference: naked_identifier: LastModifiedDate - comparison_operator: raw_comparison_operator: < - date_literal: '2023-01-01' sqlfluff-2.3.5/test/fixtures/dialects/soql/select_where_date_literals.sql000066400000000000000000000000701451700765000270310ustar00rootroot00000000000000SELECT Id FROM Account WHERE CreatedDate = NEXT_90_DAYS sqlfluff-2.3.5/test/fixtures/dialects/soql/select_where_date_literals.yml000066400000000000000000000017651451700765000270470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db28f921856ff81069b50adffd26b6355e3611d71897aa8f86aa4e2d2eaaa9f5 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Account where_clause: keyword: WHERE expression: column_reference: naked_identifier: CreatedDate comparison_operator: raw_comparison_operator: '=' bare_function: NEXT_90_DAYS sqlfluff-2.3.5/test/fixtures/dialects/soql/select_where_date_n_literals.sql000066400000000000000000000000721451700765000273500ustar00rootroot00000000000000SELECT Id FROM Account WHERE CreatedDate = LAST_N_WEEKS:5 sqlfluff-2.3.5/test/fixtures/dialects/soql/select_where_date_n_literals.yml000066400000000000000000000021031451700765000273470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a902fc817e4b3517a9998c2a8f9f2d7d30b6a511b4d18933c25dab439932d2d6 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Account where_clause: keyword: WHERE expression: column_reference: naked_identifier: CreatedDate comparison_operator: raw_comparison_operator: '=' date_n_literal: keyword: LAST_N_WEEKS colon: ':' numeric_literal: '5' sqlfluff-2.3.5/test/fixtures/dialects/sparksql/000077500000000000000000000000001451700765000216305ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/sparksql/.sqlfluff000066400000000000000000000000361451700765000234520ustar00rootroot00000000000000[sqlfluff] dialect = sparksql sqlfluff-2.3.5/test/fixtures/dialects/sparksql/add_file.sql000066400000000000000000000004451451700765000241030ustar00rootroot00000000000000ADD FILE "/path/to/file/abc.txt"; ADD FILE '/another/test.txt'; ADD FILE "/path with space/abc.txt"; ADD FILE "/path/to/some/directory"; ADD FILES "/path with space/cde.txt" '/path with space/fgh.txt'; -- NB: Non-quoted paths are not supported in SQLFluff currently --ADD FILE /tmp/test; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/add_file.yml000066400000000000000000000023111451700765000240770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e12d4826b7072fddd53e9e4c040cd96ae651d7999d1230aac1ae9466599d511 file: - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path/to/file/abc.txt"' - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: "'/another/test.txt'" - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path with space/abc.txt"' - statement_terminator: ; - statement: add_file_statement: keyword: ADD file_keyword: FILE quoted_literal: '"/path/to/some/directory"' - statement_terminator: ; - statement: add_file_statement: - keyword: ADD - file_keyword: FILES - quoted_literal: '"/path with space/cde.txt"' - quoted_literal: "'/path with space/fgh.txt'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/add_jar.sql000066400000000000000000000011731451700765000237370ustar00rootroot00000000000000ADD JAR "/path/to/some.jar"; ADD JAR '/some/other.jar'; ADD JAR "/path with space/abc.jar"; ADD JARS "/path with space/def.jar" '/path with space/ghi.jar'; ADD JAR "ivy://group:module:version"; ADD JAR "ivy://group:module:version?transitive=false"; ADD JAR "ivy://group:module:version?transitive=true"; ADD JAR "ivy://group:module:version?exclude=group:module&transitive=true"; ADD JAR ivy://group:module:version?exclude=group:module&transitive=true; ADD JAR /path/to/some.jar; ADD JAR path/to/some.jar; ADD JAR ivy://path/to/some.jar; -- NB: Non-quoted paths do not currently support whitespaces -- e.g. /path to/some.jar sqlfluff-2.3.5/test/fixtures/dialects/sparksql/add_jar.yml000066400000000000000000000045121451700765000237410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7ba37b07b7588c894e6ad680fafcb954a373185f6623e703546834c8ad6990c file: - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"/path/to/some.jar"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: "'/some/other.jar'" - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"/path with space/abc.jar"' - statement_terminator: ; - statement: add_jar_statement: - keyword: ADD - file_keyword: JARS - quoted_literal: '"/path with space/def.jar"' - quoted_literal: "'/path with space/ghi.jar'" - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=false"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?transitive=true"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: '"ivy://group:module:version?exclude=group:module&transitive=true"' - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: ivy://group:module:version?exclude=group:module&transitive=true - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: /path/to/some.jar - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: path/to/some.jar - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR file_literal: ivy://path/to/some.jar - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_database.sql000066400000000000000000000005371451700765000253110ustar00rootroot00000000000000ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John', 'Edit-date' = '01/01/2001' ); ALTER SCHEMA inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); ALTER SCHEMA inventory SET DBPROPERTIES ( 'Edited-by' = 'John', 'Edit-date' = '01/01/2001' ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_database.yml000066400000000000000000000051441451700765000253120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ac38d2239b9a4d6f5bfbca9f35cabd66d1cc1f2dded465f7773b63f14469a8f9 file: - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'Edited-by'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - comma: ',' - property_name_identifier: quoted_identifier: "'Edit-date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01/01/2001'" - end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: alter_database_statement: - keyword: ALTER - keyword: SCHEMA - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'Edited-by'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - comma: ',' - property_name_identifier: quoted_identifier: "'Edit-date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01/01/2001'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_table.sql000066400000000000000000000035211451700765000246300ustar00rootroot00000000000000---- RENAME table ALTER TABLE Student RENAME TO StudentInfo; ---- RENAME partition ALTER TABLE Default.StudentInfo PARTITION ( Age = '10' ) RENAME TO PARTITION ( Age = '15' ); -- Add new columns to a table ALTER TABLE StudentInfo ADD COLUMNS (LastName STRING, DOB TIMESTAMP); -- ALTER OR CHANGE COLUMNS ALTER TABLE StudentInfo ALTER COLUMN Name COMMENT "new comment"; ALTER TABLE StudentInfo CHANGE COLUMN Name COMMENT "new comment"; ---- Add a new partition to a table ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (Age = 18); -- Adding multiple partitions to the table ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION ( Age = 18 ) PARTITION (Age = 20); -- Drop a partition from the table ALTER TABLE StudentInfo DROP IF EXISTS PARTITION (Age = 18); -- SET TABLE PROPERTIES ALTER TABLE Dbx.Tab1 SET TBLPROPERTIES ('winner' = 'loser'); -- SET TABLE COMMENT Using SET PROPERTIES ALTER TABLE Dbx.Tab1 SET TBLPROPERTIES ('comment' = 'A table comment.'); -- Alter TABLE COMMENT Using SET PROPERTIES ALTER TABLE Dbx.Tab1 SET TBLPROPERTIES ('comment' = 'This is a new comment.'); -- DROP TABLE PROPERTIES ALTER TABLE Dbx.Tab1 UNSET TBLPROPERTIES ('winner'); -- SET SERDE/ SERDE Properties ALTER TABLE Table_Identifier SET SERDEPROPERTIES ( "key1" = "val1", "key2" = "val2"); ALTER TABLE Test_Tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; ALTER TABLE Dbx.Tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee'); -- Change the fileformat ALTER TABLE Loc_Orc 
SET FILEFORMAT ORC; ALTER TABLE P1 PARTITION (Month = 2, Day = 2) SET FILEFORMAT PARQUET; -- Change the file Location ALTER TABLE Dbx.Tab1 SET LOCATION '/path/to/part/ways'; ALTER TABLE Dbx.Tab1 PARTITION (A = '1', B = '2') SET LOCATION '/path/to/part/ways'; -- Recover Partitions ALTER TABLE Dbx.Tab1 RECOVER PARTITIONS; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_table.yml000066400000000000000000000237771451700765000246510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 950e623eb6f1dbd551933c652330e5fda4c2049c80cad313a2f6d4898bf4d7b7 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Student - keyword: RENAME - keyword: TO - table_reference: naked_identifier: StudentInfo - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Default - dot: . - naked_identifier: StudentInfo - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'10'" end_bracket: ) - keyword: RENAME - keyword: TO - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' quoted_literal: "'15'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: LastName data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: DOB data_type: primitive_type: keyword: TIMESTAMP - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ALTER - keyword: COLUMN - column_reference: naked_identifier: Name - keyword: COMMENT - quoted_literal: '"new comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: Name - keyword: COMMENT - quoted_literal: '"new comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: ADD - keyword: IF - keyword: NOT - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' 
numeric_literal: '20' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: StudentInfo - keyword: DROP - keyword: IF - keyword: EXISTS - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '18' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'loser'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'A table comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'This is a new comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'winner'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Table_Identifier - keyword: SET - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Test_Tab - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . 
- naked_identifier: Tab1 - keyword: SET - keyword: SERDE - quoted_literal: "'org.apache.hadoop'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'k'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'v'" - comma: ',' - property_name_identifier: quoted_identifier: "'kay'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'vee'" - end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: Loc_Orc - keyword: SET - keyword: FILEFORMAT - data_source_format: keyword: ORC - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: P1 - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: Month - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: Day - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - end_bracket: ) - keyword: SET - keyword: FILEFORMAT - data_source_format: keyword: PARQUET - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: SET - keyword: LOCATION - quoted_literal: "'/path/to/part/ways'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'1'" - comma: ',' - column_reference: naked_identifier: B - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'2'" - end_bracket: ) - keyword: SET - keyword: LOCATION - quoted_literal: "'/path/to/part/ways'" - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Dbx - dot: . - naked_identifier: Tab1 - keyword: RECOVER - keyword: PARTITIONS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_view.sql000066400000000000000000000013501451700765000245110ustar00rootroot00000000000000-- RENAME View ALTER VIEW view_identifier RENAME TO view_identifier; ALTER VIEW tempdb1.v1 RENAME TO tempdb1.v2; --SET View Properties ALTER VIEW view_identifier SET TBLPROPERTIES ( "property_key" = "property_val"); ALTER VIEW tempdb1.v2 SET TBLPROPERTIES ( 'created.by.user' = "John", 'created.date' = '01-01-2001' ); --UNSET View Properties ALTER VIEW view_identifier UNSET TBLPROPERTIES ( "property_key"); ALTER VIEW view_identifier UNSET TBLPROPERTIES IF EXISTS ( "property_key"); ALTER VIEW tempdb1.v2 UNSET TBLPROPERTIES ('created.by.user', 'created.date'); --ALTER View AS SELECT ALTER VIEW view_identifier AS ( SELECT a, b FROM tempdb1.v1 ); ALTER VIEW tempdb1.v2 AS SELECT a, b FROM tempdb1.v1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/alter_view.yml000066400000000000000000000120161451700765000245140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3bc08ded9eb4bb646d9da693345d24b71041990a979e2bcc6470b2874f39289 file: - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: RENAME - keyword: TO - table_reference: naked_identifier: view_identifier - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 - keyword: RENAME - keyword: TO - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_val"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - keyword: SET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'created.by.user'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John"' - comma: ',' - property_name_identifier: quoted_identifier: "'created.date'" - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'01-01-2001'" - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: UNSET - keyword: TBLPROPERTIES - keyword: IF - keyword: EXISTS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_key"' end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v2 - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: "'created.by.user'" - comma: ',' - property_name_identifier: quoted_identifier: "'created.date'" - end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 end_bracket: ) - statement_terminator: ; - statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: - naked_identifier: tempdb1 - dot: . 
- naked_identifier: v2 - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: tempdb1 - dot: . - naked_identifier: v1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/analyze_table.sql000066400000000000000000000005031451700765000251610ustar00rootroot00000000000000ANALYZE TABLE students COMPUTE STATISTICS NOSCAN; ANALYZE TABLE students COMPUTE STATISTICS; ANALYZE TABLE students PARTITION (student_id = 111111) COMPUTE STATISTICS; ANALYZE TABLE students COMPUTE STATISTICS FOR COLUMNS name; ANALYZE TABLES IN school_db COMPUTE STATISTICS NOSCAN; ANALYZE TABLES COMPUTE STATISTICS; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/analyze_table.yml000066400000000000000000000040301451700765000251620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 056710a8f94006a6da745c58745d9f8198f33ed161a700216108d294b2de9b04 file: - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - keyword: NOSCAN - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: student_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '111111' end_bracket: ) - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLE - table_reference: naked_identifier: students - keyword: COMPUTE - keyword: STATISTICS - keyword: FOR - keyword: COLUMNS - column_reference: naked_identifier: name - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLES - keyword: IN - database_reference: naked_identifier: school_db - keyword: COMPUTE - keyword: STATISTICS - keyword: NOSCAN - statement_terminator: ; - statement: analyze_table_statement: - keyword: ANALYZE - keyword: TABLES - keyword: COMPUTE - keyword: STATISTICS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/bytes_literal.sql000066400000000000000000000000621451700765000252110ustar00rootroot00000000000000SELECT X'123456' AS col; SELECT X"123456" AS col; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/bytes_literal.yml000066400000000000000000000016361451700765000252230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: e725c8164dc5f07b2a3d13435de062d9aa60eac3ab24adb4138e8976d16fc43f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bytes_quoted_literal: "X'123456'" alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bytes_quoted_literal: X"123456" alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/cache_table.sql000066400000000000000000000005771451700765000245740ustar00rootroot00000000000000CACHE TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') SELECT A, B FROM TESTDATA; CACHE LAZY TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE OPTIONS ('storageLevel' 'DISK_ONLY') AS SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE OPTIONS ('storageLevel' = 'DISK_ONLY') AS SELECT A FROM TESTDATA; CACHE TABLE TESTCACHE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/cache_table.yml000066400000000000000000000073311451700765000245710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6045a35c1769a30b9615402d927e261987c94945792b366a2c8a35401c6410a4 file: - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: A - comma: ',' - select_clause_element: column_reference: naked_identifier: B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: LAZY - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" quoted_literal: "'DISK_ONLY'" end_bracket: ) - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - keyword: OPTIONS - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'storageLevel'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'DISK_ONLY'" end_bracket: ) - keyword: AS - 
select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: TESTDATA - statement_terminator: ; - statement: cache_table: - keyword: CACHE - keyword: TABLE - table_reference: naked_identifier: TESTCACHE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/case_clause.sql000066400000000000000000000010151451700765000246150ustar00rootroot00000000000000SELECT id, CASE WHEN id > 200 THEN 'bigger' ELSE 'small' END FROM person; SELECT id, CASE WHEN id > 200 THEN 'bigger' ELSE 'small' END AS id_size FROM person; SELECT id, CASE id WHEN 100 THEN 'bigger' WHEN id > 300 THEN '300' ELSE 'small' END FROM person; SELECT id FROM person WHERE CASE 1 = 1 WHEN 100 THEN 'big' WHEN 200 THEN 'bigger' WHEN 300 THEN 'biggest' ELSE 'small' END = 'small'; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/case_clause.yml000066400000000000000000000130051451700765000246210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0b778250fdcce13dff7f32fa97a4100fea959a433afca2dbbdeb79d46a6a42f0 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END alias_expression: keyword: AS naked_identifier: id_size from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - expression: column_reference: naked_identifier: id - when_clause: - keyword: WHEN - expression: numeric_literal: '100' - keyword: THEN - expression: quoted_literal: "'bigger'" - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '300' - keyword: THEN - expression: quoted_literal: "'300'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END from_clause: keyword: 
FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: case_expression: - keyword: CASE - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - when_clause: - keyword: WHEN - expression: numeric_literal: '100' - keyword: THEN - expression: quoted_literal: "'big'" - when_clause: - keyword: WHEN - expression: numeric_literal: '200' - keyword: THEN - expression: quoted_literal: "'bigger'" - when_clause: - keyword: WHEN - expression: numeric_literal: '300' - keyword: THEN - expression: quoted_literal: "'biggest'" - else_clause: keyword: ELSE expression: quoted_literal: "'small'" - keyword: END comparison_operator: raw_comparison_operator: '=' quoted_literal: "'small'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/clear_cache.sql000066400000000000000000000000151451700765000245560ustar00rootroot00000000000000CLEAR CACHE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/clear_cache.yml000066400000000000000000000007541451700765000245720ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 88187c4784eba3d8ed01791d6bb354c7e716f93c2526abf5e4b76d77e47499c9 file: statement: clear_cache: - keyword: CLEAR - keyword: CACHE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/common_table_expressions.sql000066400000000000000000000016331451700765000274550ustar00rootroot00000000000000-- CTE with multiple column aliases WITH t(x, y) AS ( SELECT 1, 2 ) SELECT * FROM t WHERE x = 1 AND y = 2; -- CTE in CTE definition WITH t AS ( WITH t2 AS (SELECT 1) SELECT * FROM t2 ) SELECT * FROM t; -- CTE in subquery SELECT max(c) FROM ( WITH t(c) AS (SELECT 1) SELECT * FROM t ); -- CTE in subquery expression SELECT ( WITH t AS (SELECT 1) SELECT * FROM t ); -- CTE in CREATE VIEW statement CREATE VIEW v AS WITH t(a, b, c, d) AS ( SELECT 1, 2, 3, 4 ) SELECT * FROM t; SELECT * FROM v; -- If name conflict is detected in nested CTE, then AnalysisException is thrown by default. -- SET spark.sql.legacy.ctePrecedencePolicy = CORRECTED (which is recommended), -- inner CTE definitions take precedence over outer definitions. WITH t AS ( SELECT 1 ), t2 AS ( WITH t AS (SELECT 2) SELECT * FROM t ) SELECT * FROM t2; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/common_table_expressions.yml000066400000000000000000000250771451700765000274670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f6284ad17cca775e8dc9aee5fe276a1e65b6d4f00bfdaa01ddbc5827a1ac8761 file: - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: x - comma: ',' - naked_identifier: y end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t where_clause: keyword: WHERE expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: y - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t2 keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: naked_identifier: c end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: 
naked_identifier: t end_bracket: ) - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: v - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - comma: ',' - naked_identifier: d end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '1' - comma: ',' - select_clause_element: numeric_literal: '2' - comma: ',' - select_clause_element: numeric_literal: '3' - comma: ',' - select_clause_element: numeric_literal: '4' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: v - statement_terminator: ; - statement: with_compound_statement: - keyword: WITH - common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' end_bracket: ) - comma: ',' - common_table_expression: naked_identifier: t2 keyword: AS bracketed: start_bracket: ( with_compound_statement: keyword: WITH common_table_expression: naked_identifier: t keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '2' end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t end_bracket: ) - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_database.sql000066400000000000000000000016371451700765000254470ustar00rootroot00000000000000-- Create database with all optional syntax CREATE DATABASE IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value"); -- Create schema with all optional syntax CREATE SCHEMA IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value" ); -- Create database `customer_db`. CREATE DATABASE customer_db; -- Create database `customer_db` only if database with same name doesn't exist. CREATE DATABASE IF NOT EXISTS customer_db; -- `Comments`,`Specific Location` and `Database properties`. 
CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES ("ID" = "001", "Name" = 'John'); -- Create `inventory_db` Database CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory'; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_database.yml000066400000000000000000000063461451700765000254530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0cbca08dc935648cfc41b8f023b108c9d931dde9b687859bf660e6557639680b file: - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: SCHEMA - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: customer_db - keyword: COMMENT - quoted_literal: "'This is customer database'" - keyword: LOCATION - quoted_literal: "'/user'" - keyword: WITH - keyword: DBPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"ID"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"001"' - comma: ',' - property_name_identifier: quoted_identifier: '"Name"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'John'" - end_bracket: ) - statement_terminator: ; - statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - database_reference: naked_identifier: inventory_db - keyword: COMMENT - quoted_literal: "'This database is used to maintain Inventory'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_function.sql000066400000000000000000000014561451700765000255270ustar00rootroot00000000000000-- Create FUNCTION with all optional syntax CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS function_name AS "class_name" USING FILE "resource_locations"; -- Create a permanent function called 
`simple_udf`. CREATE FUNCTION simple_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Created a temporary function. CREATE TEMPORARY FUNCTION simple_temp_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; -- Replace the implementation of `simple_udf` CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR' USING JAR '/tmp/SimpleUdfR.jar'; -- Create a permanent function `test_avg` CREATE FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; ---- Create Temporary function `test_avg` CREATE TEMPORARY FUNCTION test_avg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_function.yml000066400000000000000000000044561451700765000255340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 40517fe1d1589508ea7e1ea2a3179256d8d7cfdcf5dd24ac85ef41ef8f036cee file: - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name_identifier: function_name - keyword: AS - quoted_literal: '"class_name"' - keyword: USING - file_keyword: FILE - quoted_literal: '"resource_locations"' - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: simple_temp_udf - keyword: AS - quoted_literal: "'SimpleUdf'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdf.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: FUNCTION - function_name_identifier: simple_udf - keyword: AS - quoted_literal: "'SimpleUdfR'" - keyword: USING - file_keyword: JAR - quoted_literal: "'/tmp/SimpleUdfR.jar'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; - statement: create_function_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: FUNCTION - function_name_identifier: test_avg - keyword: AS - quoted_literal: "'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_complex_datatypes.sql000066400000000000000000000014451451700765000305740ustar00rootroot00000000000000--Create Table with complex datatypes CREATE TABLE table_identifier ( a STRUCT, d MAP, e ARRAY); --Create Table with complex datatypes and comments CREATE TABLE table_identifier ( a STRUCT COMMENT 'col_comment', d MAP COMMENT 'col_comment', e ARRAY COMMENT 'col_comment'); --Create Table with nested complex datatypes CREATE TABLE table_identifier ( a STRUCT>, d MAP>>, g ARRAY>>); --Create Table with complex datatypes and quoted identifiers CREATE TABLE table_identifier ( a STRUCT<`b`: STRING, c: BOOLEAN>, `d` 
MAP, e ARRAY); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_complex_datatypes.yml000066400000000000000000000213451451700765000305770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9630e84f8a1e62373243524752cc6eb7ab25f89c00d9d3c3b29fa70b2f564a4c file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: "'struct_comment'" - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - comma: ',' - column_definition: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - comma: ',' - column_definition: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: "'col_comment'" - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: b - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: d data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - 
comma: ',' - data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: e - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: f - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: g data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: h - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: i - colon: ':' - data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - end_angle_bracket: '>' end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - quoted_identifier: '`b`' - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: c - colon: ':' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: quoted_identifier: '`d`' data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: BOOLEAN - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: e data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_datasource.sql000066400000000000000000000026001451700765000271730ustar00rootroot00000000000000--Create Datasource Table with all optional syntax CREATE TABLE IF NOT EXISTS table_identifier ( test STRING COMMENT "col_comment1" ) USING PARQUET OPTIONS ( "key1" = "val1", "key2" = "val2") PARTITIONED BY ( col_name1, col_name2 ) CLUSTERED BY ( col_name3, col_name4) SORTED BY ( col_name3 ASC, col_name4 DESC) INTO 4 BUCKETS LOCATION 'root/database/table' COMMENT 'this is a comment' TBLPROPERTIES ( "key1" = "val1", "key2" = "val2" ) AS SELECT id, name FROM student; --Use data source CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV; --Use data from another table CREATE TABLE student_copy USING CSV AS SELECT id, name FROM student; --Omit the USING clause --uses the default data source (parquet by default) CREATE TABLE student (id INT, student_name STRING, age INT); --Specify table comment and properties CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV COMMENT 'this is a comment' TBLPROPERTIES ('foo' = 'bar'); --Create partitioned and bucketed table CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV PARTITIONED BY (age) CLUSTERED BY (id) INTO 4 BUCKETS; CREATE EXTERNAL TABLE IF NOT EXISTS student (id INT, student_name STRING, age INT) USING iceberg PARTITIONED BY (age); CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV COMMENT "this is a comment" PARTITIONED BY (age) STORED AS PARQUET; 
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_datasource.yml000066400000000000000000000237241451700765000272070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 50ba3daf8fbee551483efd319027afab8246365b1f64835f1d92b8d5e25bd25d file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - bracketed: start_bracket: ( column_definition: naked_identifier: test data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: PARQUET - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - comma: ',' - column_reference: naked_identifier: col_name2 - end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name3 - comma: ',' - column_reference: naked_identifier: col_name4 - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name3 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col_name4 - keyword: DESC - end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - keyword: LOCATION - quoted_literal: "'root/database/table'" - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_copy - using_clause: 
keyword: USING data_source_format: keyword: CSV - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: iceberg - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: 
student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - keyword: COMMENT - quoted_literal: '"this is a comment"' - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: age end_bracket: ) - keyword: STORED - keyword: AS - keyword: PARQUET - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_hiveformat.sql000066400000000000000000000063751451700765000272160ustar00rootroot00000000000000--Create Hiveformat Table with all optional syntax CREATE EXTERNAL TABLE IF NOT EXISTS table_identifier ( col_name1 STRING COMMENT "col_comment1") COMMENT "table_comment" PARTITIONED BY ( col_name2 STRING COMMENT "col_comment2" ) CLUSTERED BY ( col_name1, col_name2) SORTED BY ( col_name1 ASC, col_name2 DESC ) INTO 3 BUCKETS ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS PARQUET LOCATION "path/to/files" TBLPROPERTIES ( "key1" = "val1", "key2" = "val2") AS (SELECT * FROM table_identifier); --Use hive format CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC; --Use data from another table CREATE TABLE student_copy STORED AS ORC AS SELECT * FROM student; --Specify table comment and properties CREATE TABLE student (id INT, student_name STRING, age INT) COMMENT 'this is a comment' STORED AS ORC TBLPROPERTIES ('foo' = 'bar'); --Specify table comment and properties with a different clause order CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC TBLPROPERTIES ('foo' = 'bar') COMMENT 'this is a comment'; --Create partitioned table CREATE TABLE student (id INT, student_name STRING) PARTITIONED BY (age INT) STORED AS ORC; --Create partitioned table with a different clause order CREATE TABLE student (id INT, student_name STRING) STORED AS ORC PARTITIONED BY (age INT); --Use Row Format and file format CREATE TABLE student (id INT, student_name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE; --Use complex datatype CREATE EXTERNAL TABLE family( student_name STRING, friends ARRAY<STRING>, children MAP<STRING, INT>, address STRUCT<street: STRING, city: STRING> ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' COLLECTION ITEMS TERMINATED BY '_' MAP KEYS TERMINATED BY ':' LINES TERMINATED BY '\n' NULL DEFINED AS 'foonull' STORED AS TEXTFILE LOCATION '/tmp/family/'; --Use predefined custom SerDe CREATE TABLE avroexample ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' TBLPROPERTIES ( 'avro.schema.literal' = '{ "namespace": "org.apache.hive", "name": "first_schema", "type": "record", "fields": [ { "name":"string1", "type":"string" }, { "name":"string2", "type":"string" }] }' ); --Use personalized custom SerDe --(we may need to `ADD JAR xxx.jar` first to ensure we can find the serde_class, --or you may run into a `CLASSNOTFOUND` exception) ADD JAR '/tmp/hive_serde_example.jar'; CREATE EXTERNAL TABLE family (id INT, family_name STRING) ROW FORMAT SERDE 'com.ly.spark.serde.SerDeExample' STORED AS INPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleInputFormat' OUTPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleOutputFormat' LOCATION '/tmp/family/'; --Use `CLUSTERED BY` clause to create bucket table without `SORTED BY` CREATE TABLE clustered_by_test1 (id INT, age
STRING) CLUSTERED BY (id) INTO 4 BUCKETS STORED AS ORC; --Use `CLUSTERED BY` clause to create bucket table with `SORTED BY` CREATE TABLE clustered_by_test2 (id INT, test_name STRING) PARTITIONED BY (test_year STRING) CLUSTERED BY (id, name) SORTED BY (id ASC) INTO 3 BUCKETS STORED AS PARQUET; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_hiveformat.yml000066400000000000000000000402721451700765000272160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ca09e9acf6052bcb32612db25cff0ca38f9bb3ae63da9e9f5aa873bbd34ae2c8 file: - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - bracketed: start_bracket: ( column_definition: naked_identifier: col_name1 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - keyword: COMMENT - quoted_literal: '"table_comment"' - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: col_name2 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment2"' end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - comma: ',' - column_reference: naked_identifier: col_name2 - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col_name2 - keyword: DESC - end_bracket: ) - keyword: INTO - numeric_literal: '3' - keyword: BUCKETS - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: LOCATION - quoted_literal: '"path/to/files"' - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_identifier end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - 
statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_copy - keyword: STORED - keyword: AS - keyword: ORC - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - keyword: STORED - keyword: AS - keyword: ORC - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'foo'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'bar'" end_bracket: ) - keyword: COMMENT - quoted_literal: "'this is a comment'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: age data_type: primitive_type: keyword: INT end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: age data_type: primitive_type: keyword: INT end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - 
end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: family - bracketed: - start_bracket: ( - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: friends data_type: array_type: keyword: ARRAY start_angle_bracket: < data_type: primitive_type: keyword: STRING end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: children data_type: - keyword: MAP - start_angle_bracket: < - data_type: primitive_type: keyword: STRING - comma: ',' - data_type: primitive_type: keyword: INT - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: address data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: street - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: city - colon: ':' - data_type: primitive_type: keyword: STRING - end_angle_bracket: '>' - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: ESCAPED - keyword: BY - quoted_literal: "'\\\\'" - keyword: COLLECTION - keyword: ITEMS - keyword: TERMINATED - keyword: BY - quoted_literal: "'_'" - keyword: MAP - keyword: KEYS - keyword: TERMINATED - keyword: BY - quoted_literal: "':'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'foonull'" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: LOCATION - quoted_literal: "'/tmp/family/'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: avroexample - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.avro.AvroSerDe'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'" - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'avro.schema.literal'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'{\n \"namespace\": \"org.apache.hive\",\n \ \ \"name\": \"first_schema\",\n \"type\": \"record\",\n \"\ fields\": [ { \"name\":\"string1\", \"type\":\"string\" }, { \"name\":\"\ string2\", \"type\":\"string\" }]\n }'" end_bracket: ) - statement_terminator: ; - statement: add_jar_statement: keyword: ADD file_keyword: JAR quoted_literal: "'/tmp/hive_serde_example.jar'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: family - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: family_name data_type: primitive_type: keyword: STRING - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - 
quoted_literal: "'com.ly.spark.serde.SerDeExample'" - keyword: STORED - keyword: AS - keyword: INPUTFORMAT - quoted_literal: "'com.ly.spark.example.serde.io.SerDeExampleInputFormat'" - keyword: OUTPUTFORMAT - quoted_literal: "'com.ly.spark.example.serde.io.SerDeExampleOutputFormat'" - keyword: LOCATION - quoted_literal: "'/tmp/family/'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clustered_by_test1 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: INTO - numeric_literal: '4' - keyword: BUCKETS - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: clustered_by_test2 - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: test_name data_type: primitive_type: keyword: STRING - end_bracket: ) - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_definition: naked_identifier: test_year data_type: primitive_type: keyword: STRING end_bracket: ) - keyword: CLUSTERED - keyword: BY - bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: name - end_bracket: ) - keyword: SORTED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: id keyword: ASC end_bracket: ) - keyword: INTO - numeric_literal: '3' - keyword: BUCKETS - keyword: STORED - keyword: AS - keyword: PARQUET - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_like.sql000066400000000000000000000013641451700765000257730ustar00rootroot00000000000000-- Create Table Like with all optional syntax CREATE TABLE IF NOT EXISTS table_identifier LIKE source_table_identifier USING PARQUET ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS PARQUET TBLPROPERTIES ( "key1" = "val1", "key2" = "val2") LOCATION "path/to/files"; -- Create table using an existing table CREATE TABLE student_dupli LIKE student; -- Create table like using a data source CREATE TABLE student_dupli LIKE student USING CSV; -- Table is created as external table at the location specified CREATE TABLE student_dupli LIKE student LOCATION '/root1/home'; -- Create table like using a rowformat CREATE TABLE student_dupli LIKE student ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE TBLPROPERTIES ('owner' = 'xxxx'); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_like.yml000066400000000000000000000063401451700765000257740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 0e80c3cac8e3f8c1f6caf134e0df5bbbbe78906073995922778be82d8ff71f76 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: table_identifier - keyword: LIKE - table_reference: naked_identifier: source_table_identifier - using_clause: keyword: USING data_source_format: keyword: PARQUET - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - keyword: PARQUET - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: LOCATION - quoted_literal: '"path/to/files"' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - keyword: LOCATION - quoted_literal: "'/root1/home'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'owner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.sql000066400000000000000000000000741451700765000311320ustar00rootroot00000000000000CREATE TABLE table_identifier ( a LONG, b INT, c SMALLINT); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_primitive_datatypes.yml000066400000000000000000000020661451700765000311370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d8f20b8c155640694cd84362ac48a58f7ee08fb6a53bb6636bbaf51d260f107d file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: table_identifier - bracketed: - start_bracket: ( - column_definition: naked_identifier: a data_type: primitive_type: keyword: LONG - comma: ',' - column_definition: naked_identifier: b data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: c data_type: primitive_type: keyword: SMALLINT - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_select.sql000066400000000000000000000002461451700765000263240ustar00rootroot00000000000000-- create table select without `AS` keyword CREATE TABLE tab1 SELECT * FROM tab2; -- create table select with `AS` keyword CREATE TABLE tab1 AS SELECT * FROM tab2; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_table_select.yml000066400000000000000000000030421451700765000263230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c638fbc846086000ecd6a0d36354551fe8ed4377abce53076ab6ab93b98bd53 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tab1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab2 - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: tab1 - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tab2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_view.sql000066400000000000000000000015641451700765000246520ustar00rootroot00000000000000-- Create view basic syntax CREATE VIEW experienced_employee_extended AS SELECT * from experienced_employee ; -- Create VIEW with all optional syntax CREATE OR REPLACE GLOBAL TEMPORARY VIEW IF NOT EXISTS experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' TBLPROPERTIES ( "key1" = "val1", "key2" = "val2" ) AS SELECT ID, Name from temp2 ; -- Create a temporary view with TEMP. CREATE TEMP VIEW experienced_employee_temp AS SELECT * from experienced_employee limit 2 ; -- Replace an existing view with OR REPLACE CREATE OR REPLACE VIEW experienced_employee_rep AS SELECT * from experienced_employee limit 2 ; CREATE TEMPORARY VIEW pulse_article_search_data USING org.apache.spark.sql.jdbc OPTIONS ( url "jdbc:postgresql:dbserver", dbtable "schema.tablename", user 'username', password 'password' ) sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_view.yml000066400000000000000000000122721451700765000246540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9707e977b1efba1e0a3ad720e525968b4b83816679c979340c02dbc037a3c47b file: - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_extended - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: GLOBAL - keyword: TEMPORARY - keyword: VIEW - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: experienced_employee - bracketed: - start_bracket: ( - column_reference: naked_identifier: ID - keyword: COMMENT - quoted_literal: "'Unique identification number'" - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: COMMENT - quoted_literal: "'View for experienced employees'" - keyword: TBLPROPERTIES - bracketed: - start_bracket: ( - property_name_identifier: quoted_identifier: '"key1"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val1"' - comma: ',' - property_name_identifier: quoted_identifier: '"key2"' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"val2"' - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: temp2 - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMP - keyword: VIEW - table_reference: naked_identifier: experienced_employee_temp - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee limit_clause: keyword: limit numeric_literal: '2' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_rep - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee limit_clause: keyword: limit numeric_literal: '2' - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: VIEW - table_reference: naked_identifier: pulse_article_search_data - keyword: USING - data_source_format: object_reference: - naked_identifier: org - dot: . - naked_identifier: apache - dot: . - naked_identifier: spark - dot: . - naked_identifier: sql - dot: . 
- naked_identifier: jdbc - keyword: OPTIONS - bracketed: - start_bracket: ( - property_name_identifier: properties_naked_identifier: url - quoted_literal: '"jdbc:postgresql:dbserver"' - comma: ',' - property_name_identifier: properties_naked_identifier: dbtable - quoted_literal: '"schema.tablename"' - comma: ',' - property_name_identifier: properties_naked_identifier: user - quoted_literal: "'username'" - comma: ',' - property_name_identifier: properties_naked_identifier: password - quoted_literal: "'password'" - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_widget.sql000066400000000000000000000003571451700765000251640ustar00rootroot00000000000000-- Create dropdown widget CREATE WIDGET DROPDOWN state DEFAULT "CA" CHOICES SELECT * FROM (VALUES ("CA"), ("IL"), ("MI"), ("NY"), ("OR"), ("VA")); -- Create text widget CREATE WIDGET TEXT database DEFAULT "customers_dev"; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/create_widget.yml000066400000000000000000000051371451700765000251670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: deac0f7076a50c434b37acd68ecc449ad30920e19cd822fe5675f3f20f5fb8eb file: - statement: create_widget_statement: - keyword: CREATE - keyword: WIDGET - keyword: DROPDOWN - widget_name_identifier: state - keyword: DEFAULT - quoted_literal: '"CA"' - keyword: CHOICES - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: VALUES - bracketed: start_bracket: ( expression: quoted_literal: '"CA"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"IL"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"MI"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"NY"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"OR"' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: quoted_literal: '"VA"' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: create_widget_statement: - keyword: CREATE - keyword: WIDGET - keyword: TEXT - widget_name_identifier: database - keyword: DEFAULT - quoted_literal: '"customers_dev"' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_apply_changes_into.sql000066400000000000000000000023351451700765000312540ustar00rootroot00000000000000-- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1; -- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (userid) APPLY AS DELETE WHEN operation = "DELETE" SEQUENCE BY sequencenum COLUMNS * EXCEPT (operation, sequencenum) STORED AS SCD TYPE 2; -- Create and populate the target table. 
CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (userid) SEQUENCE BY sequencenum COLUMNS * EXCEPT (operation, sequencenum); -- Create and populate the target table. CREATE OR REFRESH STREAMING LIVE TABLE target; APPLY CHANGES INTO live.target FROM STREAM(cdc_data.users) KEYS (user_id) IGNORE NULL UPDATES WHERE state = "NY" APPLY AS DELETE WHEN operation = "DELETE" APPLY AS TRUNCATE WHEN operation = "TRUNCATE" SEQUENCE BY sequence_num COLUMNS * EXCEPT (operation, sequence_num) STORED AS SCD TYPE 1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_apply_changes_into.yml000066400000000000000000000204671451700765000312640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5e45c610ddacccbffcbf4ed49cd0db81a4283d99450908feee8fd16b86c4f242 file: - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . - naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: userid end_bracket: ) - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequencenum - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequencenum - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '2' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . - naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: userid end_bracket: ) - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequencenum - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequencenum - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: target - statement_terminator: ; - statement: apply_changes_into_statement: - keyword: APPLY - keyword: CHANGES - keyword: INTO - table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: target - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: cdc_data - dot: . 
- naked_identifier: users end_bracket: ) - keyword: KEYS - bracketed: start_bracket: ( column_reference: naked_identifier: user_id end_bracket: ) - keyword: IGNORE - keyword: 'NULL' - keyword: UPDATES - where_clause: keyword: WHERE expression: column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: '"NY"' - keyword: APPLY - keyword: AS - keyword: DELETE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"DELETE"' - keyword: APPLY - keyword: AS - keyword: TRUNCATE - keyword: WHEN - column_reference: naked_identifier: operation - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"TRUNCATE"' - keyword: SEQUENCE - keyword: BY - column_reference: naked_identifier: sequence_num - keyword: COLUMNS - star: '*' - keyword: EXCEPT - bracketed: - start_bracket: ( - column_reference: naked_identifier: operation - comma: ',' - column_reference: naked_identifier: sequence_num - end_bracket: ) - keyword: STORED - keyword: AS - keyword: SCD - keyword: TYPE - numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_constraint.sql000066400000000000000000000004061451700765000275670ustar00rootroot00000000000000CONSTRAINT valid_timestamp EXPECT (event_ts > '2012-01-01'); CONSTRAINT valid_current_page EXPECT ( current_page_id IS NOT NULL AND current_page_title IS NOT NULL ) ON VIOLATION DROP ROW; CONSTRAINT valid_count EXPECT (count > 0) ON VIOLATION FAIL UPDATE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_constraint.yml000066400000000000000000000037651451700765000276040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fc0702d568526354356fa9c90e28d354993cea13d9ac764cc10acfa08bfa3c50 file: - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_timestamp - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: event_ts comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2012-01-01'" end_bracket: ) - statement_terminator: ; - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_current_page - keyword: EXPECT - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: current_page_id - keyword: IS - keyword: NOT - null_literal: 'NULL' - binary_operator: AND - column_reference: naked_identifier: current_page_title - keyword: IS - keyword: NOT - null_literal: 'NULL' end_bracket: ) - keyword: 'ON' - keyword: VIOLATION - keyword: DROP - keyword: ROW - statement_terminator: ; - statement: constraint_statement: - keyword: CONSTRAINT - object_reference: naked_identifier: valid_count - keyword: EXPECT - bracketed: start_bracket: ( expression: column_reference: naked_identifier: count comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) - keyword: 'ON' - keyword: VIOLATION - keyword: FAIL - keyword: UPDATE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_create_table.sql000066400000000000000000000012651451700765000300210ustar00rootroot00000000000000CREATE OR REFRESH LIVE TABLE taxi_raw AS SELECT a, b FROM JSON.`/databricks-datasets/nyctaxi/sample/json/`; CREATE OR REFRESH LIVE TABLE filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE OR REFRESH STREAMING LIVE TABLE customers_bronze AS SELECT a, b FROM CLOUD_FILES("/databricks-datasets/retail-org/customers/", "csv"); CREATE OR REFRESH STREAMING LIVE TABLE customers_silver AS SELECT a, b FROM STREAM(live.customers_bronze); CREATE OR REFRESH TEMPORARY LIVE TABLE filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE OR REFRESH TEMPORARY STREAMING LIVE TABLE customers_silver AS SELECT a, b FROM STREAM(live.customers_bronze); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_create_table.yml000066400000000000000000000143251451700765000300240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9750d4c4207e59c2192fde953b250ed0456fd3ab755f5e21b24309d66f934273 file: - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: taxi_raw - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . 
quoted_identifier: '`/databricks-datasets/nyctaxi/sample/json/`' - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_bronze - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: CLOUD_FILES bracketed: - start_bracket: ( - expression: quoted_literal: '"/databricks-datasets/retail-org/customers/"' - comma: ',' - expression: quoted_literal: '"csv"' - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . - naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: TEMPORARY - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REFRESH - keyword: TEMPORARY - keyword: STREAMING - keyword: LIVE - keyword: TABLE - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: STREAM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . 
- naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_create_view.sql000066400000000000000000000004551451700765000277040ustar00rootroot00000000000000-- https://docs.databricks.com/workflows/delta-live-tables/delta-live-tables-sql-ref.html#create-view CREATE TEMPORARY LIVE VIEW filtered_data AS SELECT a, b FROM live.taxi_raw; CREATE TEMPORARY STREAMING LIVE VIEW customers_silver AS SELECT a, b FROM stream(live.customers_bronze); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_dlt_create_view.yml000066400000000000000000000044121451700765000277030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 408ecb04633aa2ae4271bb41dd60e415eb94b20cde131120e50c133b6f3d536e file: - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: LIVE - keyword: VIEW - table_reference: naked_identifier: filtered_data - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: live - dot: . - naked_identifier: taxi_raw - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: TEMPORARY - keyword: STREAMING - keyword: LIVE - keyword: VIEW - table_reference: naked_identifier: customers_silver - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: stream bracketed: start_bracket: ( expression: column_reference: - naked_identifier: live - dot: . - naked_identifier: customers_bronze end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.sql000066400000000000000000000002041451700765000306010ustar00rootroot00000000000000SELECT c1:price FROM VALUES('{ "price": 5 }') AS T(c1); SELECT c1:['price']::DECIMAL(5, 2) FROM VALUES('{ "price": 5 }') AS T(c1); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/databricks_operator_colon_sign.yml000066400000000000000000000055171451700765000306170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a42f528b0577d8142a3e493762f623fdf3f3f3c76108910e3bd7446fac280748 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: column_reference: naked_identifier: c1 semi_structured_expression: colon: ':' semi_structured_element: price from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'{ \"price\": 5 }'" end_bracket: ) alias_expression: keyword: AS naked_identifier: T bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: cast_expression: column_reference: naked_identifier: c1 semi_structured_expression: colon: ':' start_square_bracket: '[' semi_structured_element: "'price'" end_square_bracket: ']' casting_operator: '::' data_type: primitive_type: keyword: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '5' - comma: ',' - numeric_literal: '2' - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: values_clause: keyword: VALUES bracketed: start_bracket: ( expression: quoted_literal: "'{ \"price\": 5 }'" end_bracket: ) alias_expression: keyword: AS naked_identifier: T bracketed: start_bracket: ( identifier_list: naked_identifier: c1 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_change_data_feed.sql000066400000000000000000000004271451700765000267260ustar00rootroot00000000000000CREATE TABLE student (id INT, student_name STRING, age INT) TBLPROPERTIES (delta.enableChangeDataFeed = true); ALTER TABLE my_delta_table SET TBLPROPERTIES (delta.enableChangeDataFeed = true); SET spark.databricks.delta.properties.defaults.enableChangeDataFeed = true; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_change_data_feed.yml000066400000000000000000000047471451700765000267410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 61190da440866bf23e484e6ad9fe064c906b70a0861f09a40d4d0f8c14abf777 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: - properties_naked_identifier: delta - dot: . - properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_delta_table - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: - properties_naked_identifier: delta - dot: . 
- properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' end_bracket: ) - statement_terminator: ; - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: databricks - dot: . - properties_naked_identifier: delta - dot: . - properties_naked_identifier: properties - dot: . - properties_naked_identifier: defaults - dot: . - properties_naked_identifier: enableChangeDataFeed comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_constraints.sql000066400000000000000000000005151451700765000260720ustar00rootroot00000000000000ALTER TABLE default.people10m CHANGE COLUMN middle_name DROP NOT NULL; ALTER TABLE default.people10m ADD CONSTRAINT date_within_range CHECK (birthDate > '1900-01-01'); ALTER TABLE default.people10m DROP CONSTRAINT date_within_range; ALTER TABLE default.people10m ADD CONSTRAINT valid_ids CHECK (id > 1 and id < 99999999); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_constraints.yml000066400000000000000000000047221451700765000261000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6cb5616dfacdb4d462880862d15122a820e15134e1282a001ece3c737e93c2b8 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: middle_name - keyword: DROP - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: ADD - keyword: CONSTRAINT - column_reference: naked_identifier: date_within_range - keyword: CHECK - bracketed: start_bracket: ( expression: column_reference: naked_identifier: birthDate comparison_operator: raw_comparison_operator: '>' quoted_literal: "'1900-01-01'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: DROP - keyword: CONSTRAINT - column_reference: naked_identifier: date_within_range - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - keyword: ADD - keyword: CONSTRAINT - column_reference: naked_identifier: valid_ids - keyword: CHECK - bracketed: start_bracket: ( expression: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - binary_operator: and - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: < - numeric_literal: '99999999' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_convert_to.sql000066400000000000000000000010611451700765000257020ustar00rootroot00000000000000-- Convert unpartitioned Parquet table at path '<path-to-table>' CONVERT TO DELTA PARQUET.`/data/events/`; -- Convert partitioned Parquet table at path '<path-to-table>' -- and partitioned by integer columns named 'part' and 'part2' CONVERT TO DELTA PARQUET.`/data/events/` PARTITIONED BY (part int, part2 int); -- Convert the Iceberg table in the path <path-to-table>. CONVERT TO DELTA ICEBERG.`/data/events/`; -- Convert the Iceberg table in the path <path-to-table> -- without collecting statistics CONVERT TO DELTA ICEBERG.`/data/events/` NO STATISTICS; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_convert_to.yml000066400000000000000000000034121451700765000257060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a3d34caef12a18197d02c733c3cce7611de20948b410c4900b9995a6c479b36c file: - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: PARQUET dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: PARQUET dot: . quoted_identifier: '`/data/events/`' - keyword: PARTITIONED - keyword: BY - bracketed: - start_bracket: ( - column_definition: naked_identifier: part data_type: primitive_type: keyword: int - comma: ',' - column_definition: naked_identifier: part2 data_type: primitive_type: keyword: int - end_bracket: ) - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: ICEBERG dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: convert_to_delta_statement: - keyword: CONVERT - keyword: TO - keyword: DELTA - file_reference: keyword: ICEBERG dot: .
quoted_identifier: '`/data/events/`' - keyword: 'NO' - keyword: STATISTICS - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_create_table.sql000066400000000000000000000026161451700765000261410ustar00rootroot00000000000000-- Create table if not exists CREATE TABLE IF NOT EXISTS default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Create or replace table CREATE OR REPLACE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Create or replace table with path CREATE OR REPLACE TABLE DELTA.`/delta/people10m` ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA; -- Partition data CREATE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, ssn STRING, salary INT ) USING DELTA PARTITIONED BY (gender); -- Control data location CREATE TABLE default.people10m USING DELTA LOCATION '/delta/people10m'; -- Generated columns CREATE TABLE default.people10m ( id INT, first_name STRING, middle_name STRING, last_name STRING, gender STRING, birth_date TIMESTAMP, date_of_birth DATE GENERATED ALWAYS AS (CAST(birth_date AS DATE)), ssn STRING, salary INT ) USING DELTA PARTITIONED BY (gender); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_create_table.yml000066400000000000000000000240001451700765000261320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0383611d0a30333c17e59452d7179e43e5fa4775f31282d3dc3bf41a19bf53ad file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - column_definition: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: gender end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: LOCATION - quoted_literal: "'/delta/people10m'" - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: first_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: middle_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: last_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: gender data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: birth_date data_type: primitive_type: keyword: TIMESTAMP - comma: ',' - generated_column_definition: - naked_identifier: date_of_birth - data_type: primitive_type: keyword: DATE - keyword: GENERATED - keyword: ALWAYS - keyword: AS - bracketed: start_bracket: ( function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: column_reference: naked_identifier: birth_date keyword: AS data_type: primitive_type: keyword: DATE end_bracket: ) end_bracket: ) - comma: ',' - column_definition: naked_identifier: ssn data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: salary data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: DELTA - keyword: PARTITIONED - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: gender end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_delete_from.sql000066400000000000000000000007151451700765000260120ustar00rootroot00000000000000DELETE FROM events WHERE date < '2017-01-01'; DELETE FROM DELTA.`/data/events/` WHERE date < '2017-01-01'; DELETE FROM all_events WHERE session_time < ( SELECT min(session_time) FROM good_events ); DELETE FROM orders AS t1 WHERE EXISTS ( SELECT returned_orders.oid FROM returned_orders WHERE t1.oid = returned_orders.oid ); DELETE FROM events WHERE category NOT IN ( SELECT category FROM events2 WHERE date > '2001-01-01' ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_delete_from.yml000066400000000000000000000132271451700765000260160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fb9e7dc34f4b21e8f79008a50df7bc2c1e2ad75a3163333471d5adb25a3d5e9f file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: < quoted_literal: "'2017-01-01'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: < quoted_literal: "'2017-01-01'" - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: all_events where_clause: keyword: WHERE expression: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: min bracketed: start_bracket: ( expression: column_reference: naked_identifier: session_time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: good_events end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders alias_expression: keyword: AS naked_identifier: t1 where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events where_clause: keyword: WHERE expression: - column_reference: naked_identifier: category - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: category from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2001-01-01'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_describe_detail.sql000066400000000000000000000001471451700765000266260ustar00rootroot00000000000000DESCRIBE DETAIL '/data/events/'; DESCRIBE DETAIL eventstable; DESCRIBE DETAIL DELTA.`/data/events/`; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_describe_detail.yml000066400000000000000000000016641451700765000266350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f0e9a8ba7769d9aa6938ad447278fcc1671a02c1e41c0696916b1603e246ae1d file: - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - table_reference: naked_identifier: eventstable - statement_terminator: ; - statement: describe_detail_statement: - keyword: DESCRIBE - keyword: DETAIL - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_describe_history.sql000066400000000000000000000003311451700765000270600ustar00rootroot00000000000000-- get the full history of the table DESCRIBE HISTORY '/data/events/'; DESCRIBE HISTORY DELTA.`/data/events/`; -- get the last operation only DESCRIBE HISTORY '/data/events/' LIMIT 1; DESCRIBE HISTORY EVENTSTABLE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_describe_history.yml000066400000000000000000000022401451700765000270630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ab3cc93c4fd69f7cfaf790028d61e8eadc03e958315323fece411b6d0c45c75b file: - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - quoted_literal: "'/data/events/'" - limit_clause: keyword: LIMIT numeric_literal: '1' - statement_terminator: ; - statement: describe_history_statement: - keyword: DESCRIBE - keyword: HISTORY - table_reference: naked_identifier: EVENTSTABLE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_drop_column_metadata_change.sql000066400000000000000000000001541451700765000312100ustar00rootroot00000000000000ALTER TABLE table_name DROP COLUMN col_name; ALTER TABLE table_name DROP COLUMNS (col_name_1, col_name_2); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_drop_column_metadata_change.yml000066400000000000000000000021001451700765000312030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: ab0871335f7960fea1a362e0173897fcf4447bacee1fc60e1e85e2be3eab39e6 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: col_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: DROP - keyword: COLUMNS - bracketed: - start_bracket: ( - column_reference: naked_identifier: col_name_1 - comma: ',' - column_reference: naked_identifier: col_name_2 - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_generate_manifest.sql000066400000000000000000000002631451700765000272030ustar00rootroot00000000000000GENERATE symlink_format_manifest FOR TABLE DELTA.`/data/events/`; GENERATE symlink_format_manifest FOR TABLE '/data/events/'; GENERATE symlink_format_manifest FOR TABLE events; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_generate_manifest.yml000066400000000000000000000022371451700765000272100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 948d1aa854413e5a6814df2afa04d9391abf9f565944b9545153ba86f4eb075f file: - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - quoted_literal: "'/data/events/'" - statement_terminator: ; - statement: generate_manifest_file_statement: - keyword: GENERATE - symlink_format_manifest: symlink_format_manifest - keyword: FOR - keyword: TABLE - table_reference: naked_identifier: events - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_merge.sql000066400000000000000000000057151451700765000246310ustar00rootroot00000000000000-- upsert into a table MERGE INTO people10m USING people10mupdates ON people10m.id = people10mupdates.id WHEN MATCHED THEN UPDATE SET id = people10mupdates.id, first_name = people10mupdates.first_name, middle_name = people10mupdates.middle_name, last_name = people10mupdates.last_name, gender = people10mupdates.gender, birth_date = people10mupdates.birth_date, ssn = people10mupdates.ssn, salary = people10mupdates.salary WHEN NOT MATCHED THEN INSERT ( id, first_name, middle_name, last_name, gender, birth_date, ssn, salary ) VALUES ( people10mupdates.id, people10mupdates.first_name, people10mupdates.middle_name, people10mupdates.last_name, people10mupdates.gender, people10mupdates.birth_date, people10mupdates.ssn, people10mupdates.salary ); -- data deduplication MERGE INTO logs USING new_deduped_logs ON logs.unique_id = new_deduped_logs.unique_id WHEN NOT MATCHED THEN INSERT *; -- data deduplication with additional predicate MERGE INTO logs USING new_deduped_logs ON logs.unique_id = new_deduped_logs.unique_id AND logs.date > current_date() - INTERVAL 7 DAYS WHEN NOT MATCHED AND new_deduped_logs.date > current_date() - INTERVAL 7 DAYS THEN INSERT *; -- SCD Type 2 using MERGE MERGE INTO 
customers USING ( SELECT updates.customer_id AS merge_unique_key, updates.* FROM updates UNION ALL SELECT NULL AS merge_unique_key, updates.* FROM updates INNER JOIN customers ON updates.customer_id = customers.customer_id WHERE customers.current = TRUE AND updates.address != customers.address ) staged_updates ON customers.customer_id = merge_unique_key WHEN MATCHED AND customers.current = TRUE AND customers.address != staged_updates.address THEN UPDATE SET current = FALSE, end_date = staged_updates.effective_date WHEN NOT MATCHED THEN INSERT( customer_id, address, current, effective_date, end_date ) VALUES( staged_updates.customer_id, staged_updates.address, TRUE, staged_updates.effective_date, NULL ); -- ingest CDC using MERGE MERGE INTO target t USING ( SELECT changes.unique_key, changes.latest.new_value AS new_value, changes.latest.deleted AS deleted FROM ( SELECT unique_key, max(struct(change_time, new_value, deleted)) AS latest FROM changes GROUP BY unique_key ) ) s ON s.unique_key = t.unique_key WHEN MATCHED AND s.deleted = TRUE THEN DELETE WHEN MATCHED THEN UPDATE SET unique_key = s.unique_key, record_value = s.new_value WHEN NOT MATCHED AND s.deleted = FALSE THEN INSERT ( unique_key, record_value ) VALUES ( unique_key, new_value ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_merge.yml000066400000000000000000000572121451700765000246320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: c4059991d0b3d710932cb8c93294b824cb99997a328bde7be4272f3feb3b92bb file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: people10m - keyword: USING - table_reference: naked_identifier: people10mupdates - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: people10m - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - comma: ',' - set_clause: - column_reference: naked_identifier: first_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: first_name - comma: ',' - set_clause: - column_reference: naked_identifier: middle_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: middle_name - comma: ',' - set_clause: - column_reference: naked_identifier: last_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: last_name - comma: ',' - set_clause: - column_reference: naked_identifier: gender - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . 
- naked_identifier: gender - comma: ',' - set_clause: - column_reference: naked_identifier: birth_date - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: birth_date - comma: ',' - set_clause: - column_reference: naked_identifier: ssn - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: ssn - comma: ',' - set_clause: - column_reference: naked_identifier: salary - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: salary merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: id - comma: ',' - column_reference: naked_identifier: first_name - comma: ',' - column_reference: naked_identifier: middle_name - comma: ',' - column_reference: naked_identifier: last_name - comma: ',' - column_reference: naked_identifier: gender - comma: ',' - column_reference: naked_identifier: birth_date - comma: ',' - column_reference: naked_identifier: ssn - comma: ',' - column_reference: naked_identifier: salary - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: first_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: middle_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: last_name - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: gender - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: birth_date - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: ssn - comma: ',' - expression: column_reference: - naked_identifier: people10mupdates - dot: . - naked_identifier: salary - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: logs - keyword: USING - table_reference: naked_identifier: new_deduped_logs - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: logs - dot: . - naked_identifier: unique_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new_deduped_logs - dot: . - naked_identifier: unique_id - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: logs - keyword: USING - table_reference: naked_identifier: new_deduped_logs - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: logs - dot: . - naked_identifier: unique_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new_deduped_logs - dot: . 
- naked_identifier: unique_id - binary_operator: AND - column_reference: - naked_identifier: logs - dot: . - naked_identifier: date - comparison_operator: raw_comparison_operator: '>' - function: function_name: function_name_identifier: current_date bracketed: start_bracket: ( end_bracket: ) - binary_operator: '-' - interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '7' date_part: DAYS - merge_match: merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: new_deduped_logs - dot: . - naked_identifier: date comparison_operator: raw_comparison_operator: '>' function: function_name: function_name_identifier: current_date bracketed: start_bracket: ( end_bracket: ) binary_operator: '-' interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '7' date_part: DAYS - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: customers - keyword: USING - bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: updates - dot: . - naked_identifier: customer_id alias_expression: keyword: AS naked_identifier: merge_unique_key - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: updates dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: updates - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: null_literal: 'NULL' alias_expression: keyword: AS naked_identifier: merge_unique_key - comma: ',' - select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: updates dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: updates join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: customers - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: updates - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: current - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - binary_operator: AND - column_reference: - naked_identifier: updates - dot: . - naked_identifier: address - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: customers - dot: . - naked_identifier: address end_bracket: ) - alias_expression: naked_identifier: staged_updates - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: customers - dot: . - naked_identifier: customer_id - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: merge_unique_key - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: customers - dot: . 
- naked_identifier: current - comparison_operator: raw_comparison_operator: '=' - boolean_literal: 'TRUE' - binary_operator: AND - column_reference: - naked_identifier: customers - dot: . - naked_identifier: address - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: address - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: current comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - comma: ',' - set_clause: - column_reference: naked_identifier: end_date - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: effective_date merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: customer_id - comma: ',' - column_reference: naked_identifier: address - comma: ',' - column_reference: naked_identifier: current - comma: ',' - column_reference: naked_identifier: effective_date - comma: ',' - column_reference: naked_identifier: end_date - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: customer_id - comma: ',' - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: address - comma: ',' - expression: boolean_literal: 'TRUE' - comma: ',' - expression: column_reference: - naked_identifier: staged_updates - dot: . - naked_identifier: effective_date - comma: ',' - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: target - alias_expression: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: unique_key - comma: ',' - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: latest - dot: . - naked_identifier: new_value alias_expression: keyword: AS naked_identifier: new_value - comma: ',' - select_clause_element: column_reference: - naked_identifier: changes - dot: . - naked_identifier: latest - dot: . 
- naked_identifier: deleted alias_expression: keyword: AS naked_identifier: deleted from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: unique_key - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: change_time - comma: ',' - expression: column_reference: naked_identifier: new_value - comma: ',' - expression: column_reference: naked_identifier: deleted - end_bracket: ) end_bracket: ) alias_expression: keyword: AS naked_identifier: latest from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: changes groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: unique_key end_bracket: ) end_bracket: ) - alias_expression: naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: unique_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t - dot: . - naked_identifier: unique_key - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: deleted comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: unique_key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: unique_key - comma: ',' - set_clause: - column_reference: naked_identifier: record_value - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: s - dot: . - naked_identifier: new_value - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: AND - expression: column_reference: - naked_identifier: s - dot: . 
- naked_identifier: deleted comparison_operator: raw_comparison_operator: '=' boolean_literal: 'FALSE' - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: unique_key - comma: ',' - column_reference: naked_identifier: record_value - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: unique_key - comma: ',' - expression: column_reference: naked_identifier: new_value - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_read_table.sql000066400000000000000000000017051451700765000256070ustar00rootroot00000000000000-- query table in the metastore SELECT a, b FROM default.people10m; -- query table by path SELECT a, b FROM DELTA.`/delta/people10m`; -- query old snapshot by timestamp SELECT a, b FROM default.people10m@20190101000000000; SELECT count(*) FROM DELTA.`/delta/people10m@20190101000000000`; SELECT count(*) FROM DELTA.`/delta/people10m` TIMESTAMP AS OF "2019-01-01"; SELECT count(*) FROM default.people10m TIMESTAMP AS OF "2019-01-01"; SELECT count(*) FROM default.people10m TIMESTAMP AS OF date_sub(current_date(), 1); SELECT count(*) FROM default.people10m TIMESTAMP AS OF "2019-01-01 01:30:00.000"; -- query old snapshot by version SELECT a, b FROM default.people10m@v123; SELECT count(*) FROM default.people10m VERSION AS OF 5238; SELECT count(*) FROM default.people10m@v5238; SELECT count(*) FROM DELTA.`/delta/people10m@v5238`; SELECT count(*) FROM DELTA.`/delta/people10m` VERSION AS OF 5238; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_read_table.yml000066400000000000000000000226471451700765000256210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1e6fdbf904841a7c9fbd6fe5dcf1ab2040db9586004d93354c40afa0cf3ab84c file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m at_sign_literal: '@20190101000000000' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m@20190101000000000`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - function: function_name: function_name_identifier: date_sub bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_date bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01 01:30:00.000"' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m at_sign_literal: '@v123' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m at_sign_literal: '@v5238' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m@v5238`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_restore.sql000066400000000000000000000006411451700765000252060ustar00rootroot00000000000000RESTORE TABLE DELTA.`/delta/people10m` TO TIMESTAMP AS OF "2019-01-01"; RESTORE TABLE default.people10m TO TIMESTAMP AS OF "2019-01-01"; RESTORE TABLE default.people10m TO TIMESTAMP AS OF date_sub(current_date(), 1); RESTORE TABLE default.people10m TO TIMESTAMP AS OF "2019-01-01 01:30:00.000"; RESTORE TABLE DELTA.`/delta/people10m` TO VERSION AS OF 5238; RESTORE TABLE default.people10m TO VERSION AS OF 5238; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_restore.yml000066400000000000000000000054001451700765000252060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b0533a4086a4a1240f4920f5d1d1a657eb89eb7248622ee0d81cf538cfc01242 file: - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . 
- naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - function: function_name: function_name_identifier: date_sub bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: current_date bracketed: start_bracket: ( end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: TIMESTAMP - keyword: AS - keyword: OF - quoted_literal: '"2019-01-01 01:30:00.000"' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - file_reference: keyword: DELTA dot: . quoted_identifier: '`/delta/people10m`' - keyword: TO - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; - statement: restore_table_statement: - keyword: RESTORE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - keyword: TO - keyword: VERSION - keyword: AS - keyword: OF - numeric_literal: '5238' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_update_table.sql000066400000000000000000000011041451700765000261470ustar00rootroot00000000000000UPDATE events SET event_type = 'click' WHERE event_type = 'clck'; UPDATE DELTA.`/data/events/` SET event_type = 'click' WHERE event_type = 'clck'; UPDATE all_events SET session_time = 0, ignored = true WHERE session_time < ( SELECT min(session_time) FROM good_events ); UPDATE orders AS t1 SET order_status = 'returned' WHERE EXISTS ( SELECT returned_orders.oid FROM returned_orders WHERE t1.oid = returned_orders.oid ); UPDATE events SET category = 'undefined' WHERE category NOT IN ( SELECT category FROM events2 WHERE date > '2001-01-01' ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_update_table.yml000066400000000000000000000144461451700765000261660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ed30f4be30e506cc082f7ec9f657e666a5c481314f3b68a2931be0b6933d3a03 file: - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: events set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'click'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'clck'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'click'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: event_type comparison_operator: raw_comparison_operator: '=' quoted_literal: "'clck'" - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: all_events set_clause_list: - keyword: SET - set_clause: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - comma: ',' - set_clause: column_reference: naked_identifier: ignored comparison_operator: raw_comparison_operator: '=' boolean_literal: 'true' where_clause: keyword: WHERE expression: column_reference: naked_identifier: session_time comparison_operator: raw_comparison_operator: < bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: min bracketed: start_bracket: ( expression: column_reference: naked_identifier: session_time end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: good_events end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: orders alias_expression: keyword: AS naked_identifier: t1 set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: order_status comparison_operator: raw_comparison_operator: '=' quoted_literal: "'returned'" where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: returned_orders - dot: . - naked_identifier: oid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: returned_orders where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: oid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: returned_orders - dot: . 
- naked_identifier: oid end_bracket: ) - statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: events set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'undefined'" where_clause: keyword: WHERE expression: - column_reference: naked_identifier: category - keyword: NOT - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: category from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: events2 where_clause: keyword: WHERE expression: column_reference: naked_identifier: date comparison_operator: raw_comparison_operator: '>' quoted_literal: "'2001-01-01'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_update_table_schema.sql000066400000000000000000000035421451700765000274770ustar00rootroot00000000000000-- add columns ALTER TABLE table_name ADD COLUMNS col_name STRING; ALTER TABLE table_name ADD COLUMNS (col_name STRING); ALTER TABLE table_name ADD COLUMNS col_name STRING, col_name2 INT; ALTER TABLE table_name ADD COLUMNS col_name STRING COMMENT "col_comment" FIRST; ALTER TABLE table_name ADD COLUMNS col_name STRING COMMENT "col_comment" FIRST, col_name2 INT COMMENT "col_2_comment" AFTER col_b_name; -- change column comment/ordering ALTER TABLE table_name CHANGE col_name_old col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment"; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment" FIRST; ALTER TABLE table_name CHANGE COLUMN col_name_old col_name_new STRING COMMENT "new_col_comment" AFTER col_a_name; ---- change column comment/ordering in a nested field ALTER TABLE table_name CHANGE col_name_1.nested_col_name nested_col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment"; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment" FIRST; ALTER TABLE table_name CHANGE COLUMN col_name_1.nested_col_name nested_col_name_new STRING COMMENT "new_col_comment" AFTER col_a_name; ALTER TABLE boxes CHANGE COLUMN col_b.a_key_name a_new_key_name STRING FIRST; -- replace columns ALTER TABLE table_name REPLACE COLUMNS ( col_name1 STRING COMMENT "col_comment1" ); ALTER TABLE boxes REPLACE COLUMNS ( col_c STRING, col_b STRUCT<key2:STRING, nested:STRING, key1:STRING>, col_a STRING ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_update_table_schema.yml000066400000000000000000000244521451700765000275040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 209c3612244aa11fa9c18b0eea34c906d0f38105c9ae65330fa8867fdf2d23dd file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: column_reference: naked_identifier: col_name2 data_type: primitive_type: keyword: INT - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment"' - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: ADD - keyword: COLUMNS - column_definition: column_reference: naked_identifier: col_name data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment"' - keyword: FIRST - comma: ',' - column_definition: column_reference: naked_identifier: col_name2 data_type: primitive_type: keyword: INT column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_2_comment"' - keyword: AFTER - column_reference: naked_identifier: col_b_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - 
keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: naked_identifier: col_name_old - column_reference: naked_identifier: col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: AFTER - column_reference: naked_identifier: col_a_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_name_1 - dot: . - naked_identifier: nested_col_name - column_reference: naked_identifier: nested_col_name_new - data_type: primitive_type: keyword: STRING - keyword: COMMENT - quoted_literal: '"new_col_comment"' - keyword: AFTER - column_reference: naked_identifier: col_a_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: boxes - keyword: CHANGE - keyword: COLUMN - column_reference: - naked_identifier: col_b - dot: . 
- naked_identifier: a_key_name - column_reference: naked_identifier: a_new_key_name - data_type: primitive_type: keyword: STRING - keyword: FIRST - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: table_name - keyword: REPLACE - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: naked_identifier: col_name1 data_type: primitive_type: keyword: STRING column_constraint_segment: comment_clause: keyword: COMMENT quoted_literal: '"col_comment1"' end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: boxes - keyword: REPLACE - keyword: COLUMNS - bracketed: - start_bracket: ( - column_definition: naked_identifier: col_c data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: col_b data_type: struct_type: keyword: STRUCT struct_type_schema: - start_angle_bracket: < - naked_identifier: key2 - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: nested - colon: ':' - data_type: primitive_type: keyword: STRING - comma: ',' - naked_identifier: key1 - colon: ':' - data_type: primitive_type: keyword: STRING - end_angle_bracket: '>' - comma: ',' - column_definition: naked_identifier: col_a data_type: primitive_type: keyword: STRING - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_vacuum.sql000066400000000000000000000006021451700765000250200ustar00rootroot00000000000000-- vacuum files not required by versions older than the default retention period VACUUM EVENTSTABLE; -- vacuum files in path-based table VACUUM '/data/events'; VACUUM DELTA.`/data/events/`; -- vacuum files not required by versions more than 100 hours old VACUUM DELTA.`/data/events/` RETAIN 100 HOURS; -- do dry run to get the list of files to be deleted VACUUM EVENTSTABLE DRY RUN; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_vacuum.yml000066400000000000000000000024141451700765000250250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f367bd3c07d28f3de9e718b819350241948c11986481a7c12218a5c10351e6b6 file: - statement: vacuum_statement: keyword: VACUUM table_reference: naked_identifier: EVENTSTABLE - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM quoted_literal: "'/data/events'" - statement_terminator: ; - statement: vacuum_statement: keyword: VACUUM file_reference: keyword: DELTA dot: . quoted_identifier: '`/data/events/`' - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - file_reference: keyword: DELTA dot: . 
quoted_identifier: '`/data/events/`' - keyword: RETAIN - numeric_literal: '100' - date_part: HOURS - statement_terminator: ; - statement: vacuum_statement: - keyword: VACUUM - table_reference: naked_identifier: EVENTSTABLE - keyword: DRY - keyword: RUN - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_write_table.sql000066400000000000000000000005271451700765000260270ustar00rootroot00000000000000-- append INSERT INTO default.people10m SELECT * FROM more_people; -- overwrite INSERT OVERWRITE TABLE default.people10m SELECT * FROM more_people; -- with user-defined commit metadata SET spark.databricks.delta.commitInfo.userMetadata = "overwritten-for-fixing-incorrect-data"; INSERT OVERWRITE default.people10m SELECT * FROM more_people; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/delta_write_table.yml000066400000000000000000000054451451700765000260350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3701321590713305bb67b6c867b1d19318a4f46c706df55d2269bfdf8e1ae68b file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - keyword: TABLE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: databricks - dot: . - properties_naked_identifier: delta - dot: . - properties_naked_identifier: commitInfo - dot: . - properties_naked_identifier: userMetadata comparison_operator: raw_comparison_operator: '=' quoted_literal: '"overwritten-for-fixing-incorrect-data"' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OVERWRITE - table_reference: - naked_identifier: default - dot: . - naked_identifier: people10m - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: more_people - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_database.sql000066400000000000000000000002601451700765000257530ustar00rootroot00000000000000DESCRIBE DATABASE employees; DESCRIBE DATABASE EXTENDED employees; DESC DATABASE deployment; -- Keywords SCHEMA and DATABASE are interchangeable. 
DESCRIBE SCHEMA employees; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_database.yml000066400000000000000000000021211451700765000257530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e519773c48713b10590187317f664206adf50309a956a1b182dbd5bc46b34917 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - database_reference: naked_identifier: employees - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: DATABASE - keyword: EXTENDED - database_reference: naked_identifier: employees - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: DATABASE - database_reference: naked_identifier: deployment - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: SCHEMA - database_reference: naked_identifier: employees - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_function.sql000066400000000000000000000001751451700765000260410ustar00rootroot00000000000000DESC FUNCTION abs; DESC FUNCTION EXTENDED abs; DESC FUNCTION max; DESC FUNCTION EXTENDED explode; DESCRIBE FUNCTION max; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_function.yml000066400000000000000000000024001451700765000260340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3891454a62d3b2442c3a3c9e281ef184011606ac2a54974bbba7ed19e69b4bcc file: - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: abs - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - keyword: EXTENDED - function_name: function_name_identifier: abs - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - function_name: function_name_identifier: max - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: FUNCTION - keyword: EXTENDED - function_name: function_name_identifier: explode - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FUNCTION - function_name: function_name_identifier: max - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_query.sql000066400000000000000000000004701451700765000253570ustar00rootroot00000000000000DESCRIBE QUERY SELECT age, sum(age) AS sum_age FROM person GROUP BY age; DESCRIBE QUERY WITH all_names_cte AS (SELECT name FROM person) SELECT name FROM all_names_cte; DESC QUERY VALUES(100, 'John', 10000.20D) AS employee(id, name, salary); DESC QUERY TABLE person; DESCRIBE FROM person SELECT age; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_query.yml000066400000000000000000000100501451700765000253540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0c027c9519574b580a09f65ef8d25159a1a89d1ef1db3b691b475ad69ab64f4d file: - statement: describe_statement: - keyword: DESCRIBE - keyword: QUERY - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: QUERY - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: all_names_cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: all_names_cte - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: QUERY - statement: values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: numeric_literal: '100' - comma: ',' - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: 10000.20D - end_bracket: ) alias_expression: keyword: AS naked_identifier: employee bracketed: start_bracket: ( identifier_list: - naked_identifier: id - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: salary end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESC - keyword: QUERY - keyword: TABLE - table_reference: naked_identifier: person - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: FROM - table_reference: naked_identifier: person - keyword: SELECT - column_reference: naked_identifier: age - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_table.sql000066400000000000000000000005171451700765000253030ustar00rootroot00000000000000DESCRIBE TABLE customer; DESCRIBE customer; DESCRIBE TABLE salesdb.customer; DESCRIBE TABLE EXTENDED customer; DESCRIBE TABLE EXTENDED customer PARTITION (state = 'AR'); DESCRIBE customer salesdb.customer.name; DESCRIBE TABLE customer salesdb.customer.name; DESCRIBE TABLE customer customer.name; DESCRIBE TABLE customer name; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/describe_table.yml000066400000000000000000000047501451700765000253100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
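The YAML records in these fixture files are serialized parse trees, and a roughly equivalent tree can be produced at runtime through SQLFluff's Python API. The following is a minimal sketch, not the repository's actual test harness, assuming the 2.x core Linter with its parse_string() method and the segment stringify() helper:

from sqlfluff.core import Linter

# Parse one of the DESCRIBE statements above using the sparksql dialect.
linter = Linter(dialect="sparksql")
parsed = linter.parse_string(
    "DESCRIBE TABLE EXTENDED customer PARTITION (state = 'AR');"
)

# stringify() renders the nested segment tree; the YAML fixtures store an
# equivalent nested record of segment types and raw values.
print(parsed.tree.stringify())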
_hash: de8998ea19aac17f1e022d0d6204dc37ec8a34f4b1ceaa4fce4e0f86a1d4f285 file: - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: keyword: DESCRIBE table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: - naked_identifier: salesdb - dot: . - naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - keyword: EXTENDED - table_reference: naked_identifier: customer - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - keyword: EXTENDED - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'AR'" end_bracket: ) - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - table_reference: naked_identifier: customer - naked_identifier: salesdb - dot: . - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: salesdb - dot: . - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: customer - dot: . - naked_identifier: name - statement_terminator: ; - statement: describe_statement: - keyword: DESCRIBE - keyword: TABLE - table_reference: naked_identifier: customer - naked_identifier: name - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_database.sql000066400000000000000000000004311451700765000251370ustar00rootroot00000000000000-- Drop DATABASE with all optional syntax DROP DATABASE IF EXISTS dbname RESTRICT; DROP DATABASE IF EXISTS dbname CASCADE; -- Drop the database and its tables DROP DATABASE inventory_db CASCADE; -- Drop the database using IF EXISTS DROP DATABASE IF EXISTS inventory_db CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_database.yml000066400000000000000000000024271451700765000251500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
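The statements in a fixture such as drop_database.sql can also be exercised through the simple Python API. A minimal usage sketch, assuming sqlfluff.lint() in 2.x returns a list of violation dicts carrying at least "code" and "description" keys:

import sqlfluff

# Lint one statement from the fixture; an empty list means the sparksql
# dialect parsed it and no rule violations were raised.
violations = sqlfluff.lint(
    "DROP DATABASE IF EXISTS inventory_db CASCADE;",
    dialect="sparksql",
)
for violation in violations:
    print(violation["code"], violation["description"])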
_hash: 43308924f6aa5436ed6da39b87145968be7b8145199a2e025055588d2cf5f90f file: - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - keyword: RESTRICT - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - keyword: CASCADE - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - database_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; - statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_function.sql000066400000000000000000000003631451700765000252240ustar00rootroot00000000000000-- Drop FUNCTION with all optional syntax DROP TEMPORARY FUNCTION IF EXISTS function_name; -- Try to drop Permanent function which is not present DROP FUNCTION test_avg; -- Drop Temporary function DROP TEMPORARY FUNCTION IF EXISTS test_avg; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_function.yml000066400000000000000000000020411451700765000252210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3a4bdb250ba955d87c4d9b617ba8b3b561b5701423e49f8fea6e679eb05ca8d3 file: - statement: drop_function_statement: - keyword: DROP - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: function_name - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: test_avg - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: test_avg - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_schema.sql000066400000000000000000000004171451700765000246370ustar00rootroot00000000000000-- Drop schema with all optional syntax DROP SCHEMA IF EXISTS dbname RESTRICT; DROP SCHEMA IF EXISTS dbname CASCADE; -- Drop the database and its tables DROP SCHEMA inventory_db CASCADE; -- Drop the database using IF EXISTS DROP SCHEMA IF EXISTS inventory_db CASCADE; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_schema.yml000066400000000000000000000023771451700765000246440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
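The header comment repeated in every YML fixture describes the regeneration workflow: edit the .sql file, then run `python test/generate_parse_fixture_yml.py` so the serialized tree and its _hash integrity field are rebuilt. The sketch below shows one way a fixture pair might be loaded and compared by hand; the paths and the side-by-side comparison are illustrative, not the suite's actual assertion logic:

import yaml
from sqlfluff.core import Linter

fixture = "test/fixtures/dialects/sparksql/drop_schema"  # illustrative path
with open(fixture + ".sql") as f:
    sql = f.read()
with open(fixture + ".yml") as f:
    record = yaml.safe_load(f)

record.pop("_hash")  # integrity field; enforced by the test suite, not here
parsed = Linter(dialect="sparksql").parse_string(sql)

# record["file"] holds the stored tree; print both for a manual comparison.
print(record["file"])
print(parsed.tree.stringify())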
_hash: 763435cb4e9ae153438594ebc352ed261a944aa2bcb99f482894dd5541f500f5 file: - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: dbname - keyword: RESTRICT - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: dbname - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - schema_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; - statement: drop_schema_statement: - keyword: DROP - keyword: SCHEMA - keyword: IF - keyword: EXISTS - schema_reference: naked_identifier: inventory_db - keyword: CASCADE - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_table.sql000066400000000000000000000006211451700765000244630ustar00rootroot00000000000000-- Drop TABLE with all optional syntax DROP TABLE IF EXISTS table_identifier; -- Assumes a table named `employeetable` exists. DROP TABLE employeetable; -- Assumes a table named `employeetable` exists in the `userdb` database DROP TABLE userdb.employeetable; -- Assumes a table named `employeetable` does not exist. Try with IF EXISTS -- will not throw exception DROP TABLE IF EXISTS employeetable; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_table.yml000066400000000000000000000022611451700765000244670ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d098e81483f0e94616b601124f1e3ae0737dd24ffa4e3b314f160b8ac0bc6c92 file: - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: naked_identifier: employeetable - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: userdb - dot: . - naked_identifier: employeetable - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: employeetable - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_view.sql000066400000000000000000000006061451700765000243510ustar00rootroot00000000000000-- Drop VIEW with all optional syntax DROP VIEW IF EXISTS view_identifier; -- Assumes a view named `employeeView` exists. DROP VIEW employeeview; -- Assumes a view named `employeeView` exists in the `userdb` database DROP VIEW userdb.employeeview; -- Assumes a view named `employeeView` does not exist. Try with IF EXISTS -- will not throw exception DROP VIEW IF EXISTS employeeview; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/drop_view.yml000066400000000000000000000022451451700765000243540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests.
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 95065988d2896cb997bdf47c51be36020efe7eab3d8a90e7d25866e970cb7766 file: - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: view_identifier - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: naked_identifier: employeeview - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - table_reference: - naked_identifier: userdb - dot: . - naked_identifier: employeeview - statement_terminator: ; - statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: employeeview - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/explain.sql000066400000000000000000000030261451700765000240120ustar00rootroot00000000000000EXPLAIN SELECT a, b FROM person; EXPLAIN SELECT TRANSFORM (zip_code, name, age) USING 'cat' AS (a, b, c) FROM person WHERE zip_code > 94511; EXPLAIN ALTER DATABASE inventory SET DBPROPERTIES ( 'Edited-by' = 'John' ); EXPLAIN ALTER TABLE student RENAME TO studentinfo; EXPLAIN ALTER VIEW view_identifier RENAME TO view_identifier; EXPLAIN CREATE DATABASE IF NOT EXISTS database_name COMMENT "database_comment" LOCATION "root/database_directory" WITH DBPROPERTIES ( "property_name" = "property_value"); EXPLAIN CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS function_name AS "class_name" USING FILE "resource_locations"; EXPLAIN CREATE TABLE student (id INT, student_name STRING, age INT) USING CSV; EXPLAIN CREATE TABLE student (id INT, student_name STRING, age INT) STORED AS ORC; EXPLAIN CREATE TABLE student_dupli LIKE student ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE TBLPROPERTIES ('owner' = 'xxxx'); EXPLAIN CREATE VIEW experienced_employee_extended AS SELECT a FROM experienced_employee; EXPLAIN DROP DATABASE IF EXISTS dbname; EXPLAIN DROP FUNCTION test_avg; EXPLAIN USE database_name; EXPLAIN TRUNCATE TABLE student PARTITION(age = 10); EXPLAIN MSCK REPAIR TABLE table_identifier ADD PARTITIONS; EXPLAIN REFRESH TABLE tbl1; EXPLAIN REFRESH FUNCTION func1; EXPLAIN LOAD DATA LOCAL INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; EXPLAIN INSERT INTO TABLE students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111111); EXPLAIN DROP VIEW IF EXISTS view_identifier; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/explain.yml000066400000000000000000000277351451700765000240310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
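Because explain.sql deliberately wraps many other statement types, a quick sanity check for a file like it is to parse the whole thing and search for unparsable segments. A rough sketch, assuming "unparsable" is the segment type SQLFluff assigns to text that the dialect grammar could not match:

from sqlfluff.core import Linter

sql = "EXPLAIN SELECT a, b FROM person;\nEXPLAIN DROP FUNCTION test_avg;"
parsed = Linter(dialect="sparksql").parse_string(sql)

# recursive_crawl() walks the tree for segments of the given type; finding
# none suggests the grammar covered every wrapped statement.
unparsable = list(parsed.tree.recursive_crawl("unparsable"))
print("clean parse" if not unparsable else unparsable)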
_hash: 857fcc5285757408c4a45ef6f3114b118d98f95080003349d3dd8da1b5a5af6e file: - statement: explain_statement: keyword: EXPLAIN statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_database_statement: - keyword: ALTER - keyword: DATABASE - database_reference: naked_identifier: inventory - keyword: SET - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'Edited-by'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'John'" end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: student - keyword: RENAME - keyword: TO - table_reference: naked_identifier: studentinfo - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: alter_view_statement: - keyword: ALTER - keyword: VIEW - table_reference: naked_identifier: view_identifier - keyword: RENAME - keyword: TO - table_reference: naked_identifier: view_identifier - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_database_statement: - keyword: CREATE - keyword: DATABASE - keyword: IF - keyword: NOT - keyword: EXISTS - database_reference: naked_identifier: database_name - keyword: COMMENT - quoted_literal: '"database_comment"' - keyword: LOCATION - quoted_literal: '"root/database_directory"' - keyword: WITH - keyword: DBPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: '"property_name"' comparison_operator: raw_comparison_operator: '=' quoted_literal: '"property_value"' end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: REPLACE - keyword: TEMPORARY - keyword: FUNCTION - keyword: IF - keyword: NOT - keyword: EXISTS - function_name_identifier: function_name - keyword: AS - quoted_literal: '"class_name"' - keyword: USING - file_keyword: FILE - quoted_literal: '"resource_locations"' - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: 
primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - using_clause: keyword: USING data_source_format: keyword: CSV - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: primitive_type: keyword: INT - comma: ',' - column_definition: naked_identifier: student_name data_type: primitive_type: keyword: STRING - comma: ',' - column_definition: naked_identifier: age data_type: primitive_type: keyword: INT - end_bracket: ) - keyword: STORED - keyword: AS - keyword: ORC - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: student_dupli - keyword: LIKE - table_reference: naked_identifier: student - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: STORED - keyword: AS - file_format: TEXTFILE - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'owner'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'xxxx'" end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: create_view_statement: - keyword: CREATE - keyword: VIEW - table_reference: naked_identifier: experienced_employee_extended - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: experienced_employee - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_database_statement: - keyword: DROP - keyword: DATABASE - keyword: IF - keyword: EXISTS - database_reference: naked_identifier: dbname - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: function_name_identifier: test_avg - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: use_statement: keyword: USE database_reference: naked_identifier: database_name - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: student - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: refresh_statement: - keyword: REFRESH - 
keyword: FUNCTION - function_name: function_name_identifier: func1 - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: insert_statement: - keyword: INSERT - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: students - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: quoted_literal: "'Amy Smith'" - comma: ',' - expression: quoted_literal: "'123 Park Ave, San Jose'" - comma: ',' - expression: numeric_literal: '111111' - end_bracket: ) - statement_terminator: ; - statement: explain_statement: keyword: EXPLAIN statement: drop_view_statement: - keyword: DROP - keyword: VIEW - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: view_identifier - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/from_supported_tvf.sql000066400000000000000000000007301451700765000263000ustar00rootroot00000000000000--TVFs that are supported in a `FROM` clause -- -- range call with end SELECT id FROM range(6 + cos(3)); SELECT id FROM range(5); -- range call with start and end SELECT id FROM range(5, 10); -- range call with start, end and step SELECT id FROM range(5, 10, 2); -- range call with start, end, step, and numPartitions SELECT id FROM range(0, 10, 2, 200); -- range call with a table alias SELECT test.id FROM range(5, 8) AS test; SELECT test.id FROM range(5, 8) test; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/from_supported_tvf.yml000066400000000000000000000136071451700765000263110ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
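As the YAML below records, the range() calls in from_supported_tvf.sql parse as ordinary function segments inside the FROM clause. A small sketch of pulling those calls back out of a parsed tree; the segment type name "function_name_identifier" is taken directly from the fixtures:

from sqlfluff.core import Linter

parsed = Linter(dialect="sparksql").parse_string(
    "SELECT test.id FROM range(5, 8) AS test;"
)

# Crawl the tree for function name identifiers; for this statement the
# only match should be the table-valued function name itself.
for seg in parsed.tree.recursive_crawl("function_name_identifier"):
    print(seg.raw)  # expected: range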
_hash: fb7e9e23268771280db637c71225316b840dff2c1ab34bb067d00b2bf87e2433 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: start_bracket: ( expression: numeric_literal: '6' binary_operator: + function: function_name: function_name_identifier: cos bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: - start_bracket: ( - expression: numeric_literal: '0' - comma: ',' - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '200' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) alias_expression: keyword: AS naked_identifier: test - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: range bracketed: - start_bracket: ( - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '8' - end_bracket: ) alias_expression: naked_identifier: test - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_alter_table.sql000066400000000000000000000076201451700765000263140ustar00rootroot00000000000000-- Iceberg Spark DDL Alter Statements https://iceberg.apache.org/docs/latest/spark-ddl/#alter-table ALTER TABLE prod.db.sample RENAME TO prod.db.new_name; ALTER TABLE prod.db.sample SET TBLPROPERTIES ( 'read.split.target-size'='268435456' ); ALTER TABLE prod.db.sample UNSET TBLPROPERTIES ('read.split.target-size'); ALTER TABLE prod.db.sample SET TBLPROPERTIES ( 'comment' = 'A table comment.' ); ALTER TABLE prod.db.sample ADD COLUMNS ( new_column string comment 'new_column docs' ); -- create a struct column ALTER TABLE prod.db.sample ADD COLUMN point struct<x: double, y: double>; -- add a field to the struct ALTER TABLE prod.db.sample ADD COLUMN point.z double; -- create a nested array column of struct ALTER TABLE prod.db.sample ADD COLUMN points array<struct<x: double, y: double>>; -- add a field to the struct within an array. Using keyword 'element' to access the array's element column. ALTER TABLE prod.db.sample ADD COLUMN points.element.z double; -- create a map column of struct key and struct value ALTER TABLE prod.db.sample ADD COLUMN points map<struct<x: int>, struct<a: int>>; -- add a field to the value struct in a map. Using keyword 'value' to access the map's value column. ALTER TABLE prod.db.sample ADD COLUMN points.value.b int; ALTER TABLE prod.db.sample ADD COLUMN new_column bigint AFTER other_column; ALTER TABLE prod.db.sample ADD COLUMN nested.new_column bigint FIRST; ALTER TABLE prod.db.sample RENAME COLUMN data TO payload; ALTER TABLE prod.db.sample RENAME COLUMN location.lat TO latitude; ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double; ALTER TABLE prod.db.sample ALTER COLUMN measurement TYPE double COMMENT 'unit is bytes per second'; ALTER TABLE prod.db.sample ALTER COLUMN measurement COMMENT 'unit is kilobytes per second'; ALTER TABLE prod.db.sample ALTER COLUMN col FIRST; ALTER TABLE prod.db.sample ALTER COLUMN nested.col AFTER other_col; ALTER TABLE prod.db.sample ALTER COLUMN id DROP NOT NULL; ALTER TABLE prod.db.sample DROP COLUMN id; ALTER TABLE prod.db.sample DROP COLUMN point.z; ALTER TABLE prod.db.sample ADD PARTITION FIELD catalog; -- identity transform ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id); ALTER TABLE prod.db.sample ADD PARTITION FIELD truncate(4, data); ALTER TABLE prod.db.sample ADD PARTITION FIELD years(ts); -- use optional AS keyword to specify a custom name for the partition field ALTER TABLE prod.db.sample ADD PARTITION FIELD bucket(16, id) AS shard; ALTER TABLE prod.db.sample DROP PARTITION FIELD catalog; ALTER TABLE prod.db.sample DROP PARTITION FIELD bucket(16, id); ALTER TABLE prod.db.sample DROP PARTITION FIELD truncate(4, data); ALTER TABLE prod.db.sample DROP PARTITION FIELD years(ts); ALTER TABLE prod.db.sample DROP PARTITION FIELD shard; ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts); -- use optional AS keyword to specify a custom name for the new partition field ALTER TABLE prod.db.sample REPLACE PARTITION FIELD ts_day WITH days(ts) AS day_of_ts; ALTER TABLE prod.db.sample WRITE ORDERED BY category, id; -- use optional ASC/DESC keyword to specify sort
order of each field (default ASC) ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC, id DESC; -- use optional NULLS FIRST/NULLS LAST keyword to specify null order of each field (default FIRST) ALTER TABLE prod.db.sample WRITE ORDERED BY category ASC NULLS LAST, id DESC NULLS FIRST; ALTER TABLE prod.db.sample WRITE LOCALLY ORDERED BY category, id; ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION; ALTER TABLE prod.db.sample WRITE DISTRIBUTED BY PARTITION LOCALLY ORDERED BY category, id; -- single column ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id; -- multiple columns ALTER TABLE prod.db.sample SET IDENTIFIER FIELDS id, data; -- single column ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id; -- multiple columns ALTER TABLE prod.db.sample DROP IDENTIFIER FIELDS id, data sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_alter_table.yml000066400000000000000000000565671451700765000263340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1aaa78d9b62ca8de52d21da4d3dc2db40daecfe19b36c8d82db0a94f922a48b3 file: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: RENAME - keyword: TO - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: new_name - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'read.split.target-size'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'268435456'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: UNSET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'read.split.target-size'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: SET - keyword: TBLPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'comment'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'A table comment.'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . 
- naked_identifier: sample - keyword: ADD - keyword: COLUMNS - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: new_column data_type: primitive_type: keyword: string column_constraint_segment: comment_clause: keyword: comment quoted_literal: "'new_column docs'" end_bracket: ) - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: point data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: x - colon: ':' - data_type: primitive_type: keyword: double - comma: ',' - naked_identifier: y - colon: ':' - data_type: primitive_type: keyword: double - end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: point - dot: . - naked_identifier: z data_type: primitive_type: keyword: double - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: points data_type: array_type: keyword: array start_angle_bracket: < data_type: struct_type: keyword: struct struct_type_schema: - start_angle_bracket: < - naked_identifier: x - colon: ':' - data_type: primitive_type: keyword: double - comma: ',' - naked_identifier: y - colon: ':' - data_type: primitive_type: keyword: double - end_angle_bracket: '>' end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: points - dot: . - naked_identifier: element - dot: . - naked_identifier: z data_type: primitive_type: keyword: double - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: naked_identifier: points data_type: - keyword: map - start_angle_bracket: < - data_type: struct_type: keyword: struct struct_type_schema: start_angle_bracket: < naked_identifier: x colon: ':' data_type: primitive_type: keyword: int end_angle_bracket: '>' - comma: ',' - data_type: struct_type: keyword: struct struct_type_schema: start_angle_bracket: < naked_identifier: a colon: ':' data_type: primitive_type: keyword: int end_angle_bracket: '>' - end_angle_bracket: '>' - statement_terminator: ; - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: prod - dot: . - naked_identifier: db - dot: . - naked_identifier: sample - keyword: ADD - keyword: COLUMN - column_definition: column_reference: - naked_identifier: points - dot: . - naked_identifier: value - dot: . 
    - naked_identifier: b
      data_type:
        primitive_type:
          keyword: int
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        column_reference:
          naked_identifier: new_column
        data_type:
          primitive_type:
            keyword: bigint
    - keyword: AFTER
    - column_reference:
        naked_identifier: other_column
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: COLUMN
    - column_definition:
        column_reference:
        - naked_identifier: nested
        - dot: .
        - naked_identifier: new_column
        data_type:
          primitive_type:
            keyword: bigint
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
        naked_identifier: data
    - keyword: TO
    - column_reference:
        naked_identifier: payload
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: RENAME
    - keyword: COLUMN
    - column_reference:
      - naked_identifier: location
      - dot: .
      - naked_identifier: lat
    - keyword: TO
    - column_reference:
        naked_identifier: latitude
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
        naked_identifier: measurement
    - keyword: TYPE
    - data_type:
        primitive_type:
          keyword: double
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
        naked_identifier: measurement
    - keyword: TYPE
    - data_type:
        primitive_type:
          keyword: double
    - keyword: COMMENT
    - quoted_literal: "'unit is bytes per second'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
        naked_identifier: measurement
    - keyword: COMMENT
    - quoted_literal: "'unit is kilobytes per second'"
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
        naked_identifier: col
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
      - naked_identifier: nested
      - dot: .
      - naked_identifier: col
    - keyword: AFTER
    - column_reference:
        naked_identifier: other_col
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ALTER
    - keyword: COLUMN
    - column_reference:
        naked_identifier: id
    - keyword: DROP
    - keyword: NOT
    - keyword: 'NULL'
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: COLUMN
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: COLUMN
    - column_reference:
      - naked_identifier: point
      - dot: .
      - naked_identifier: z
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: PARTITION
    - keyword: FIELD
    - column_reference:
        naked_identifier: catalog
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: bucket
        bracketed:
          start_bracket: (
          numeric_literal: '16'
          comma: ','
          column_reference:
            naked_identifier: id
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: truncate
        bracketed:
          start_bracket: (
          numeric_literal: '4'
          comma: ','
          column_reference:
            naked_identifier: data
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: years
        bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: ts
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: ADD
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: bucket
        bracketed:
          start_bracket: (
          numeric_literal: '16'
          comma: ','
          column_reference:
            naked_identifier: id
          end_bracket: )
    - keyword: AS
    - naked_identifier: shard
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: PARTITION
    - keyword: FIELD
    - column_reference:
        naked_identifier: catalog
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: bucket
        bracketed:
          start_bracket: (
          numeric_literal: '16'
          comma: ','
          column_reference:
            naked_identifier: id
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: truncate
        bracketed:
          start_bracket: (
          numeric_literal: '4'
          comma: ','
          column_reference:
            naked_identifier: data
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: PARTITION
    - keyword: FIELD
    - iceberg_transformation:
        keyword: years
        bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: ts
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: PARTITION
    - keyword: FIELD
    - column_reference:
        naked_identifier: shard
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: REPLACE
    - keyword: PARTITION
    - keyword: FIELD
    - column_reference:
        naked_identifier: ts_day
    - keyword: WITH
    - iceberg_transformation:
        keyword: days
        bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: ts
          end_bracket: )
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: REPLACE
    - keyword: PARTITION
    - keyword: FIELD
    - column_reference:
        naked_identifier: ts_day
    - keyword: WITH
    - iceberg_transformation:
        keyword: days
        bracketed:
          start_bracket: (
          column_reference:
            naked_identifier: ts
          end_bracket: )
    - keyword: AS
    - naked_identifier: day_of_ts
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: ORDERED
    - keyword: BY
    - column_reference:
        naked_identifier: category
    - comma: ','
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: ORDERED
    - keyword: BY
    - column_reference:
        naked_identifier: category
    - keyword: ASC
    - comma: ','
    - column_reference:
        naked_identifier: id
    - keyword: DESC
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: ORDERED
    - keyword: BY
    - column_reference:
        naked_identifier: category
    - keyword: ASC
    - keyword: NULLS
    - keyword: LAST
    - comma: ','
    - column_reference:
        naked_identifier: id
    - keyword: DESC
    - keyword: NULLS
    - keyword: FIRST
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: LOCALLY
    - keyword: ORDERED
    - keyword: BY
    - column_reference:
        naked_identifier: category
    - comma: ','
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: DISTRIBUTED
    - keyword: BY
    - keyword: PARTITION
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: WRITE
    - keyword: DISTRIBUTED
    - keyword: BY
    - keyword: PARTITION
    - keyword: LOCALLY
    - keyword: ORDERED
    - keyword: BY
    - column_reference:
        naked_identifier: category
    - comma: ','
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: SET
    - keyword: IDENTIFIER
    - keyword: FIELDS
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: SET
    - keyword: IDENTIFIER
    - keyword: FIELDS
    - column_reference:
        naked_identifier: id
    - comma: ','
    - column_reference:
        naked_identifier: data
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: IDENTIFIER
    - keyword: FIELDS
    - column_reference:
        naked_identifier: id
- statement_terminator: ;
- statement:
    alter_table_statement:
    - keyword: ALTER
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - keyword: DROP
    - keyword: IDENTIFIER
    - keyword: FIELDS
    - column_reference:
        naked_identifier: id
    - comma: ','
    - column_reference:
        naked_identifier: data
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_create_table.sql
-- Iceberg Spark DDL Create Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#create-table
CREATE TABLE prod.db.sample (
    id bigint COMMENT 'unique id',
    data string)
USING iceberg;

CREATE TABLE prod.db.sample (
    id bigint,
    data string,
    category string)
USING iceberg
PARTITIONED BY (category);

CREATE TABLE prod.db.sample (
    id bigint,
    data string,
    category string,
    ts timestamp)
USING iceberg
PARTITIONED BY (bucket(16, id), days(ts), category);

CREATE TABLE prod.db.sample
USING iceberg
PARTITIONED BY (part)
TBLPROPERTIES ('key'='value');
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_create_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 02a96866985a7d40d12108ccd412b6e7a79ffc1f663d3eabca1f10edb8ccf66c
file:
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: id
          data_type:
            primitive_type:
              keyword: bigint
          column_constraint_segment:
            comment_clause:
              keyword: COMMENT
              quoted_literal: "'unique id'"
      - comma: ','
      - column_definition:
          naked_identifier: data
          data_type:
            primitive_type:
              keyword: string
      - end_bracket: )
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: id
          data_type:
            primitive_type:
              keyword: bigint
      - comma: ','
      - column_definition:
          naked_identifier: data
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: category
          data_type:
            primitive_type:
              keyword: string
      - end_bracket: )
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: category
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - bracketed:
      - start_bracket: (
      - column_definition:
          naked_identifier: id
          data_type:
            primitive_type:
              keyword: bigint
      - comma: ','
      - column_definition:
          naked_identifier: data
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: category
          data_type:
            primitive_type:
              keyword: string
      - comma: ','
      - column_definition:
          naked_identifier: ts
          data_type:
            primitive_type:
              keyword: timestamp
      - end_bracket: )
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
      - start_bracket: (
      - iceberg_transformation:
          keyword: bucket
          bracketed:
            start_bracket: (
            numeric_literal: '16'
            comma: ','
            column_reference:
              naked_identifier: id
            end_bracket: )
      - comma: ','
      - iceberg_transformation:
          keyword: days
          bracketed:
            start_bracket: (
            column_reference:
              naked_identifier: ts
            end_bracket: )
      - comma: ','
      - column_reference:
          naked_identifier: category
      - end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: part
        end_bracket: )
    - keyword: TBLPROPERTIES
    - bracketed:
        start_bracket: (
        property_name_identifier:
          quoted_identifier: "'key'"
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'value'"
        end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_replace_table.sql
-- Iceberg Spark DDL Replace Table Statements https://iceberg.apache.org/docs/latest/spark-ddl/#replace-table--as-select
REPLACE TABLE prod.db.sample
USING iceberg;

REPLACE TABLE prod.db.sample
USING iceberg
PARTITIONED BY (part)
TBLPROPERTIES ('key'='value');

CREATE OR REPLACE TABLE prod.db.sample
USING iceberg;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/iceberg_replace_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 85f7ed31e58bf0a5f4c8fffee2974bf12aa4f1b9027b92050953c12486797db6
file:
- statement:
    replace_table_statement:
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
- statement_terminator: ;
- statement:
    replace_table_statement:
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
    - keyword: PARTITIONED
    - keyword: BY
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: part
        end_bracket: )
    - keyword: TBLPROPERTIES
    - bracketed:
        start_bracket: (
        property_name_identifier:
          quoted_identifier: "'key'"
        comparison_operator:
          raw_comparison_operator: '='
        quoted_literal: "'value'"
        end_bracket: )
- statement_terminator: ;
- statement:
    create_table_statement:
    - keyword: CREATE
    - keyword: OR
    - keyword: REPLACE
    - keyword: TABLE
    - table_reference:
      - naked_identifier: prod
      - dot: .
      - naked_identifier: db
      - dot: .
      - naked_identifier: sample
    - using_clause:
        keyword: USING
        data_source_format:
          keyword: iceberg
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_overwrite_directory.sql
INSERT OVERWRITE DIRECTORY '/tmp/destination'
USING PARQUET
OPTIONS (col1 = "1", col2 = "2", col3 = 'test', "user" = "a person")
SELECT a FROM test_table;

INSERT OVERWRITE DIRECTORY
USING PARQUET
OPTIONS (
    path = '/tmp/destination',
    col1 = "1",
    col2 = "2",
    col3 = 'test'
)
SELECT a FROM test_table;

INSERT OVERWRITE DIRECTORY
USING PARQUET
OPTIONS (path '/tmp/destination', col1 1, col2 2, col3 'test')
SELECT a FROM test_table;

INSERT OVERWRITE DIRECTORY '/tmp/destination'
USING PARQUET
OPTIONS (col1 1, col2 2, col3 'test')
SELECT a FROM test_table;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_overwrite_directory.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 1693594c7c492caa9efe94c8fdd1a42c392020ee78aea06c6b922bf4e444652a
file:
- statement:
    insert_overwrite_directory_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: DIRECTORY
    - quoted_literal: "'/tmp/destination'"
    - keyword: USING
    - data_source_format:
        keyword: PARQUET
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - property_name_identifier:
          properties_naked_identifier: col1
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"1"'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col2
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"2"'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col3
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'test'"
      - comma: ','
      - property_name_identifier:
          quoted_identifier: '"user"'
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"a person"'
      - end_bracket: )
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            column_reference:
              naked_identifier: a
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
- statement:
    insert_overwrite_directory_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: DIRECTORY
    - keyword: USING
    - data_source_format:
        keyword: PARQUET
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - property_name_identifier:
          properties_naked_identifier: path
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'/tmp/destination'"
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col1
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"1"'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col2
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: '"2"'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col3
      - comparison_operator:
          raw_comparison_operator: '='
      - quoted_literal: "'test'"
      - end_bracket: )
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            column_reference:
              naked_identifier: a
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
- statement:
    insert_overwrite_directory_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: DIRECTORY
    - keyword: USING
    - data_source_format:
        keyword: PARQUET
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - property_name_identifier:
          properties_naked_identifier: path
      - quoted_literal: "'/tmp/destination'"
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col1
      - numeric_literal: '1'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col2
      - numeric_literal: '2'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col3
      - quoted_literal: "'test'"
      - end_bracket: )
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            column_reference:
              naked_identifier: a
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
- statement:
    insert_overwrite_directory_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: DIRECTORY
    - quoted_literal: "'/tmp/destination'"
    - keyword: USING
    - data_source_format:
        keyword: PARQUET
    - keyword: OPTIONS
    - bracketed:
      - start_bracket: (
      - property_name_identifier:
          properties_naked_identifier: col1
      - numeric_literal: '1'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col2
      - numeric_literal: '2'
      - comma: ','
      - property_name_identifier:
          properties_naked_identifier: col3
      - quoted_literal: "'test'"
      - end_bracket: )
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            column_reference:
              naked_identifier: a
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_overwrite_directory_hive.sql
INSERT OVERWRITE LOCAL DIRECTORY '/tmp/destination'
STORED AS ORC
SELECT * FROM test_table;

INSERT OVERWRITE LOCAL DIRECTORY '/tmp/destination'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
SELECT * FROM test_table;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_overwrite_directory_hive.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: df12c2e4312b5167eee081db51c9bef0059974d4742d2aa9f315fcb238a4d169
file:
- statement:
    insert_overwrite_directory_hive_fmt_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: LOCAL
    - keyword: DIRECTORY
    - quoted_literal: "'/tmp/destination'"
    - keyword: STORED
    - keyword: AS
    - keyword: ORC
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
- statement:
    insert_overwrite_directory_hive_fmt_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - keyword: LOCAL
    - keyword: DIRECTORY
    - quoted_literal: "'/tmp/destination'"
    - row_format_clause:
      - keyword: ROW
      - keyword: FORMAT
      - keyword: DELIMITED
      - keyword: FIELDS
      - keyword: TERMINATED
      - keyword: BY
      - quoted_literal: "','"
    - select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: test_table
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_table.sql
-- Single Row Insert Using a VALUES Clause
INSERT INTO TABLE students VALUES
    ('Amy Smith', '123 Park Ave, San Jose', 111111);

INSERT INTO students VALUES
    ('Amy Smith', '123 Park Ave, San Jose', 111111);

INSERT OVERWRITE students VALUES
    ('Amy Smith', '123 Park Ave, San Jose', 111111);

-- Multi-Row Insert Using a VALUES Clause
INSERT INTO students VALUES
    ('Bob Brown', '456 Taylor St, Cupertino', 222222),
    ('Cathy Johnson', '789 Race Ave, Palo Alto', 333333);

INSERT OVERWRITE students VALUES
    ('Bob Brown', '456 Taylor St, Cupertino', 222222),
    ('Cathy Johnson', '789 Race Ave, Palo Alto', 333333);

-- Insert Using a SELECT Statement
INSERT INTO students PARTITION (student_id = 444444)
SELECT name, address
FROM persons
WHERE name = "Dora Williams";

INSERT OVERWRITE students PARTITION (student_id = 444444)
SELECT name, address
FROM
persons
WHERE name = "Dora Williams";

-- Insert Using a TABLE Statement
INSERT INTO students TABLE visiting_students;

INSERT OVERWRITE students TABLE visiting_students;

-- Insert Using a FROM Statement
INSERT INTO students
FROM applicants
SELECT name, address, id
WHERE qualified = TRUE;

INSERT OVERWRITE students
FROM applicants
SELECT name, address, id
WHERE qualified = TRUE;

-- Insert Using a Typed Date Literal for a Partition Column Value
INSERT INTO students PARTITION (birthday = DATE '2019-01-02')
VALUES ('Amy Smith', '123 Park Ave, San Jose');

INSERT OVERWRITE students PARTITION (birthday = DATE '2019-01-02')
VALUES ('Amy Smith', '123 Park Ave, San Jose');

-- Insert with both a partition spec and a column list
INSERT INTO students PARTITION (student_id = 11215017) (address, name)
VALUES ('Hangzhou, China', 'Kent Yao Jr.');

INSERT OVERWRITE students PARTITION (student_id = 11215017) (address, name)
VALUES ('Hangzhou, China', 'Kent Yao Jr.');
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/insert_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ccf4ffdc0f68eaf23ce059c9d2a983085cc59b4987e8a0a14ed79fe72dab123e
file:
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - keyword: TABLE
    - table_reference:
        naked_identifier: students
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Amy Smith'"
        - comma: ','
        - expression:
            quoted_literal: "'123 Park Ave, San Jose'"
        - comma: ','
        - expression:
            numeric_literal: '111111'
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Amy Smith'"
        - comma: ','
        - expression:
            quoted_literal: "'123 Park Ave, San Jose'"
        - comma: ','
        - expression:
            numeric_literal: '111111'
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Amy Smith'"
        - comma: ','
        - expression:
            quoted_literal: "'123 Park Ave, San Jose'"
        - comma: ','
        - expression:
            numeric_literal: '111111'
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - values_clause:
      - keyword: VALUES
      - bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Bob Brown'"
        - comma: ','
        - expression:
            quoted_literal: "'456 Taylor St, Cupertino'"
        - comma: ','
        - expression:
            numeric_literal: '222222'
        - end_bracket: )
      - comma: ','
      - bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Cathy Johnson'"
        - comma: ','
        - expression:
            quoted_literal: "'789 Race Ave, Palo Alto'"
        - comma: ','
        - expression:
            numeric_literal: '333333'
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - values_clause:
      - keyword: VALUES
      - bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Bob Brown'"
        - comma: ','
        - expression:
            quoted_literal: "'456 Taylor St, Cupertino'"
        - comma: ','
        - expression:
            numeric_literal: '222222'
        - end_bracket: )
      - comma: ','
      - bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Cathy Johnson'"
        - comma: ','
        - expression:
            quoted_literal: "'789 Race Ave, Palo Alto'"
        - comma: ','
        - expression:
            numeric_literal: '333333'
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: student_id
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '444444'
        end_bracket: )
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: name
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: address
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: persons
        where_clause:
          keyword: WHERE
          expression:
            column_reference:
              naked_identifier: name
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: '"Dora Williams"'
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: student_id
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '444444'
        end_bracket: )
    - select_statement:
        select_clause:
        - keyword: SELECT
        - select_clause_element:
            column_reference:
              naked_identifier: name
        - comma: ','
        - select_clause_element:
            column_reference:
              naked_identifier: address
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: persons
        where_clause:
          keyword: WHERE
          expression:
            column_reference:
              naked_identifier: name
            comparison_operator:
              raw_comparison_operator: '='
            quoted_literal: '"Dora Williams"'
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - keyword: TABLE
    - table_reference:
        naked_identifier: visiting_students
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - keyword: TABLE
    - table_reference:
        naked_identifier: visiting_students
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - keyword: FROM
    - table_reference:
        naked_identifier: applicants
    - keyword: SELECT
    - column_reference:
        naked_identifier: name
    - comma: ','
    - column_reference:
        naked_identifier: address
    - comma: ','
    - column_reference:
        naked_identifier: id
    - where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: qualified
          comparison_operator:
            raw_comparison_operator: '='
          boolean_literal: 'TRUE'
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - keyword: FROM
    - table_reference:
        naked_identifier: applicants
    - keyword: SELECT
    - column_reference:
        naked_identifier: name
    - comma: ','
    - column_reference:
        naked_identifier: address
    - comma: ','
    - column_reference:
        naked_identifier: id
    - where_clause:
        keyword: WHERE
        expression:
          column_reference:
            naked_identifier: qualified
          comparison_operator:
            raw_comparison_operator: '='
          boolean_literal: 'TRUE'
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: birthday
        comparison_operator:
          raw_comparison_operator: '='
        keyword: DATE
        date_constructor_literal: "'2019-01-02'"
        end_bracket: )
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Amy Smith'"
        - comma: ','
        - expression:
            quoted_literal: "'123 Park Ave, San Jose'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: birthday
        comparison_operator:
          raw_comparison_operator: '='
        keyword: DATE
        date_constructor_literal: "'2019-01-02'"
        end_bracket: )
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Amy Smith'"
        - comma: ','
        - expression:
            quoted_literal: "'123 Park Ave, San Jose'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: INTO
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: student_id
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '11215017'
        end_bracket: )
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: address
      - comma: ','
      - column_reference:
          naked_identifier: name
      - end_bracket: )
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Hangzhou, China'"
        - comma: ','
        - expression:
            quoted_literal: "'Kent Yao Jr.'"
        - end_bracket: )
- statement_terminator: ;
- statement:
    insert_statement:
    - keyword: INSERT
    - keyword: OVERWRITE
    - table_reference:
        naked_identifier: students
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: student_id
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '11215017'
        end_bracket: )
    - bracketed:
      - start_bracket: (
      - column_reference:
          naked_identifier: address
      - comma: ','
      - column_reference:
          naked_identifier: name
      - end_bracket: )
    - values_clause:
        keyword: VALUES
        bracketed:
        - start_bracket: (
        - expression:
            quoted_literal: "'Hangzhou, China'"
        - comma: ','
        - expression:
            quoted_literal: "'Kent Yao Jr.'"
        - end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/issue_3484.sql
-- https://github.com/sqlfluff/sqlfluff/issues/3484
WITH cte AS (
    SELECT *
    FROM source
    WHERE col1 = 0
    DISTRIBUTE BY col1
),

SELECT *
FROM cte
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/issue_3484.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ddf3f513580f1ad645ae912a5d1707ed6e1c1a84086d532e5476cde35670a293
file:
  statement:
    with_compound_statement:
      keyword: WITH
      common_table_expression:
        naked_identifier: cte
        keyword: AS
        bracketed:
          start_bracket: (
          select_statement:
            select_clause:
              keyword: SELECT
              select_clause_element:
                wildcard_expression:
                  wildcard_identifier:
                    star: '*'
            from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    table_reference:
                      naked_identifier: source
            where_clause:
              keyword: WHERE
              expression:
                column_reference:
                  naked_identifier: col1
                comparison_operator:
                  raw_comparison_operator: '='
                numeric_literal: '0'
            distribute_by_clause:
            - keyword: DISTRIBUTE
            - keyword: BY
            - column_reference:
                naked_identifier: col1
          end_bracket: )
      comma: ','
      select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            wildcard_expression:
              wildcard_identifier:
                star: '*'
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: cte
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/join_types.sql
-- inner join
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
INNER JOIN department ON employee.deptno = department.deptno;

-- left join
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
LEFT JOIN department ON employee.deptno = department.deptno;

-- right join
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
RIGHT JOIN department ON employee.deptno = department.deptno;

-- full join
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
FULL JOIN department ON employee.deptno = department.deptno;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
FULL OUTER JOIN department ON employee.deptno = department.deptno;

-- cross join
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
CROSS JOIN department;

-- semi join
SELECT employee.id -- noqa: L031
FROM employee
SEMI JOIN department ON employee.deptno = department.deptno;

SELECT employee.id
FROM employee
LEFT SEMI JOIN department ON employee.deptno = department.deptno;

-- anti join
SELECT employee.id
FROM employee
ANTI JOIN department ON employee.deptno = department.deptno;

SELECT employee.id
FROM employee
LEFT ANTI JOIN department ON employee.deptno = department.deptno;

-- natural joins
SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL INNER JOIN department;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL LEFT JOIN department;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL RIGHT JOIN department;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL FULL JOIN department;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL FULL OUTER JOIN department;

SELECT
    employee.id,
    employee.name,
    employee.deptno,
    department.deptname
FROM employee
NATURAL CROSS JOIN department;

SELECT employee.id
FROM employee
NATURAL SEMI JOIN department;

SELECT employee.id
FROM employee
NATURAL LEFT SEMI JOIN department;

SELECT employee.id
FROM employee
NATURAL ANTI JOIN department;

SELECT employee.id
FROM employee
NATURAL LEFT ANTI JOIN department;

-- Multiple join
SELECT
    table1.a,
    table2.b,
    table3.c
FROM table1
INNER JOIN table2 ON
    table1.a = table2.a
INNER JOIN table3 ON table1.a = table3.a
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/join_types.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e7134cf8b6e1843c7a784f6ca4de6637a21009117c00cd69bbcec9c8d25e306b
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: INNER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: FULL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: FULL
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: CROSS
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: LEFT
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: ANTI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: LEFT
          - keyword: ANTI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: employee
                - dot: .
                - naked_identifier: deptno
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: department
                - dot: .
                - naked_identifier: deptno
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: INNER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: LEFT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: RIGHT
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: FULL
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: FULL
          - keyword: OUTER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: deptno
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: department
          - dot: .
          - naked_identifier: deptname
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: CROSS
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: LEFT
          - keyword: SEMI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: ANTI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          column_reference:
          - naked_identifier: employee
          - dot: .
          - naked_identifier: id
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: employee
          join_clause:
          - keyword: NATURAL
          - keyword: LEFT
          - keyword: ANTI
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: department
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
          - naked_identifier: table1
          - dot: .
          - naked_identifier: a
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: table2
          - dot: .
          - naked_identifier: b
      - comma: ','
      - select_clause_element:
          column_reference:
          - naked_identifier: table3
          - dot: .
          - naked_identifier: c
      from_clause:
        keyword: FROM
        from_expression:
        - from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: table1
        - join_clause:
          - keyword: INNER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: table2
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: table1
                - dot: .
                - naked_identifier: a
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: table2
                - dot: .
                - naked_identifier: a
        - join_clause:
          - keyword: INNER
          - keyword: JOIN
          - from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: table3
          - join_on_condition:
              keyword: 'ON'
              expression:
              - column_reference:
                - naked_identifier: table1
                - dot: .
                - naked_identifier: a
              - comparison_operator:
                  raw_comparison_operator: '='
              - column_reference:
                - naked_identifier: table3
                - dot: .
                - naked_identifier: a
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/list_file.sql
LIST FILE "/path/to/file/abc.txt";
LIST FILE '/another/test.txt';
LIST FILE "/path with space/abc.txt";
LIST FILE "/path/to/some/directory";
LIST FILES "/path with space/cde.txt" '/path with space/fgh.txt';
-- NB: Non-quoted paths are not supported in SQLFluff currently
--LIST FILE /tmp/test;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/list_file.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: e38255cadfe8a5739a5c7b4264284692e1736042b948947ed0d05d422485d794
file:
- statement:
    list_file_statement:
      keyword: LIST
      file_keyword: FILE
      quoted_literal: '"/path/to/file/abc.txt"'
- statement_terminator: ;
- statement:
    list_file_statement:
      keyword: LIST
      file_keyword: FILE
      quoted_literal: "'/another/test.txt'"
- statement_terminator: ;
- statement:
    list_file_statement:
      keyword: LIST
      file_keyword: FILE
      quoted_literal: '"/path with space/abc.txt"'
- statement_terminator: ;
- statement:
    list_file_statement:
      keyword: LIST
      file_keyword: FILE
      quoted_literal: '"/path/to/some/directory"'
- statement_terminator: ;
- statement:
    list_file_statement:
    - keyword: LIST
    - file_keyword: FILES
    - quoted_literal: '"/path with space/cde.txt"'
    - quoted_literal: "'/path with space/fgh.txt'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/list_jar.sql
LIST JAR "/path/to/some.jar";
LIST JAR '/some/other.jar';
LIST JAR "/path with space/abc.jar";
LIST JARS "/path with space/def.jar" '/path with space/ghi.jar';
LIST JAR "ivy://group:module:version";
LIST JAR "ivy://group:module:version?transitive=false";
LIST JAR "ivy://group:module:version?transitive=true";
LIST JAR "ivy://group:module:version?exclude=group:module&transitive=true";
-- NB: Non-quoted paths are not supported in SQLFluff currently
--LIST JAR /tmp/test.jar;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/list_jar.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 0f2de1a613bf4f27aa5c10c9c6d246309ae866cd8e453ca887fe43faaf772b39
file:
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"/path/to/some.jar"'
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: "'/some/other.jar'"
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"/path with space/abc.jar"'
- statement_terminator: ;
- statement:
    list_jar_statement:
    - keyword: LIST
    - file_keyword: JARS
    - quoted_literal: '"/path with space/def.jar"'
    - quoted_literal: "'/path with space/ghi.jar'"
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"ivy://group:module:version"'
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"ivy://group:module:version?transitive=false"'
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"ivy://group:module:version?transitive=true"'
- statement_terminator: ;
- statement:
    list_jar_statement:
      keyword: LIST
      file_keyword: JAR
      quoted_literal: '"ivy://group:module:version?exclude=group:module&transitive=true"'
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/literals.sql
-- string literals
SELECT 'Hello, World!' AS col;
SELECT 'SPARK SQL' AS col;
SELECT "it's $10." AS col;

-- binary literal
SELECT X'123456' AS col;

-- null literal
SELECT NULL AS col;

-- boolean literal
SELECT TRUE AS col;
SELECT FALSE AS col;

-- numeric literal
SELECT -2147483648 AS col;
SELECT 9223372036854775807l AS col;
SELECT -32Y AS col;
SELECT 482S AS col;

-- fractional literals
SELECT 12.578 AS col;
SELECT -0.1234567 AS col;
SELECT -.1234567 AS col;
SELECT 123. AS col;
SELECT 123.BD AS col;
SELECT 5E2 AS col;
SELECT 5D AS col;
SELECT -5BD AS col;
SELECT 12.578e-2d AS col;
SELECT -.1234567E+2BD AS col;
SELECT +3.e+3 AS col;
SELECT -3.E-3D AS col;

-- datetime literal
SELECT DATE '1997' AS col;
SELECT DATE '1997-01' AS col;
SELECT DATE '2011-11-11' AS col;
SELECT TIMESTAMP '1997-01-31 09:26:56.123' AS col;
SELECT TIMESTAMP '1997-01-31 09:26:56.66666666UTC+08:00' AS col;
SELECT TIMESTAMP '1997-01' AS col;

-- ansi interval literal
SELECT INTERVAL '2-3' YEAR TO MONTH AS col;
SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col;

-- multi-units interval syntax
SELECT INTERVAL 3 YEAR AS col;
SELECT INTERVAL -2 HOUR '3' MINUTE AS col;
SELECT INTERVAL '1 YEAR 2 DAYS 3 HOURS';
SELECT INTERVAL 1 YEARS 2 MONTH 3 WEEK 4 DAYS 5 HOUR 6 MINUTES
    7 SECOND 8 MILLISECOND 9 MICROSECONDS AS col;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/literals.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 27ac5fd6c297c22bff2b586e7f84732c3265df121931dcf21fd71f0086a96ce3
file:
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          quoted_literal: "'Hello, World!'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          quoted_literal: "'SPARK SQL'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          quoted_literal: "\"it's $10.\""
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          bytes_quoted_literal: "X'123456'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          null_literal: 'NULL'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          boolean_literal: 'TRUE'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          boolean_literal: 'FALSE'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: '2147483648'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 9223372036854775807l
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: 32Y
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 482S
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '12.578'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: '0.1234567'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: '.1234567'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: '123.'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 123.BD
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 5E2
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 5D
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: 5BD
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal: 12.578e-2d
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: .1234567E+2BD
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: +
            numeric_literal: '3.e+3'
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          numeric_literal:
            sign_indicator: '-'
            numeric_literal: 3.E-3D
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          keyword: DATE
          date_constructor_literal: "'1997'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          keyword: DATE
          date_constructor_literal: "'1997-01'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword: SELECT
        select_clause_element:
          keyword: DATE
          date_constructor_literal: "'2011-11-11'"
          alias_expression:
            keyword: AS
            naked_identifier: col
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
        keyword:
SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01-31 09:26:56.123'" alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01-31 09:26:56.66666666UTC+08:00'" alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: TIMESTAMP date_constructor_literal: "'1997-01'" alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: - signed_quoted_literal: "'2-3'" - date_part: YEAR - keyword: TO - date_part: MONTH alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: - sign_indicator: '-' - signed_quoted_literal: "'20 15:40:32.99899999'" - date_part: DAY - keyword: TO - date_part: SECOND alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: keyword: INTERVAL interval_literal: numeric_literal: '3' date_part: YEAR alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - interval_literal: sign_indicator: '-' numeric_literal: '2' date_part: HOUR - interval_literal: signed_quoted_literal: "'3'" date_part: MINUTE alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: keyword: INTERVAL date_constructor_literal: "'1 YEAR 2 DAYS 3 HOURS'" - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: interval_expression: - keyword: INTERVAL - interval_literal: numeric_literal: '1' date_part: YEARS - interval_literal: numeric_literal: '2' date_part: MONTH - interval_literal: numeric_literal: '3' date_part: WEEK - interval_literal: numeric_literal: '4' date_part: DAYS - interval_literal: numeric_literal: '5' date_part: HOUR - interval_literal: numeric_literal: '6' date_part: MINUTES - interval_literal: numeric_literal: '7' date_part: SECOND - interval_literal: numeric_literal: '8' date_part: MILLISECOND - interval_literal: numeric_literal: '9' date_part: MICROSECONDS alias_expression: keyword: AS naked_identifier: col - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/load_data.sql000066400000000000000000000015711451700765000242650ustar00rootroot00000000000000-- Assuming the students table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH '/user/hive/warehouse/test_partition/c2=2/c3=3' OVERWRITE INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); -- Assuming the students table is in '/user/hive/warehouse/' LOAD DATA INPATH '/user/hive/warehouse/students' OVERWRITE INTO TABLE test_load; -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA LOCAL INPATH 
'/user/hive/warehouse/test_partition/c2=2/c3=3' INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); -- Assuming the test_partition table is in '/user/hive/warehouse/' LOAD DATA INPATH '/user/hive/warehouse/test_partition/c2=2/c3=3' INTO TABLE test_load_partition PARTITION (c2 = 2, c3 = 3); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/load_data.yml000066400000000000000000000063071451700765000242710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f894e518d619e59530c501d643a4c8dfad08c6ea737037b5c56ea799502159cb file: - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/students'" - keyword: OVERWRITE - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: LOCAL - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: load_data_statement: - keyword: LOAD - keyword: DATA - keyword: INPATH - quoted_literal: "'/user/hive/warehouse/test_partition/c2=2/c3=3'" - keyword: INTO - keyword: TABLE - table_reference: naked_identifier: test_load_partition - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: c2 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - column_reference: naked_identifier: c3 - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '3' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/merge_into.sql000066400000000000000000000014221451700765000245000ustar00rootroot00000000000000-- Merge using Table MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Select MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE 
SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c); -- Merge using Delete MERGE INTO t USING u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN MATCHED THEN DELETE; -- Merge using multiple operations MERGE INTO t USING u ON (a = b) WHEN MATCHED AND a > b THEN UPDATE SET a = b WHEN MATCHED AND ( a < b AND c < d ) THEN DELETE WHEN NOT MATCHED THEN INSERT (a, c) VALUES (b, d); -- Merge using sparksql specific matched clause MERGE INTO t USING u ON (a = b) WHEN MATCHED AND ( a < b AND c < d ) THEN UPDATE SET * WHEN NOT MATCHED THEN INSERT *; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/merge_into.yml000066400000000000000000000236631451700765000245150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6150e790936ff209de8c5e251c3cd23d8901e8139f6e75b1db9c7329f5d2f83d file: - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u end_bracket: ) - alias_expression: keyword: AS naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: start_bracket: ( column_reference: naked_identifier: b end_bracket: ) values_clause: keyword: VALUES bracketed: start_bracket: ( expression: column_reference: naked_identifier: c end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: 
INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_delete_clause: keyword: DELETE - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: b - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: column_reference: naked_identifier: d - end_bracket: ) - statement_terminator: ; - statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: naked_identifier: t - keyword: USING - table_reference: naked_identifier: u - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: b end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: b - binary_operator: AND - column_reference: naked_identifier: c - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: d end_bracket: ) - keyword: THEN - merge_update_clause: - keyword: UPDATE - keyword: SET - wildcard_identifier: star: '*' 
merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: keyword: INSERT wildcard_identifier: star: '*' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/numeric_literal.sql000066400000000000000000000012331451700765000255260ustar00rootroot00000000000000SELECT foo FROM bar WHERE baz > -2147483648 AND baz > 9223372036854775807l AND baz > 9223372036854775807L AND baz > -32y AND baz > -32Y AND baz > 482s AND baz > 482S AND baz > 12.578 AND baz > -0.1234567 AND baz > -.1234567 AND baz > -123. AND baz > 123.bd AND baz > 123.BD AND baz > 5e2 AND baz > 5E2 AND baz > 5d AND baz > 5D AND baz > -5bd AND baz > -5BD AND baz > 12.578e-2d AND baz > 12.578E-2D AND baz > -.1234567e+2bd AND baz > -.1234567E+2BD AND baz > +3.e+3 AND baz > +3.E+3 AND baz > -3.E-3D AND baz > -3.e-3d AND baz > -+-1 AND baz > -+- 1 sqlfluff-2.3.5/test/fixtures/dialects/sparksql/numeric_literal.yml000066400000000000000000000170131451700765000255330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d06536672a4e28d97dd08f5456cf9201d7712426ae8ac0458d0083569d997c3e file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar where_clause: keyword: WHERE expression: - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '2147483648' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 9223372036854775807l - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 9223372036854775807L - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 32y - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 32Y - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 482s - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 482S - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '12.578' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '0.1234567' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '.1234567' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: '123.' 
- binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 123.bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 123.BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5e2 - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5E2 - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 5D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 5bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 5BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 12.578e-2d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: 12.578E-2D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: .1234567e+2bd - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: .1234567E+2BD - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: + numeric_literal: '3.e+3' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: + numeric_literal: '3.E+3' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 3.E-3D - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - numeric_literal: sign_indicator: '-' numeric_literal: 3.e-3d - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - sign_indicator: '-' - sign_indicator: + - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: baz - comparison_operator: raw_comparison_operator: '>' - sign_indicator: '-' - sign_indicator: + - numeric_literal: sign_indicator: '-' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/sparksql/parse_integer_type.sql000066400000000000000000000000521451700765000262360ustar00rootroot00000000000000SELECT 123 AS INTEGER, 123 AS INT sqlfluff-2.3.5/test/fixtures/dialects/sparksql/parse_integer_type.yml000066400000000000000000000014351451700765000262460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5d107a2b14daf7c12a56e12a863bafa20a714d274a708db244020895d685d0b6 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '123' alias_expression: keyword: AS naked_identifier: INTEGER - comma: ',' - select_clause_element: numeric_literal: '123' alias_expression: keyword: AS naked_identifier: INT sqlfluff-2.3.5/test/fixtures/dialects/sparksql/pivot_clause.sql000066400000000000000000000026571451700765000250600ustar00rootroot00000000000000SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR (name) IN ('John' AS john, 'Mike' AS mike) ); SELECT a FROM person PIVOT ( SUM(age) AS a FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name IN ('John' AS john, 'Mike' AS mike) ); SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name, age IN (('John', 30) AS c1, ('Mike', 40) AS c2) ); SELECT p.a, p.c FROM person AS p PIVOT ( SUM(age) AS a, AVG(class) AS c FOR name, age IN (('John', 30) AS c1, ('Mike', 40) AS c2) ); -- Will throw error when executed but should parse SELECT a, c FROM person PIVOT ( SUM(age) AS a, AVG(class) AS c FOR (name, age) IN ('John' AS c1, ('Mike', 40) AS c2) ); SELECT * FROM ( some_table ) PIVOT ( min(timestamp_ns) / 1e9 as min_timestamp_s -- this is the offending line FOR run_id in ( test_run_id as test, ctrl_run_id as ctrl ) ); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/pivot_clause.yml000066400000000000000000000503031451700765000250510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6fe63a40ae5b468ad5207ebec94cf11a25d2bcb96fb8437e5534cd2a96b54f92 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: start_bracket: ( expression: quoted_literal: "'John'" alias_expression: keyword: AS naked_identifier: john end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - keyword: FOR - bracketed: start_bracket: ( naked_identifier: name end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: 
column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: john - comma: ',' - expression: quoted_literal: "'Mike'" - alias_expression: keyword: AS naked_identifier: mike - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - comma: ',' - naked_identifier: age - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - alias_expression: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: keyword: AS 
naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: keyword: AS naked_identifier: p pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: keyword: AS naked_identifier: c - keyword: FOR - naked_identifier: name - comma: ',' - naked_identifier: age - keyword: IN - bracketed: - start_bracket: ( - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - comma: ',' - expression: numeric_literal: '30' - end_bracket: ) - alias_expression: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: keyword: AS naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - alias_expression: keyword: AS naked_identifier: a - comma: ',' - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: class end_bracket: ) - alias_expression: keyword: AS naked_identifier: c - keyword: FOR - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: IN - bracketed: - start_bracket: ( - expression: quoted_literal: "'John'" - alias_expression: keyword: AS naked_identifier: c1 - comma: ',' - bracketed: - start_bracket: ( - expression: quoted_literal: "'Mike'" - comma: ',' - expression: numeric_literal: '40' - end_bracket: ) - alias_expression: keyword: AS naked_identifier: c2 - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: some_table end_bracket: ) pivot_clause: keyword: PIVOT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: min bracketed: start_bracket: ( expression: column_reference: naked_identifier: timestamp_ns end_bracket: ) binary_operator: / numeric_literal: 1e9 - alias_expression: keyword: as 
naked_identifier: min_timestamp_s - keyword: FOR - naked_identifier: run_id - keyword: in - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: test_run_id - alias_expression: keyword: as naked_identifier: test - comma: ',' - expression: column_reference: naked_identifier: ctrl_run_id - alias_expression: keyword: as naked_identifier: ctrl - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/quoted_literal.sql000066400000000000000000000001231451700765000253620ustar00rootroot00000000000000SELECT result FROM student WHERE name = "John Smith" OR name = 'Jane Doe'; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/quoted_literal.yml000066400000000000000000000023331451700765000253710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 12b5ee9a4ad1369df8e6285a6da40f648dd57cb5715c80816fc7738ea676215c file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: result from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: student where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: '"John Smith"' - binary_operator: OR - column_reference: naked_identifier: name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Jane Doe'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh.sql000066400000000000000000000001331451700765000240040ustar00rootroot00000000000000-- The Path is resolved using the datasource's File Index. REFRESH "hdfs://path/to/table"; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh.yml000066400000000000000000000010161451700765000240070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 970706fdaea7911cf0c0b819d83665cdbeb973323dd78d19cf9624cf1d3ebeb3 file: statement: refresh_statement: keyword: REFRESH quoted_literal: '"hdfs://path/to/table"' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh_function.sql000066400000000000000000000005321451700765000257140ustar00rootroot00000000000000-- The cached entry of the function will be refreshed -- The function is resolved from the current database -- as the function name is unqualified. REFRESH FUNCTION func1; -- The cached entry of the function will be refreshed -- The function is resolved from tempDB database as the -- function name is qualified. REFRESH FUNCTION db1.func1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh_function.yml000066400000000000000000000014131451700765000257150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 73c2d61b75bd316b3062a10b9dffa1709c124f3a0c7971c607c8319e60d4015c file: - statement: refresh_statement: - keyword: REFRESH - keyword: FUNCTION - function_name: function_name_identifier: func1 - statement_terminator: ; - statement: refresh_statement: - keyword: REFRESH - keyword: FUNCTION - function_name: naked_identifier: db1 dot: . function_name_identifier: func1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh_table.sql000066400000000000000000000005711451700765000251610ustar00rootroot00000000000000-- The cached entries of the table will be refreshed -- The table is resolved from the current database as -- the table name is unqualified. REFRESH TABLE tbl1; REFRESH tbl1; -- The cached entries of the view will be refreshed or invalidated -- The view is resolved from tempDB database, as the view -- name is qualified. REFRESH TABLE tempdb.view1; REFRESH tempdb.view1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/refresh_table.yml000066400000000000000000000021021451700765000251530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ca4dd1564256b67c94d7bff54c52bd2f1ec227aafe2ebe7c17fdd6ddda30d41a file: - statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: refresh_statement: keyword: REFRESH table_reference: naked_identifier: tbl1 - statement_terminator: ; - statement: refresh_statement: - keyword: REFRESH - keyword: TABLE - table_reference: - naked_identifier: tempdb - dot: . - naked_identifier: view1 - statement_terminator: ; - statement: refresh_statement: keyword: REFRESH table_reference: - naked_identifier: tempdb - dot: . - naked_identifier: view1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/remove_widget.sql000066400000000000000000000000561451700765000252120ustar00rootroot00000000000000REMOVE WIDGET state; REMOVE WIDGET database; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/remove_widget.yml000066400000000000000000000012651451700765000252170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1948d24ba464cafe5f0aab52377ee29c4738fed8a52e997c246548b045b912e0 file: - statement: remove_widget_statement: - keyword: REMOVE - keyword: WIDGET - widget_name_identifier: state - statement_terminator: ; - statement: remove_widget_statement: - keyword: REMOVE - keyword: WIDGET - widget_name_identifier: database - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/repair_table.sql000066400000000000000000000005401451700765000250010ustar00rootroot00000000000000-- REPAIR TABLE with all optional syntax MSCK REPAIR TABLE table_identifier ADD PARTITIONS; MSCK REPAIR TABLE table_identifier DROP PARTITIONS; MSCK REPAIR TABLE table_identifier SYNC PARTITIONS; -- REPAIR TABLE with no optional syntax MSCK REPAIR TABLE table_identifier; -- run MSCK REPAIR TABLE to recovers all the partitions MSCK REPAIR TABLE t1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/repair_table.yml000066400000000000000000000027621451700765000250130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad69dfd163a37ca9b5693d986cfedd860fe067e937f93faa2718af5dc7ee6f9c file: - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: ADD - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: DROP - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - keyword: SYNC - keyword: PARTITIONS - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: table_identifier - statement_terminator: ; - statement: msck_repair_table_statement: - keyword: MSCK - keyword: REPAIR - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/reset.sql000066400000000000000000000000311451700765000234650ustar00rootroot00000000000000RESET; RESET spark.abc; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/reset.yml000066400000000000000000000011631451700765000234760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3331ebbbdfebac532a05c372437bc04e0f1dfb59d3c22d61acf1d3efa217a5b5 file: - statement: reset_statement: keyword: RESET - statement_terminator: ; - statement: reset_statement: - keyword: RESET - naked_identifier: spark - dot: . 
- naked_identifier: abc - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_aggregate.sql000066400000000000000000000002201451700765000256300ustar00rootroot00000000000000SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x); -- 6 SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10); -- 60 sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_aggregate.yml000066400000000000000000000071101451700765000256370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b7198b63537da7d91157d9a4a03aa8ce3d41efc3d77ba91cf9c24a67289be401 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: aggregate bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: aggregate bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: '*' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_cluster_by.sql000066400000000000000000000014071451700765000260650ustar00rootroot00000000000000-- Produces rows clustered by age. Persons with same age are clustered together. -- In the query below, persons with age 18 and 25 are in first partition and the -- persons with age 16 are in the second partition. The rows are sorted based -- on age within each partition. 
SELECT age, name FROM person CLUSTER BY age; SELECT age, name FROM person CLUSTER BY 1; SELECT age, name FROM person CLUSTER BY name, age; SELECT age, name FROM person CLUSTER BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 CLUSTER BY age; SELECT age, name FROM person GROUP BY age CLUSTER BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 CLUSTER BY age; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_cluster_by.yml000066400000000000000000000152111451700765000260650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0e2f4d9b1508eac0bc91d3eb8ccb54f67814f7f1092115425e5dbaa8f723b8fc file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person cluster_by_clause: - keyword: CLUSTER - keyword: BY - expression: function: function_name: function_name_identifier: LEFT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: 
table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' cluster_by_clause: - keyword: CLUSTER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_delimited_identifier.sql000066400000000000000000000002511451700765000300500ustar00rootroot00000000000000select 1 as `delimited_but_regular_identifer_0`; select 2 as `100% unruly-identifier`; select `questionable identifier?` `still-questionable` from `(delimited)-table!`; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_delimited_identifier.yml000066400000000000000000000027201451700765000300550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
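# Note: Spark SQL delimits identifiers with backticks, so names that would
# otherwise be illegal (spaces, punctuation, a leading digit, e.g.
# `100% unruly-identifier`) parse as a single quoted_identifier node below.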
_hash: e1ed70a3fb03ffec035450e7153b5b48d0072fdec666987d1a1341674c46f1c3 file: - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '1' alias_expression: keyword: as quoted_identifier: '`delimited_but_regular_identifer_0`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: numeric_literal: '2' alias_expression: keyword: as quoted_identifier: '`100% unruly-identifier`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`questionable identifier?`' alias_expression: quoted_identifier: '`still-questionable`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`(delimited)-table!`' - statement_terminator: ; select_delimited_identifier_with_escaped_backticks.sql000066400000000000000000000006521451700765000345330ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/sparksqlselect `delimited``with escaped` from `some``table` `some``alias`; select `delimited``with escaped` from `some``table` as `some``alias`; select ```delimited` `alias` FROM `some``table` `some````alias`; select `delimited``` `alias` FROM `some``table` ```some````alias```; SELECT `delimited ``identifier` `alias` FROM `some``table` `some``alias`; SELECT `delimited ``identifier` AS `alias` FROM `some``table` AS `some``alias`; select_delimited_identifier_with_escaped_backticks.yml000066400000000000000000000073601451700765000345400ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/sparksql# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
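# Note: inside a backtick-delimited identifier, a doubled backtick escapes one
# literal backtick, so `some``table` below names an object called some`table
# (sketch: select 1 from `some``table`;).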
_hash: 0c1ce432be9da30551e18365143c2c554a35889cc3de9d463285f929d5eca94e file: - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited``with escaped`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited``with escaped`' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: keyword: as quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '```delimited`' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some````alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: quoted_identifier: '`delimited```' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '```some````alias```' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '`delimited ``identifier`' alias_expression: quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: quoted_identifier: '`some``alias`' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: quoted_identifier: '`delimited ``identifier`' alias_expression: keyword: AS quoted_identifier: '`alias`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: quoted_identifier: '`some``table`' alias_expression: keyword: AS quoted_identifier: '`some``alias`' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_distribute_by.sql000066400000000000000000000014171451700765000265630ustar00rootroot00000000000000-- Produces rows clustered by age. Persons with the same age are clustered together. -- Unlike the `CLUSTER BY` clause, the rows are not sorted within a partition.
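-- Note: adding an explicit SORT BY on the same key restores CLUSTER BY-style
-- output; the final query below combines DISTRIBUTE BY with SORT BY for
-- exactly this reason. A minimal sketch:
--   SELECT age, name FROM person DISTRIBUTE BY age SORT BY age;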
SELECT age, name FROM person DISTRIBUTE BY age; SELECT age, name FROM person DISTRIBUTE BY 1; SELECT age, name FROM person DISTRIBUTE BY name, age; SELECT age, name FROM person DISTRIBUTE BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 DISTRIBUTE BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 DISTRIBUTE BY age SORT BY age; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_distribute_by.yml000066400000000000000000000177551451700765000266010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9ce5dad4768aabdc2d0ae563cdd3b7b9db42c3608bc643d818f18cc212079131 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - expression: function: function_name: function_name_identifier: LEFT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - 
select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' distribute_by_clause: - keyword: DISTRIBUTE - keyword: BY - column_reference: naked_identifier: age sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_div.sql000066400000000000000000000000201451700765000244620ustar00rootroot00000000000000SELECT 3 DIV 2; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_div.yml000066400000000000000000000012431451700765000244740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
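# Note: DIV is Spark SQL's integral-division operator (3 DIV 2 evaluates to 1,
# unlike 3 / 2), and it parses as a keyword-based binary_operator below.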
_hash: 51cb10bd9421e08eb8915768a600cb5424ff64f19b8ecc513413f82ea2da8a15 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '3' - binary_operator: keyword: DIV - numeric_literal: '2' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_file.sql000066400000000000000000000036651451700765000256640ustar00rootroot00000000000000-- PARQUET file SELECT a, b, c FROM PARQUET.`examples/src/main/resources/users.parquet`; -- Directory of Parquet Files SELECT a, b, c FROM PARQUET.`examples/src/main/resources/users`; -- ORC file SELECT a, b, c FROM ORC.`examples/src/main/resources/users.orc`; -- JSON file SELECT a, b, c FROM JSON.`examples/src/main/resources/people.json`; -- Directory of JSON files SELECT a, b, c FROM JSON.`examples/src/main/resources/people`; -- Text File SELECT a, b, c FROM TEXT.`examples/src/main/resources/people.txt`; -- Tests for Inline Path Glob Filter -- https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html#path-global-filter --noqa: LT05 -- Inline Path Filter using Asterisk (*) SELECT a, b, c FROM TEXT.`//root/*.txt`; -- Inline Path Filter using Question mark (?) SELECT a, b, c FROM TEXT.`//root/200?.txt`; -- Inline Path Filter using Character Class ([ab]) SELECT a, b, c FROM TEXT.`//root/200[23].txt`; -- Inline Path Filter using Negated Character Class ([^ab]) SELECT a, b, c FROM TEXT.`//root/200[^23].txt`; -- Inline Path Filter using Character Range ([a-b]) SELECT a, b, c FROM TEXT.`//root/200[2-5].txt`; -- Inline Path Filter using Negated Character Range ([^a-b]) SELECT a, b, c FROM TEXT.`//root/200[^2-5].txt`; -- Inline Path Filter using Alternation ({a,b}) SELECT a, b, c FROM TEXT.`//root/20{04, 05}.txt`; -- JSON treated as Text File SELECT a, b, c FROM TEXT.`examples/src/main/resources/people.json`; -- BinaryFile SELECT a, b, c FROM BINARYFILE.`/events/events-kafka.json`; -- Directory of BinaryFiles SELECT a, b, c FROM BINARYFILE.`/events/events-kafka`; -- CSV File SELECT a, b, c FROM CSV.`/sales/sales.csv`; -- Delta File; test for Issue #602 SELECT a, b, c FROM DELTA.`/mnt/datalake/table`; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_file.yml000066400000000000000000000312421451700765000256560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 515e6a9134ebb0c984be726ff540b20dc6ae7cef764794ee195f210b1c992383 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: PARQUET dot: . 
quoted_identifier: '`examples/src/main/resources/users.parquet`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: PARQUET dot: . quoted_identifier: '`examples/src/main/resources/users`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: ORC dot: . quoted_identifier: '`examples/src/main/resources/users.orc`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . quoted_identifier: '`examples/src/main/resources/people.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: JSON dot: . quoted_identifier: '`examples/src/main/resources/people`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`examples/src/main/resources/people.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/*.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . 
quoted_identifier: '`//root/200?.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[23].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[^23].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[2-5].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/200[^2-5].txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`//root/20{04, 05}.txt`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: TEXT dot: . quoted_identifier: '`examples/src/main/resources/people.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: file_format: BINARYFILE dot: . 
quoted_identifier: '`/events/events-kafka.json`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: file_format: BINARYFILE dot: . quoted_identifier: '`/events/events-kafka`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: CSV dot: . quoted_identifier: '`/sales/sales.csv`' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: file_reference: keyword: DELTA dot: . quoted_identifier: '`/mnt/datalake/table`' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_lateral_view.sql000066400000000000000000000035151451700765000274150ustar00rootroot00000000000000SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) tbl_name AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age; SELECT c_age, COUNT(*) AS record_count FROM person LATERAL VIEW EXPLODE(ARRAY(30, 60)) AS c_age LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age GROUP BY c_age; SELECT id, name, age, class, address, c_age, d_age FROM person LATERAL VIEW EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name AS c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tbl_name c_age; SELECT id, name, age, class, address, time, c_age FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) c_age; SELECT person.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people AS name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) exploded_people name, age, state; SELECT p.id, exploded_people.name, exploded_people.age, exploded_people.state FROM person AS p LATERAL VIEW INLINE(array_of_structs) AS name, age, state; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_lateral_view.yml000066400000000000000000000507731451700765000274270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d2478eb1ff7c9737cdf83e4a4ea9f34b9b5d72181fde839634c86d58a0a54f8e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: d_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: - table_expression: table_reference: naked_identifier: person - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '30' - comma: ',' - expression: numeric_literal: '60' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c_age - lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: - start_bracket: ( - expression: numeric_literal: '40' - comma: ',' - expression: numeric_literal: '80' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: d_age groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: 
column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age - comma: ',' - select_clause_element: column_reference: naked_identifier: d_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - keyword: AS - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: tbl_name - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: class - comma: ',' - select_clause_element: column_reference: naked_identifier: address - comma: ',' - 
select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: column_reference: naked_identifier: c_age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - keyword: OUTER - function: function_name: function_name_identifier: EXPLODE bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ARRAY bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - naked_identifier: c_age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: person - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . 
- naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - naked_identifier: exploded_people - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: age - comma: ',' - select_clause_element: column_reference: - naked_identifier: exploded_people - dot: . - naked_identifier: state from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person alias_expression: keyword: AS naked_identifier: p lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: INLINE bracketed: start_bracket: ( expression: column_reference: naked_identifier: array_of_structs end_bracket: ) - keyword: AS - naked_identifier: name - comma: ',' - naked_identifier: age - comma: ',' - naked_identifier: state - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_multiple_values_clauses.sql000066400000000000000000000001161451700765000316620ustar00rootroot00000000000000select * from values 1, values 2; select * from (values 1, 2), (values 2, 3); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_multiple_values_clauses.yml000066400000000000000000000042461451700765000316740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
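# Note: comma-separated VALUES clauses in a single FROM parse as sibling
# from_expression nodes below, effectively an implicit cross join of the two
# inline relations.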
_hash: 9c1640e3e298203563aa4d91fdcd0176c7882e17f7fb1399af63ab20fd2a6eb7 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' end_bracket: ) - comma: ',' - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_values.sql000066400000000000000000000015431451700765000262350ustar00rootroot00000000000000select * from values 1; select * from values (1); select * from values (1,2); select * from (values 1,2,3); select * from (values (1),(2),(3)); select * from (values (1,2), (3,4)); select * from values 1, values 2; select * from (values (1,2), (3,4)), (values (1,2), (3,4)); select * from (values 1, least(2,3), greatest(4,5)); select * from values 1 as t; select * from values (1,2) as t(a, b); select * from (values (1,2), (3,4)) as t (a, b); select * from (values (1,2), (3,4)) as (a, b); select * from values 1 t; select * from values (1,2) t(a, b); select * from (values (1,2), (3,4)) t (a, b); select * from (values (1,2), (3,4)) (a, b); select * from values 1 , 2; select * from values ( 1 , 2 ) , ( 3 , 4 ); select * from values 1 , 2 , values 3 , 4; select * from values (1) , (2); select * from values 1 , 2 , values 3 , 4; select 1 + 2 == 3 from values 1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_values.yml000066400000000000000000000544201451700765000262410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
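# Note: an inline VALUES relation may carry an alias with optional column
# names, e.g. `select * from values (1, 2) as t(a, b);`; in the trees below
# the table alias appears under alias_expression and the column names under
# identifier_list.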
_hash: e2be6cb1b9f519d9837f273582ef24e37ba6d75f4ec4a9d70563ca10188da095 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - 
bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - comma: ',' - from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: bracketed: start_bracket: ( from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: function: function_name: function_name_identifier: least bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: greatest bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' alias_expression: keyword: as naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - 
start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' alias_expression: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: bracketed: start_bracket: ( table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' 
- comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: - keyword: values - bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) - comma: ',' - bracketed: start_bracket: ( expression: numeric_literal: '2' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: - keyword: from - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - from_expression: from_expression_element: table_expression: values_clause: - keyword: values - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - numeric_literal: '2' - comparison_operator: == - numeric_literal: '3' from_clause: keyword: from from_expression: from_expression_element: table_expression: values_clause: keyword: values expression: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_where_clause.sql000066400000000000000000000017011451700765000274000ustar00rootroot00000000000000-- Comparison operator in `WHERE` clause. SELECT name, age FROM person WHERE id > 200 ORDER BY id; -- Comparison and logical operators in `WHERE` clause. SELECT name, age FROM person WHERE id = 200 OR id = 300 ORDER BY id; -- Function expression in `WHERE` clause. SELECT name, age FROM person WHERE length(name) > 3 ORDER BY id; -- `BETWEEN` expression in `WHERE` clause. SELECT name, age FROM person WHERE id BETWEEN 200 AND 300 ORDER BY id; -- Scalar Subquery in `WHERE` clause. SELECT name, age FROM person WHERE age > (SELECT avg(age) FROM person); -- Correlated Subquery in `WHERE` clause. SELECT name, age FROM person WHERE EXISTS ( SELECT 1 FROM person WHERE person.id = person.id AND person.age IS NULL ); SELECT name, age FROM person WHERE person.id is distinct from person.age; SELECT name, age FROM person WHERE person.id is not distinct from person.age sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_from_where_clause.yml000066400000000000000000000224071451700765000274100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3544556730f04d72062eb7ec28274e358afa322a87613e945abdc49c46934198 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '>' numeric_literal: '200' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '200' - binary_operator: OR - column_reference: naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: function: function_name: function_name_identifier: length bracketed: start_bracket: ( expression: column_reference: naked_identifier: name end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: id - keyword: BETWEEN - numeric_literal: '200' - keyword: AND - numeric_literal: '300' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: raw_comparison_operator: '>' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT 
select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: person - dot: . - naked_identifier: id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: person - dot: . - naked_identifier: id - binary_operator: AND - column_reference: - naked_identifier: person - dot: . - naked_identifier: age - keyword: IS - null_literal: 'NULL' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: person - dot: . - naked_identifier: id - keyword: is - keyword: distinct - keyword: from - column_reference: - naked_identifier: person - dot: . - naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: person - dot: . - naked_identifier: id - keyword: is - keyword: not - keyword: distinct - keyword: from - column_reference: - naked_identifier: person - dot: . - naked_identifier: age - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_group_by.sql000066400000000000000000000051671451700765000255470ustar00rootroot00000000000000
-- Sum of quantity per dealership. Group by `id`.
SELECT id, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY id
ORDER BY id;

-- Use column position in GROUP by clause.
SELECT id, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY 1
ORDER BY 1;

-- Multiple aggregations.
-- 1. Sum of quantity per dealership.
-- 2. Max quantity per dealership.
SELECT
    id,
    sum(quantity) AS sum_quantity,
    max(quantity) AS max_quantity
FROM dealer
GROUP BY id
ORDER BY id;

-- Count the number of distinct dealer cities per car_model.
SELECT car_model, count(DISTINCT city) AS count_distinct_city
FROM dealer
GROUP BY car_model;

-- Sum of only 'Honda Civic' and 'Honda CRV' quantities per dealership.
SELECT
    id,
    sum(quantity) FILTER (
        WHERE car_model IN ('Honda Civic', 'Honda CRV')
    ) AS `sum(quantity)`
FROM dealer
GROUP BY id
ORDER BY id;

-- Aggregations using multiple sets of grouping columns in a single statement.
-- Following performs aggregations based on four sets of grouping columns.
-- 1. city, car_model
-- 2. city
-- 3. car_model
-- 4. Empty grouping set. Returns quantities for all city and car models.
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model, GROUPING SETS ((city, car_model), (city), (car_model), ())
ORDER BY city;

-- Group by processing with `ROLLUP` clause.
-- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), ())
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model WITH ROLLUP
ORDER BY city, car_model;

-- Group by processing with `CUBE` clause.
-- Equivalent GROUP BY:
-- GROUPING SETS ((city, car_model), (city), (car_model), ())
SELECT city, car_model, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city, car_model WITH CUBE
ORDER BY city, car_model;

-- Select the first row in column age
-- Implicit GROUP BY
SELECT first(age)
FROM person;

-- Implicit GROUP BY
SELECT
    first(age IGNORE NULLS) AS first_age,
    last(id) AS last_id,
    sum(id) AS sum_id
FROM person;

-- CUBE within GROUP BY clause
SELECT name, age, count(*) AS record_count
FROM people
GROUP BY cube(name, age);

-- ROLLUP within GROUP BY clause
SELECT name, age, count(*) AS record_count
FROM people
GROUP BY rollup(name, age);
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_group_by.yml000066400000000000000000000517451451700765000255520ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 993c7111c0dd4f24ff2a8e591b3bfc24085cc6181a41c708613751e93d1df747 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: max_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( keyword: DISTINCT expression: column_reference: naked_identifier: city end_bracket: ) alias_expression: keyword: AS naked_identifier: count_distinct_city from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: - function_name: function_name_identifier: sum - bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) - keyword: FILTER - bracketed: start_bracket: ( keyword: WHERE expression: column_reference: naked_identifier: car_model keyword: IN 
bracketed: - start_bracket: ( - quoted_literal: "'Honda Civic'" - comma: ',' - quoted_literal: "'Honda CRV'" - end_bracket: ) end_bracket: ) alias_expression: keyword: AS quoted_identifier: '`sum(quantity)`' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: id orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - 
select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - comma: ',' - grouping_sets_clause: - keyword: GROUPING - keyword: SETS - bracketed: start_bracket: ( grouping_expression_list: - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: city end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( expression: column_reference: naked_identifier: car_model end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( end_bracket: ) end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: ROLLUP orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: column_reference: naked_identifier: car_model - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - with_cube_rollup_clause: - keyword: WITH - keyword: CUBE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - comma: ',' - column_reference: naked_identifier: car_model - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: first bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: 
) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: first bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: age - keyword: IGNORE - keyword: NULLS - end_bracket: ) alias_expression: keyword: AS naked_identifier: first_age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: last bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: keyword: AS naked_identifier: last_id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: id end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: cube bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) alias_expression: keyword: AS naked_identifier: record_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: people groupby_clause: - keyword: GROUP - keyword: BY - cube_rollup_clause: function_name: function_name_identifier: rollup bracketed: start_bracket: ( grouping_expression_list: - column_reference: naked_identifier: name - comma: ',' - column_reference: naked_identifier: age end_bracket: ) - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_having.sql000066400000000000000000000017271451700765000251710ustar00rootroot00000000000000
-- `HAVING` clause referring to column in `GROUP BY`.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING city = 'Fremont';

-- `HAVING` clause referring to aggregate function.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING sum(quantity) > 15;

-- `HAVING` clause referring to aggregate function
-- by its alias.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING sum_quantity > 15;

-- `HAVING` clause referring to a different aggregate
-- function than what is present in `SELECT` list.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING max(quantity) > 15;

-- `HAVING` clause referring to constant expression.
SELECT city, sum(quantity) AS sum_quantity
FROM dealer
GROUP BY city
HAVING 1 > 0
ORDER BY city;

-- `HAVING` clause without a `GROUP BY` clause.
SELECT sum(quantity) AS sum_quantity
FROM dealer
HAVING sum(quantity) > 10;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_having.yml000066400000000000000000000175101451700765000251720ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c126a9037518daed2db82455d9d3b1fdbdc72ce0ce521835403f730cf1540866
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city having_clause: keyword: HAVING expression: column_reference: naked_identifier: city comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Fremont'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '15' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city having_clause: keyword: HAVING expression: column_reference: naked_identifier: sum_quantity comparison_operator: raw_comparison_operator: '>'
numeric_literal: '15' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '15' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: city - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: city having_clause: keyword: HAVING expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: city - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) alias_expression: keyword: AS naked_identifier: sum_quantity from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dealer having_clause: keyword: HAVING expression: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: quantity end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_hints.sql000066400000000000000000000034231451700765000250370ustar00rootroot00000000000000
SELECT /*+ COALESCE(3) */ a, b, c FROM t;

SELECT /*+ REPARTITION(3) */ a, b, c FROM t;

SELECT /*+ REPARTITION(c) */ a, b, c FROM t;

SELECT /*+ REPARTITION(3, c) */ a, b, c FROM t;

SELECT /*+ REPARTITION_BY_RANGE(c) */ a, b, c FROM t;

SELECT /*+ REPARTITION_BY_RANGE(3, c) */ a, b, c FROM t;

SELECT /*+ REBALANCE */ a, b, c FROM t;

SELECT /*+ REBALANCE(c) */ a, b, c FROM t;

-- multiple partitioning hints
SELECT /*+ REPARTITION(100), COALESCE(500), REPARTITION_BY_RANGE(3, c) */ a, b, c
FROM t;

-- Join Hints for broadcast join
SELECT /*+ BROADCAST(t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCASTJOIN(t1) */ t1.a, t1.b, t2.c
FROM t1
LEFT JOIN t2 ON t1.key = t2.key;

SELECT /*+ MAPJOIN(t2) */ t1.a, t1.b, t2.c
FROM t1
LEFT JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle sort merge join
SELECT /*+ SHUFFLE_MERGE(t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ MERGEJOIN(t2) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ MERGE(t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle hash join
SELECT /*+ SHUFFLE_HASH(t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

-- Join Hints for shuffle-and-replicate nested loop join
SELECT /*+ SHUFFLE_REPLICATE_NL(t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCAST(t1), MERGE(t1, t2) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;

SELECT /*+ BROADCAST(db.t1) */ t1.a, t1.b, t2.c
FROM t1
INNER JOIN t2 ON t1.key = t2.key;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_hints.yml000066400000000000000000000663141451700765000250510ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 17d210004cf1b2cc4a9861681dd50739ed79ac4a0172ae16ad8a18fd885c03cd
file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: COALESCE bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( numeric_literal: '3' comma: ',' naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b -
comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE bracketed: start_bracket: ( naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE bracketed: start_bracket: ( numeric_literal: '3' comma: ',' naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REBALANCE end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REBALANCE bracketed: start_bracket: ( naked_identifier: c end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: - start_hint: /*+ - hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: COALESCE bracketed: start_bracket: ( numeric_literal: '500' end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: REPARTITION_BY_RANGE bracketed: start_bracket: ( numeric_literal: '3' comma: ',' naked_identifier: c end_bracket: ) - end_hint: '*/' - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c 
from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCAST bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCASTJOIN bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MAPJOIN bracketed: start_bracket: ( naked_identifier: t2 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_MERGE bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MERGEJOIN bracketed: start_bracket: ( naked_identifier: t2 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: MERGE bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_HASH bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: SHUFFLE_REPLICATE_NL bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: - start_hint: /*+ - hint_function: function_name: function_name_identifier: BROADCAST bracketed: start_bracket: ( naked_identifier: t1 end_bracket: ) - comma: ',' - hint_function: function_name: function_name_identifier: MERGE bracketed: - start_bracket: ( - naked_identifier: t1 - comma: ',' - naked_identifier: t2 - end_bracket: ) - end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: key - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: BROADCAST bracketed: start_bracket: ( table_reference: - naked_identifier: db - dot: . - naked_identifier: t1 end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: b - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t1 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t1 - dot: . - naked_identifier: key - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t2 - dot: . - naked_identifier: key - statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.sql000066400000000000000000000036741451700765000313620ustar00rootroot00000000000000
-- TVFs that can be specified in SELECT/LATERAL VIEW clauses
-- explode in a SELECT
SELECT explode(array(10, 20));

-- explode_outer in a SELECT
SELECT explode_outer(array(10, 20));

-- explode in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW explode(array(3, 4)) AS c2;

-- explode_outer in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW explode_outer(array(3, 4)) AS c2;

-- inline in a SELECT
SELECT inline(array(struct(1, 'a'), struct(2, 'b')));

-- inline_outer in a SELECT
SELECT inline_outer(array(struct(1, 'a'), struct(2, 'b')));

-- inline in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW inline(array(struct(1, 'a'), struct(2, 'b'))) AS c1, c2;

-- inline_outer in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW inline_outer(array(struct(1, 'a'), struct(2, 'b'))) AS c1, c2;

-- posexplode in a SELECT
SELECT posexplode(array(10, 20));

-- posexplode_outer in a SELECT
SELECT posexplode_outer(array(10, 20));

-- posexplode in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW posexplode(array(10, 20)) AS c1;

-- posexplode_outer in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW posexplode_outer(array(10, 20)) AS c1;

-- stack in a SELECT
SELECT stack(2, 1, 2, 3);

-- stack in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW stack(2, 1, 2, 3) AS c1, c2;

-- json_tuple in a SELECT
SELECT json_tuple('{"a":1, "b":2}', 'a', 'b');

-- json_tuple in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW json_tuple('{"a":1, "b":2}', 'a', 'b') AS c1, c2;

-- parse_url in a SELECT
SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST');

-- parse_url in a LATERAL VIEW clause
SELECT test.a, test.b
FROM test
LATERAL VIEW parse_url(
    'http://spark.apache.org/path?query=1', 'HOST'
) AS c1;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_lateral_view_supported_tvf.yml000066400000000000000000000543241451700765000313630ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by # hand.
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 178acfd014cf9b8b0815c26e4b85e0121265e0f2aa05a2f1146a3c8f60f91449 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: explode_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: explode_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: inline bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: inline_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: inline bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: inline_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: quoted_literal: "'a'" - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: struct bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: posexplode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: posexplode_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: posexplode bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: posexplode_outer bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '20' - end_bracket: ) end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: stack bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: stack bracketed: - start_bracket: ( - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: json_tuple bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"a\":1, \"b\":2}'" - comma: ',' - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: json_tuple bracketed: - start_bracket: ( - expression: quoted_literal: "'{\"a\":1, \"b\":2}'" - comma: ',' - expression: quoted_literal: "'a'" - comma: ',' - expression: quoted_literal: "'b'" - end_bracket: ) - keyword: AS - naked_identifier: c1 - comma: ',' - naked_identifier: c2 - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: parse_url bracketed: - start_bracket: ( - expression: quoted_literal: "'http://spark.apache.org/path?query=1'" - comma: ',' - expression: quoted_literal: "'HOST'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test - dot: . 
- naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test lateral_view_clause: - keyword: LATERAL - keyword: VIEW - function: function_name: function_name_identifier: parse_url bracketed: - start_bracket: ( - expression: quoted_literal: "'http://spark.apache.org/path?query=1'" - comma: ',' - expression: quoted_literal: "'HOST'" - end_bracket: ) - keyword: AS - naked_identifier: c1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_like_clause.sql000066400000000000000000000017601451700765000261740ustar00rootroot00000000000000SELECT a, b FROM person WHERE name LIKE 'M%'; SELECT a, b FROM person WHERE name LIKE 'M_ry'; SELECT a, b FROM person WHERE name NOT LIKE 'M_ry'; SELECT a, b FROM person WHERE name RLIKE 'M+'; SELECT a, b FROM person WHERE name REGEXP 'M+'; SELECT a, b FROM person WHERE name LIKE '%\_%'; SELECT a, b FROM person WHERE name LIKE '%$_%' ESCAPE '$'; SELECT a, b FROM person WHERE name LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name LIKE SOME ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ALL ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE ANY ('%an%', '%an'); SELECT a, b FROM person WHERE name NOT LIKE SOME ('%an%', '%an'); SELECT company FROM ilike_all_table WHERE company ILIKE ALL ('%oO%', '%Go%'); SELECT company FROM ilike_any_table WHERE company ILIKE ANY ('%oo%', '%IN', 'fA%'); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_like_clause.yml000066400000000000000000000306721451700765000262020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
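# Illustrative note (not part of the generated fixture): after adding or
# editing the companion select_like_clause.sql, this file is refreshed by
# re-running the generator named above from the repository root:
#   python test/generate_parse_fixture_yml.py
# Regenerating rewrites both the parse tree and the _hash value below.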
_hash: 349068b86aa827ffccae2cceb5577c0a2b0806df07a4ee7ef3ad2c797de3cdbd file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - quoted_literal: "'M_ry'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: RLIKE quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: REGEXP quoted_literal: "'M+'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: name keyword: LIKE quoted_literal: "'%\\_%'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - quoted_literal: "'%$_%'" - keyword: ESCAPE - quoted_literal: "'$'" - statement_terminator: ; - statement: select_statement: select_clause: - keyword: 
SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: LIKE - keyword: SOME - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ALL - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT - keyword: LIKE - keyword: ANY - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: - column_reference: naked_identifier: name - keyword: NOT 
- keyword: LIKE - keyword: SOME - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%an%'" - comma: ',' - column_reference: quoted_identifier: "'%an'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: company from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ilike_all_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: company - keyword: ILIKE - keyword: ALL - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%oO%'" - comma: ',' - column_reference: quoted_identifier: "'%Go%'" - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: company from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ilike_any_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: company - keyword: ILIKE - keyword: ANY - bracketed: - start_bracket: ( - column_reference: quoted_identifier: "'%oo%'" - comma: ',' - column_reference: quoted_identifier: "'%IN'" - comma: ',' - column_reference: quoted_identifier: "'fA%'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_limit_clause.sql000066400000000000000000000005131451700765000263610ustar00rootroot00000000000000-- Select the first two rows. SELECT name, age FROM person ORDER BY name LIMIT 2; -- Specifying ALL option on LIMIT returns all the rows. SELECT name, age FROM person ORDER BY name LIMIT ALL; -- A function expression as an input to LIMIT. SELECT name, age FROM person ORDER BY name LIMIT length('SPARK'); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_limit_clause.yml000066400000000000000000000052371451700765000263730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
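# Illustrative note (not part of the generated fixture): the value of the
# "_hash" key below is a 64-character hex digest, which is consistent with a
# SHA-256 hash. Which bytes SQLFluff actually hashes is an assumption here;
# defer to the generator script for the real scheme. A minimal check of the
# digest-length property only, using the Python standard library:
#   python -c "import hashlib; print(len(hashlib.sha256(b'...').hexdigest()))"  # -> 64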
_hash: 0bda33c859d1fc24de87abe17e48161c92c47c41e11e68222ca8dcaf01552941
file:
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: name
      limit_clause:
        keyword: LIMIT
        numeric_literal: '2'
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: name
      limit_clause:
      - keyword: LIMIT
      - keyword: ALL
- statement_terminator: ;
- statement:
    select_statement:
      select_clause:
      - keyword: SELECT
      - select_clause_element:
          column_reference:
            naked_identifier: name
      - comma: ','
      - select_clause_element:
          column_reference:
            naked_identifier: age
      from_clause:
        keyword: FROM
        from_expression:
          from_expression_element:
            table_expression:
              table_reference:
                naked_identifier: person
      orderby_clause:
      - keyword: ORDER
      - keyword: BY
      - column_reference:
          naked_identifier: name
      limit_clause:
        keyword: LIMIT
        function:
          function_name:
            function_name_identifier: length
          bracketed:
            start_bracket: (
            expression:
              quoted_literal: "'SPARK'"
            end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_order_by.sql000066400000000000000000000013211451700765000255120ustar00rootroot00000000000000
-- Sort rows
SELECT name, age FROM person ORDER BY age;

-- Sort rows in ascending manner keeping null values to be last.
SELECT name, age FROM person ORDER BY age NULLS LAST;

-- Sort rows in descending manner, which defaults to NULL LAST.
SELECT name, age FROM person ORDER BY age DESC;

-- Sort rows in descending manner keeping null values to be first.
SELECT name, age FROM person ORDER BY age DESC NULLS FIRST;

-- Sort rows based on more than one column with each column having different
-- sort direction.
SELECT name, age FROM person ORDER BY name ASC, age DESC;

-- Sort rows using complex expression.
SELECT name, age FROM person ORDER BY SUM(age)/SUM(age) DESC;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_order_by.yml000066400000000000000000000116571451700765000255270ustar00rootroot00000000000000
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
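# Illustrative note (not part of the generated fixture): a parse tree in this
# shape can also be inspected ad hoc with the SQLFluff CLI, for example:
#   sqlfluff parse test/fixtures/dialects/sparksql/select_order_by.sql --dialect sparksql
# This is useful for eyeballing a tree before committing a regenerated fixture.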
_hash: afe247b8f1ce7f10f95249deb215a501e36fbb4ffc6ff751bb1d93eed2b84c2d file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person orderby_clause: - keyword: ORDER - keyword: BY - expression: - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - binary_operator: / - function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) - keyword: DESC - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_qualify.sql000066400000000000000000000023111451700765000253570ustar00rootroot00000000000000SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce QUALIFY rank <= 3; SELECT 
item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 LIMIT 5; SELECT item, RANK() OVER (PARTITION BY category ORDER BY purchases DESC) AS rank FROM Produce WINDOW item_window AS ( PARTITION BY category ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) WHERE Produce.category = 'vegetable' QUALIFY rank <= 3 ORDER BY item sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_qualify.yml000066400000000000000000000355021451700765000253710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6be6f8cc477e592f46d4295f5a1f1af51289aec2aac6d4a159d82733da3b4f62 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . 
- naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' limit_clause: keyword: LIMIT numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Produce named_window: keyword: WINDOW named_window_expression: naked_identifier: item_window keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: category orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: purchases frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: Produce - dot: . - naked_identifier: category comparison_operator: raw_comparison_operator: '=' quoted_literal: "'vegetable'" qualify_clause: keyword: QUALIFY expression: column_reference: naked_identifier: rank comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '3' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: item sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_reduce.sql000066400000000000000000000005421451700765000251600ustar00rootroot00000000000000SELECT reduce(array(1, 2, 3), 0, (acc, x) -> acc + x); -- 6 SELECT reduce(array(1, 2, 3), 0, (acc, x) -> acc + x, acc -> acc * 10); -- 60 SELECT reduce(array(1, 2, 3, 4), -- 2.5 named_struct('sum', 0, 'cnt', 0), (acc, x) -> named_struct('sum', acc.sum + x, 'cnt', acc.cnt + 1), acc -> acc.sum / acc.cnt) AS avg; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_reduce.yml000066400000000000000000000161301451700765000251620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 93aace8b77b3e70fda02a7619b32803f9cceb0bf0e23e8af8a0c47f645eb15e8 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: - bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: naked_identifier: acc - binary_operator: '*' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: reduce bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: array bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: named_struct bracketed: - start_bracket: ( - expression: quoted_literal: "'sum'" - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: quoted_literal: "'cnt'" - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - comma: ',' - expression: bracketed: - start_bracket: ( - column_reference: naked_identifier: acc - comma: ',' - column_reference: naked_identifier: x - end_bracket: ) binary_operator: -> function: function_name: function_name_identifier: named_struct bracketed: - start_bracket: ( - expression: quoted_literal: "'sum'" - comma: ',' - expression: - column_reference: - naked_identifier: acc - dot: . - naked_identifier: sum - binary_operator: + - column_reference: naked_identifier: x - comma: ',' - expression: quoted_literal: "'cnt'" - comma: ',' - expression: column_reference: - naked_identifier: acc - dot: . - naked_identifier: cnt binary_operator: + numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: - column_reference: naked_identifier: acc - binary_operator: -> - column_reference: - naked_identifier: acc - dot: . 
- naked_identifier: sum - binary_operator: / - column_reference: - naked_identifier: acc - dot: . - naked_identifier: cnt - end_bracket: ) alias_expression: keyword: AS naked_identifier: avg - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_set_operators.sql000066400000000000000000000013331451700765000266010ustar00rootroot00000000000000-- EXCEPT SELECT c FROM number1 EXCEPT SELECT c FROM number2; -- EXCEPT ALL SELECT c FROM number1 EXCEPT ALL (SELECT c FROM number2); -- MINUS SELECT c FROM number1 MINUS SELECT c FROM number2; -- MINUS ALL SELECT c FROM number1 MINUS ALL (SELECT c FROM number2); -- INTERSECT (SELECT c FROM number1) INTERSECT (SELECT c FROM number2); -- INTERSECT DISTINCT (SELECT c FROM number1) INTERSECT DISTINCT (SELECT c FROM number2); -- INTERSECT ALL (SELECT c FROM number1) INTERSECT ALL (SELECT c FROM number2); -- UNION (SELECT c FROM number1) UNION (SELECT c FROM number2); -- UNION DISTINCT (SELECT c FROM number1) UNION DISTINCT (SELECT c FROM number2); -- UNION ALL SELECT c FROM number1 UNION ALL (SELECT c FROM number2); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_set_operators.yml000066400000000000000000000244131451700765000266070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 49ae4b69f8e87602726882b414a050071d3a47819eb47ff5449a03aa28bcdebf file: - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: keyword: EXCEPT - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: EXCEPT - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 - set_operator: keyword: MINUS - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: MINUS - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: keyword: INTERSECT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: DISTINCT - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: INTERSECT - keyword: ALL - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: keyword: UNION - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 end_bracket: ) - set_operator: - keyword: UNION - keyword: DISTINCT - bracketed: start_bracket: ( 
select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; - statement: set_expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number1 set_operator: - keyword: UNION - keyword: ALL bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: number2 end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_sort_by.sql000066400000000000000000000036701451700765000253770ustar00rootroot00000000000000-- Sort rows within each partition in ascending manner SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name; SELECT name, age, zip_code FROM person SORT BY name; -- Sort rows within each partition using column position. SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY 1; SELECT name, age, zip_code FROM person SORT BY 1; -- Sort rows within partition in ascending -- manner keeping null values to be last. SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age NULLS LAST; SELECT age, name, zip_code FROM person SORT BY age NULLS LAST; -- Sort rows by age within each partition in -- descending manner, which defaults to NULL LAST. SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC; SELECT age, name, zip_code FROM person SORT BY age DESC; -- Sort rows by age within each partition in -- descending manner keeping null values to be first. SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC NULLS FIRST; SELECT age, name, zip_code FROM person SORT BY age DESC NULLS FIRST; -- Sort rows within each partition based on more -- than one column with each column having different -- sort direction. SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name ASC, age DESC; SELECT name, age, zip_code FROM person SORT BY name ASC, age DESC; -- Sort rows within each partition based on result of a function. SELECT age, name FROM person SORT BY LEFT(SUBSTRING_INDEX(name, ' ', -1), 1); SELECT age, name FROM person WHERE age <= 100 SORT BY age; SELECT age, name FROM person GROUP BY age SORT BY age; SELECT age, name FROM person GROUP BY age HAVING COUNT(age) > 1 SORT BY age; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_sort_by.yml000066400000000000000000000400271451700765000253760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
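# Illustrative note (not part of the generated fixture): to diff a live parse
# against this fixture, the CLI can emit YAML directly (the --format option
# and its yaml choice are assumed from SQLFluff's documented parse outputs):
#   sqlfluff parse test/fixtures/dialects/sparksql/select_sort_by.sql --dialect sparksql --format yaml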
_hash: 165a27d604367bcfd0b6120bd77f56eb432074fd3ca1bed5785f1491971e8cea file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: 
naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: NULLS - keyword: LAST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - keyword: DESC - keyword: NULLS - keyword: FIRST - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: select_hint: start_hint: /*+ hint_function: function_name: function_name_identifier: REPARTITION bracketed: start_bracket: ( naked_identifier: zip_code end_bracket: ) end_hint: '*/' - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: 
naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: zip_code from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: name - keyword: ASC - comma: ',' - column_reference: naked_identifier: age - keyword: DESC - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person sort_by_clause: - keyword: SORT - keyword: BY - expression: function: function_name: function_name_identifier: LEFT bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: SUBSTRING_INDEX bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: name - comma: ',' - expression: quoted_literal: "' '" - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: age comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' numeric_literal: '100' sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: column_reference: naked_identifier: name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: age having_clause: keyword: HAVING expression: function: function_name: 
function_name_identifier: COUNT bracketed: start_bracket: ( expression: column_reference: naked_identifier: age end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' sort_by_clause: - keyword: SORT - keyword: BY - column_reference: naked_identifier: age - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_star_except.sql000066400000000000000000000003031451700765000262250ustar00rootroot00000000000000select * except (col) from table_name where row_no = 1; select * except (col) from table_name where row_no = 1; select * except (col1, col2, col3, col4, col5) from table_name where row_no = 1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_star_except.yml000066400000000000000000000062251451700765000262400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cf951c121772e7daad8ad73fa0d9ad5377c234e3d698b732a743316409429048 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: col end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: start_bracket: ( naked_identifier: col end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' select_except_clause: keyword: except bracketed: - start_bracket: ( - naked_identifier: col1 - comma: ',' - naked_identifier: col2 - comma: ',' - naked_identifier: col3 - comma: ',' - naked_identifier: col4 - comma: ',' - naked_identifier: col5 - end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table_name where_clause: keyword: where expression: column_reference: naked_identifier: row_no comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_star_in_multiparameter_function.sql000066400000000000000000000001261451700765000323660ustar00rootroot00000000000000SELECT my_function(*, col2) FROM my_table; SELECT my_function(col1, *) FROM my_table; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_star_in_multiparameter_function.yml000066400000000000000000000033161451700765000323740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e2e974845de1104e895b7508eccfd7df51519a7d910df9b5cef07438d8bff9d0 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function bracketed: start_bracket: ( star: '*' comma: ',' expression: column_reference: naked_identifier: col2 end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: my_function bracketed: start_bracket: ( expression: column_reference: naked_identifier: col1 comma: ',' star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_tablesample.sql000066400000000000000000000012241451700765000262000ustar00rootroot00000000000000SELECT a, b FROM test TABLESAMPLE(50 PERCENT); SELECT t.a, t.b FROM test t TABLESAMPLE(50 PERCENT); SELECT t.a, t.b FROM test AS t TABLESAMPLE(50 PERCENT); SELECT a, b FROM test TABLESAMPLE(5 ROWS); SELECT a, b FROM test TABLESAMPLE(BUCKET 4 OUT OF 10); SELECT test_1.a, test_1.b FROM test_1 TABLESAMPLE(5 ROWS) NATURAL JOIN test_2 TABLESAMPLE(BUCKET 4 OUT OF 10); SELECT t1.a, t2.b FROM test_1 t1 TABLESAMPLE(5 ROWS) NATURAL JOIN test_2 t2 TABLESAMPLE(BUCKET 4 OUT OF 10); SELECT t1.a, t2.b FROM test_1 AS t1 TABLESAMPLE(5 ROWS) NATURAL JOIN test_2 AS t2 TABLESAMPLE(BUCKET 4 OUT OF 10); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_tablesample.yml000066400000000000000000000215131451700765000262050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 72061fbe559f92d245812e9660a400734186a91f7df06debb3482e677a634e83 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test alias_expression: naked_identifier: t sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test alias_expression: keyword: AS naked_identifier: t sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '50' keyword: PERCENT end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: test_1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: test_1 - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . 
- naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 alias_expression: naked_identifier: t1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 alias_expression: naked_identifier: t2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: t1 - dot: . - naked_identifier: a - comma: ',' - select_clause_element: column_reference: - naked_identifier: t2 - dot: . - naked_identifier: b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_1 alias_expression: keyword: AS naked_identifier: t1 sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '5' keyword: ROWS end_bracket: ) join_clause: - keyword: NATURAL - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: test_2 alias_expression: keyword: AS naked_identifier: t2 sample_expression: keyword: TABLESAMPLE bracketed: - start_bracket: ( - keyword: BUCKET - numeric_literal: '4' - keyword: OUT - keyword: OF - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_transform_clause.sql000066400000000000000000000022211451700765000272540ustar00rootroot00000000000000-- With specified output without data type SELECT TRANSFORM (zip_code, name, age) USING 'cat' AS (a, b, c) FROM person WHERE zip_code > 94511; -- With specified output with data type SELECT TRANSFORM(zip_code, name, age) USING 'cat' AS (a string, b string, c string) FROM person WHERE zip_code > 94511; -- Using ROW FORMAT DELIMITED SELECT TRANSFORM(name, age) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' USING 'cat' AS (name_age string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '@' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' FROM person; -- Using Hive Serde SELECT TRANSFORM(zip_code, name, age) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'field.delim' = '\t' ) USING 'cat' AS (a string, b string, c string) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( 'field.delim' = '\t' ) FROM person WHERE zip_code > 94511; -- Schema-less mode SELECT TRANSFORM(zip_code, name, age) USING 'cat' FROM person WHERE zip_code > 94500; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_transform_clause.yml000066400000000000000000000170241451700765000272650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 560671c74a7cc311a7cf61ae385f36cd1f9d7d654cd375027baa13097f0b7b7f file: - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - comma: ',' - naked_identifier: b - comma: ',' - naked_identifier: c - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: a - naked_identifier: string - comma: ',' - naked_identifier: b - naked_identifier: string - comma: ',' - naked_identifier: c - naked_identifier: string - end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "','" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'NULL'" - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( - naked_identifier: name_age - naked_identifier: string - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: DELIMITED - keyword: FIELDS - keyword: TERMINATED - keyword: BY - quoted_literal: "'@'" - keyword: LINES - keyword: TERMINATED - keyword: BY - quoted_literal: "'\\n'" - keyword: 'NULL' - keyword: DEFINED - keyword: AS - quoted_literal: "'NULL'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'field.delim'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'\\t'" end_bracket: ) - keyword: USING - quoted_literal: "'cat'" - keyword: AS - bracketed: - start_bracket: ( 
- naked_identifier: a - naked_identifier: string - comma: ',' - naked_identifier: b - naked_identifier: string - comma: ',' - naked_identifier: c - naked_identifier: string - end_bracket: ) - row_format_clause: - keyword: ROW - keyword: FORMAT - keyword: SERDE - quoted_literal: "'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" - keyword: WITH - keyword: SERDEPROPERTIES - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'field.delim'" comparison_operator: raw_comparison_operator: '=' quoted_literal: "'\\t'" end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94511' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT transform_clause: - keyword: TRANSFORM - bracketed: - start_bracket: ( - naked_identifier: zip_code - comma: ',' - naked_identifier: name - comma: ',' - naked_identifier: age - end_bracket: ) - keyword: USING - quoted_literal: "'cat'" from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: person where_clause: keyword: WHERE expression: column_reference: naked_identifier: zip_code comparison_operator: raw_comparison_operator: '>' numeric_literal: '94500' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_union.sql000066400000000000000000000000431451700765000250350ustar00rootroot00000000000000SELECT 'a' AS col UNION SELECT 'b' sqlfluff-2.3.5/test/fixtures/dialects/sparksql/select_union.yml000066400000000000000000000015121451700765000250410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d3287e0f23f3c2499b397b7a305afb90b1a5a09790a4c099961f2799e4f8dc5 file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'a'" alias_expression: keyword: AS naked_identifier: col - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'b'" sqlfluff-2.3.5/test/fixtures/dialects/sparksql/set.sql000066400000000000000000000001361451700765000231440ustar00rootroot00000000000000SET spark.sql.variable.substitute = FALSE; SET -v; SET; SET spark.sql.variable.substitute; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/set.yml000066400000000000000000000025371451700765000231550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 71c5e26841b7b96426ef4127afa93633f3f29bab63d71888bbe5ce951edf4a47 file: - statement: set_statement: keyword: SET property_name_identifier: - properties_naked_identifier: spark - dot: . - properties_naked_identifier: sql - dot: . - properties_naked_identifier: variable - dot: . 
      - properties_naked_identifier: substitute
      comparison_operator:
        raw_comparison_operator: '='
      boolean_literal: 'FALSE'
- statement_terminator: ;
- statement:
    set_statement:
      keyword: SET
      sql_conf_option:
        dash: '-'
        sql_conf_option: v
- statement_terminator: ;
- statement:
    set_statement:
      keyword: SET
- statement_terminator: ;
- statement:
    set_statement:
      keyword: SET
      property_name_identifier:
      - properties_naked_identifier: spark
      - dot: .
      - properties_naked_identifier: sql
      - dot: .
      - properties_naked_identifier: variable
      - dot: .
      - properties_naked_identifier: substitute
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_columns.sql
-- List the columns of `customer` table in the current database.
SHOW COLUMNS IN customer;
-- List the columns of `customer` table in `salesdb` database.
SHOW COLUMNS IN salesdb.customer;
-- List the columns of `customer` table in `salesdb` database.
SHOW COLUMNS IN customer IN salesdb;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_columns.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 67e5fe11f6cfbc707a143431f2a1c59578833cbd0d59aa06f589f92c4e413f59
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: IN
    - table_expression:
        table_reference:
          naked_identifier: customer
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: IN
    - table_expression:
        table_reference:
        - naked_identifier: salesdb
        - dot: .
        - naked_identifier: customer
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: COLUMNS
    - keyword: IN
    - table_expression:
        table_reference:
          naked_identifier: customer
    - keyword: IN
    - database_reference:
        naked_identifier: salesdb
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_create_table.sql
SHOW CREATE TABLE test;
-- Generates Hive DDL for a Hive SerDe table.
SHOW CREATE TABLE test AS SERDE;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_create_table.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 3f18bea7eabb52edaab7ee8bb25937de657a9f286cabda424e1a61b9642ade00
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: CREATE
    - keyword: TABLE
    - table_expression:
        table_reference:
          naked_identifier: test
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: CREATE
    - keyword: TABLE
    - table_expression:
        table_reference:
          naked_identifier: test
    - keyword: AS
    - keyword: SERDE
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_databases.sql
-- Lists all the databases.
SHOW DATABASES;
-- Lists databases with names starting with the string pattern `pay`
SHOW DATABASES LIKE 'pay*';
-- Lists all databases. Keywords SCHEMAS and DATABASES are interchangeable.
SHOW SCHEMAS;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_databases.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 53e02d90fc99f33433a357b5b40be0da2129d6ae82e9913e8e38e39fd94fde22
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: DATABASES
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: DATABASES
    - keyword: LIKE
    - quoted_literal: "'pay*'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SCHEMAS
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_functions.sql
-- List a system function `trim` by searching both user defined and system
-- defined functions.
SHOW FUNCTIONS trim;
SHOW ALL FUNCTIONS trim;
-- List a system function `concat` by searching system defined functions.
SHOW SYSTEM FUNCTIONS concat;
-- List a user function `concat_user` by searching user defined functions.
SHOW USER FUNCTIONS concat_user;
-- List a qualified function `max` from database `salesdb`.
SHOW SYSTEM FUNCTIONS salesdb.max;
-- List all functions starting with `t`
SHOW FUNCTIONS LIKE 't*';
-- List all functions starting with `yea` or `windo`
SHOW FUNCTIONS LIKE 'yea*|windo*';
-- Use a normal regex pattern to list function names that have 4 characters
-- with `t` as the starting character.
SHOW FUNCTIONS LIKE 't[a-z][a-z][a-z]';
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_functions.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 56bd613fcf0c4d9d563e1919153506fc9ebe2af11c234e1e509db53e77970979
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: FUNCTIONS
    - function_name:
        function_name_identifier: trim
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: ALL
    - keyword: FUNCTIONS
    - function_name:
        function_name_identifier: trim
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SYSTEM
    - keyword: FUNCTIONS
    - function_name:
        function_name_identifier: concat
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: USER
    - keyword: FUNCTIONS
    - function_name:
        function_name_identifier: concat_user
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: SYSTEM
    - keyword: FUNCTIONS
    - function_name:
        naked_identifier: salesdb
        dot: .
function_name_identifier: max - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'yea*|windo*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: FUNCTIONS - keyword: LIKE - quoted_literal: "'t[a-z][a-z][a-z]'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_partitions.sql000066400000000000000000000010151451700765000256020ustar00rootroot00000000000000-- Lists all partitions for table `customer` SHOW PARTITIONS customer; -- Lists all partitions for the qualified table `customer` SHOW PARTITIONS salesdb.customer; -- Specify a full partition spec to list specific partition SHOW PARTITIONS customer PARTITION (state = 'CA', city = 'Fremont'); -- Specify a partial partition spec to list the specific partitions SHOW PARTITIONS customer PARTITION (state = 'CA'); -- Specify a partial spec to list specific partition SHOW PARTITIONS customer PARTITION (city = 'San Jose'); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_partitions.yml000066400000000000000000000042201451700765000256050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 117afb5bf8f20db6b3c88fad946fdd821caf02a19e8dd624360f772c770b6fe0 file: - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: - naked_identifier: salesdb - dot: . 
- naked_identifier: customer - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: - start_bracket: ( - column_reference: naked_identifier: state - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CA'" - comma: ',' - column_reference: naked_identifier: city - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Fremont'" - end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: state comparison_operator: raw_comparison_operator: '=' quoted_literal: "'CA'" end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: PARTITIONS - table_reference: naked_identifier: customer - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: city comparison_operator: raw_comparison_operator: '=' quoted_literal: "'San Jose'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_table_extended.sql000066400000000000000000000006251451700765000263630ustar00rootroot00000000000000-- Show the details of the table SHOW TABLE EXTENDED LIKE 'employee'; -- showing the multiple table details with pattern matching SHOW TABLE EXTENDED LIKE 'employe*'; -- show partition file system details SHOW TABLE EXTENDED IN default LIKE 'employee' PARTITION (grade = 1); -- show partition file system details with pattern matching SHOW TABLE EXTENDED IN default LIKE 'empl*' PARTITION (grade = 1); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_table_extended.yml000066400000000000000000000033351451700765000263660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 379a9d589d67c92e1168c626767879eca530bdac1a7a79a2fe47b689d29d681f
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLE
    - keyword: EXTENDED
    - keyword: LIKE
    - quoted_literal: "'employee'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLE
    - keyword: EXTENDED
    - keyword: LIKE
    - quoted_literal: "'employe*'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLE
    - keyword: EXTENDED
    - keyword: IN
    - database_reference:
        naked_identifier: default
    - keyword: LIKE
    - quoted_literal: "'employee'"
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: grade
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '1'
        end_bracket: )
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLE
    - keyword: EXTENDED
    - keyword: IN
    - database_reference:
        naked_identifier: default
    - keyword: LIKE
    - quoted_literal: "'empl*'"
    - keyword: PARTITION
    - bracketed:
        start_bracket: (
        column_reference:
          naked_identifier: grade
        comparison_operator:
          raw_comparison_operator: '='
        numeric_literal: '1'
        end_bracket: )
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_tables.sql
-- List all tables in default database
SHOW TABLES;
-- List all tables from userdb database
SHOW TABLES FROM userdb;
-- List all tables in userdb database
SHOW TABLES IN userdb;
-- List all tables from default database matching the pattern `sam*`
SHOW TABLES FROM default LIKE 'sam*';
-- List all tables matching the pattern `sam*|suj`
SHOW TABLES LIKE 'sam*|suj';
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_tables.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: bfba166a32d45907246e1a16e98d3de19e5d015fbc5958445aa7580ea1f034c9
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: FROM
    - database_reference:
        naked_identifier: userdb
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: IN
    - database_reference:
        naked_identifier: userdb
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: FROM
    - database_reference:
        naked_identifier: default
    - keyword: LIKE
    - quoted_literal: "'sam*'"
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TABLES
    - keyword: LIKE
    - quoted_literal: "'sam*|suj'"
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_tblproperties.sql
-- show all the user-specified properties for table `customer`
SHOW TBLPROPERTIES customer;
-- show all the user-specified properties for a qualified table `customer`
-- in database `salesdb`
SHOW TBLPROPERTIES salesdb.customer;
-- show value for unquoted property key `created.by.user`
SHOW TBLPROPERTIES customer (created.by.user);
-- show value for property `created.date` specified as a string literal
SHOW TBLPROPERTIES customer ('created.date');
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_tblproperties.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9c2d1d57d30c633b5aa3d12e4c00bea4b25471f07e5d272df9d2a402d206c0d5
file:
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TBLPROPERTIES
    - table_reference:
        naked_identifier: customer
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TBLPROPERTIES
    - table_reference:
      - naked_identifier: salesdb
      - dot: .
      - naked_identifier: customer
- statement_terminator: ;
- statement:
    show_statement:
    - keyword: SHOW
    - keyword: TBLPROPERTIES
    - table_reference:
        naked_identifier: customer
    - bracketed:
        start_bracket: (
        property_name_identifier:
        - properties_naked_identifier: created
        - dot: .
        - properties_naked_identifier: by
        - dot: .
- properties_naked_identifier: user end_bracket: ) - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: TBLPROPERTIES - table_reference: naked_identifier: customer - bracketed: start_bracket: ( property_name_identifier: quoted_identifier: "'created.date'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_views.sql000066400000000000000000000006371451700765000245540ustar00rootroot00000000000000-- List all views in default database SHOW VIEWS; -- List all views from userdb database SHOW VIEWS FROM userdb; -- List all views in global temp view database SHOW VIEWS IN global_temp; -- List all views from default database matching the pattern `sam*` SHOW VIEWS FROM default LIKE 'sam*'; -- List all views from the current database -- matching the pattern `sam|suj|temp*` SHOW VIEWS LIKE 'sam|suj|temp*'; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/show_views.yml000066400000000000000000000023361451700765000245540ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ecb3226351cde262a4ad4856b1622d4b1e275830a8f0a4319b2ea77de159aed3 file: - statement: show_statement: - keyword: SHOW - keyword: VIEWS - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: userdb - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: IN - database_reference: naked_identifier: global_temp - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: FROM - database_reference: naked_identifier: default - keyword: LIKE - quoted_literal: "'sam*'" - statement_terminator: ; - statement: show_statement: - keyword: SHOW - keyword: VIEWS - keyword: LIKE - quoted_literal: "'sam|suj|temp*'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/table_alias.sql000066400000000000000000000003261451700765000246120ustar00rootroot00000000000000select * from u as t ( a, b ); select * from u as t (a, b); select * from u as t(a,b); select * from u as (a,b); select * from u t ( a, b ); select * from u t (a, b); select * from u t(a,b); select * from u (a,b); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/table_alias.yml000066400000000000000000000141671451700765000246240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 43acb142a2b99c8a0367242bbe6504e7e919b34848ccebdb634949e0ee7297e1 file: - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: u alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: u bracketed: - start_bracket: ( - expression: column_reference: 
naked_identifier: a - comma: ',' - expression: column_reference: naked_identifier: b - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/truncate_table.sql000066400000000000000000000002721451700765000253460ustar00rootroot00000000000000-- Removes all rows from the table in the partition specified TRUNCATE TABLE Student PARTITION(Age = 10); -- Removes all rows from the table from all partitions TRUNCATE TABLE Student; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/truncate_table.yml000066400000000000000000000017021451700765000253470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e5d64f9bdcb0ff1769f35a9e32cce65a2474a0a79602712a32e2b35fb25bc7e1 file: - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: Student - keyword: PARTITION - bracketed: start_bracket: ( column_reference: naked_identifier: Age comparison_operator: raw_comparison_operator: '=' numeric_literal: '10' end_bracket: ) - statement_terminator: ; - statement: truncate_table: - keyword: TRUNCATE - keyword: TABLE - table_reference: naked_identifier: Student - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/uncache_table.sql000066400000000000000000000000571451700765000251300ustar00rootroot00000000000000UNCACHE TABLE t1; UNCACHE TABLE IF EXISTS t1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/uncache_table.yml000066400000000000000000000013461451700765000251340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9d792b932b3b37301c279ea86aec744c42da10d2dcf6d2c17936b0980cb1c714 file: - statement: uncache_table: - keyword: UNCACHE - keyword: TABLE - table_reference: naked_identifier: t1 - statement_terminator: ; - statement: uncache_table: - keyword: UNCACHE - keyword: TABLE - keyword: IF - keyword: EXISTS - table_reference: naked_identifier: t1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/use_database.sql000066400000000000000000000001271451700765000247710ustar00rootroot00000000000000USE database_name; -- Use the 'userdb' USE userdb; -- Use the 'userdb1' USE userdb1; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/use_database.yml000066400000000000000000000014521451700765000247750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f3c7428b47771dbc65f522c134af5d7e26d17e80be2a6da405ea6afefab130b4
file:
- statement:
    use_statement:
      keyword: USE
      database_reference:
        naked_identifier: database_name
- statement_terminator: ;
- statement:
    use_statement:
      keyword: USE
      database_reference:
        naked_identifier: userdb
- statement_terminator: ;
- statement:
    use_statement:
      keyword: USE
      database_reference:
        naked_identifier: userdb1
- statement_terminator: ;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/values.sql
values (1, 2);
values (1, 2), (3, 4);
values (1, 2), (3, 4), (greatest(5, 6), least(7, 8));
values 1, 2;
values 1;
values 1 , 2 , 3 limit 1;
values 3 , 2 , 1 order by 2;
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/values.yml
# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 93b8eab28bc8d020bec2090f2622efa53b1c4cad87bbeff2761cac9025a40f78
file:
- statement:
    values_clause:
      keyword: values
      bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '1'
      - comma: ','
      - expression:
          numeric_literal: '2'
      - end_bracket: )
- statement_terminator: ;
- statement:
    values_clause:
    - keyword: values
    - bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '1'
      - comma: ','
      - expression:
          numeric_literal: '2'
      - end_bracket: )
    - comma: ','
    - bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '3'
      - comma: ','
      - expression:
          numeric_literal: '4'
      - end_bracket: )
- statement_terminator: ;
- statement:
    values_clause:
    - keyword: values
    - bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '1'
      - comma: ','
      - expression:
          numeric_literal: '2'
      - end_bracket: )
    - comma: ','
    - bracketed:
      - start_bracket: (
      - expression:
          numeric_literal: '3'
      - comma: ','
      - expression:
          numeric_literal: '4'
      - end_bracket: )
    - comma: ','
    - bracketed:
      - start_bracket: (
      - expression:
          function:
            function_name:
              function_name_identifier: greatest
            bracketed:
            - start_bracket: (
            - expression:
                numeric_literal: '5'
            - comma: ','
            - expression:
                numeric_literal: '6'
            - end_bracket: )
      - comma: ','
      - expression:
          function:
            function_name:
              function_name_identifier: least
            bracketed:
            - start_bracket: (
            - expression:
                numeric_literal: '7'
            - comma: ','
            - expression:
                numeric_literal: '8'
            - end_bracket: )
      - end_bracket: )
- statement_terminator: ;
- statement:
    values_clause:
    - keyword: values
    - expression:
        numeric_literal: '1'
    - comma: ','
    - expression:
        numeric_literal: '2'
- statement_terminator: ;
- statement:
    values_clause:
      keyword: values
      expression:
        numeric_literal: '1'
- statement_terminator: ;
- statement:
    values_clause:
    - keyword: values
    - expression:
        numeric_literal: '1'
    - comma: ','
    - expression:
        numeric_literal: '2'
    - comma: ','
    - expression:
        numeric_literal: '3'
    - limit_clause:
        keyword: limit
        numeric_literal: '1'
- statement_terminator: ;
- statement:
    values_clause:
    - keyword: values
    - expression:
        numeric_literal: '3'
    - comma: ','
    - expression:
        numeric_literal: '2'
    - comma: ','
    - expression:
        numeric_literal: '1'
    - orderby_clause:
      - keyword: order
      - keyword: by
      - numeric_literal: '2'
- statement_terminator: ;
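Each YAML fixture above states the same contract in its header: it is generated from the sibling `.sql` file, and its `_hash` field must match a hash recomputed by SQLFluff's test suite, which is what stops hand edits from drifting out of sync with the SQL. A minimal sketch of that round trip is below. The regeneration command is the one quoted verbatim in the fixture headers and the `Linter` usage is SQLFluff's public API; the choice of `values.sql` as the file to preview is an illustrative assumption, and nothing here reimplements the internal `_hash` computation, which lives inside `test/generate_parse_fixture_yml.py`.

# Sketch: regenerate the sparksql parse fixtures, then preview one parse tree.
# Assumes it is run from the repository root; illustrative only.
import subprocess
import sys
from pathlib import Path

from sqlfluff.core import Linter

# Step 1: regenerate all YAML fixtures (including their `_hash` fields) from
# the SQL sources, exactly as the fixture headers instruct.
subprocess.run([sys.executable, "test/generate_parse_fixture_yml.py"], check=True)

# Step 2: parse one fixture's SQL with the sparksql dialect and print the
# segment tree that the regenerated YAML should mirror.
fixture = Path("test/fixtures/dialects/sparksql/values.sql")
parsed = Linter(dialect="sparksql").parse_string(fixture.read_text())
print(parsed.tree.stringify())

Because the hash is checked when the tests run, rather than trusted from the YAML, any grammar change that alters one of these parse trees surfaces immediately: the fixture comparison fails until the YAML is regenerated.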
sqlfluff-2.3.5/test/fixtures/dialects/sparksql/values_with_alias.sql000066400000000000000000000003771451700765000260630ustar00rootroot00000000000000values (1, 2) as t; values (1, 2) t; values (1, 2) as t (a, b); values (1, 2), (3, 4) as t(a,b); values (1, 2) as (a,b); values (1, 2) t(a,b); values (1, 2) (a,b); values (1, 2), (3, 4) as (a,b); values (1, 2), (3, 4) t(a,b); values (1, 2), (3, 4) (a,b); sqlfluff-2.3.5/test/fixtures/dialects/sparksql/values_with_alias.yml000066400000000000000000000133361451700765000260640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e85de7ad1e5b742def684191888d8c22871e3198b5a8925a436368514d2088f file: - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: keyword: as naked_identifier: t - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: keyword: as naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: values bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - 
bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: keyword: as bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: naked_identifier: t bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; - statement: values_clause: - keyword: values - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - alias_expression: bracketed: start_bracket: ( identifier_list: - naked_identifier: a - comma: ',' - naked_identifier: b end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/window_functions.sql000066400000000000000000000061501451700765000257520ustar00rootroot00000000000000SELECT name, dept, RANK() OVER ( PARTITION BY dept ORDER BY salary ) AS row_rank FROM employees; SELECT name, dept, DENSE_RANK() OVER ( PARTITION BY dept ORDER BY salary ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS row_dense_rank FROM employees; SELECT name, dept, age, CUME_DIST() OVER ( PARTITION BY dept ORDER BY age RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ) AS data_cume_dist FROM employees; SELECT name, dept, salary, MIN(salary) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_min FROM employees; SELECT name, salary, LAG(salary) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT name, salary, LAG(salary) IGNORE NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) IGNORE NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT name, salary, LAG(salary) RESPECT NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lag, LEAD(salary, 1, 0) RESPECT NULLS OVER ( PARTITION BY dept ORDER BY salary ) AS salary_lead FROM employees; SELECT id, v, LEAD(v, 0) IGNORE NULLS OVER w AS v_lead, LAG(v, 0) IGNORE NULLS OVER w AS v_lag, NTH_VALUE(v, 2) IGNORE NULLS OVER w AS v_nth_value, FIRST_VALUE(v) IGNORE NULLS OVER w AS v_first_value, LAST_VALUE(v) IGNORE NULLS OVER w AS v_last_value FROM test_ignore_null WINDOW w AS (ORDER BY id) ORDER BY id; SELECT id, v, LEAD(v, 0) RESPECT NULLS OVER w AS v_lead, LAG(v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(v) RESPECT NULLS OVER w AS v_last_value FROM test_ignore_null WINDOW w AS (ORDER BY id) ORDER BY id; SELECT ignore_nulls.id, ignore_nulls.v, LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead, LAG(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(ignore_nulls.v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_last_value 
FROM test_ignore_null AS ignore_nulls WINDOW w AS (ORDER BY ignore_nulls.id) ORDER BY ignore_nulls.id; SELECT ignore_nulls.id, ignore_nulls.v, LEAD(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lead, LAG(ignore_nulls.v, 0) RESPECT NULLS OVER w AS v_lag, NTH_VALUE(ignore_nulls.v, 2) RESPECT NULLS OVER w AS v_nth_value, FIRST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_first_value, LAST_VALUE(ignore_nulls.v) RESPECT NULLS OVER w AS v_last_value FROM test_ignore_null AS ignore_nulls WINDOW w AS (ORDER BY ignore_nulls.id range between interval 6 days preceding and current row) ORDER BY ignore_nulls.id; sqlfluff-2.3.5/test/fixtures/dialects/sparksql/window_functions.yml000066400000000000000000001027561451700765000257650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 00bbbff73f8f9a420c42c4cdd5d98e0f36c435b97662e439e295403c96aae249 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: function: function_name: function_name_identifier: RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: row_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DENSE_RANK bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: AS naked_identifier: row_dense_rank from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: column_reference: naked_identifier: age - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CUME_DIST bracketed: start_bracket: ( end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: 
PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: age frame_clause: - keyword: RANGE - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) alias_expression: keyword: AS naked_identifier: data_cume_dist from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: MIN bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_min from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - 
keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: salary - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: start_bracket: ( expression: column_reference: naked_identifier: salary end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: salary - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: dept orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: salary end_bracket: ) alias_expression: keyword: AS naked_identifier: salary_lead from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: employees - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: - start_bracket: ( - expression: 
column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: IGNORE - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: column_reference: naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: 
function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . 
- naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null alias_expression: keyword: AS naked_identifier: ignore_nulls named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LEAD bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lead - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAG bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_lag - comma: ',' - select_clause_element: function: function_name: function_name_identifier: NTH_VALUE bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_nth_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: FIRST_VALUE bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_first_value - comma: ',' - select_clause_element: function: function_name: function_name_identifier: LAST_VALUE bracketed: start_bracket: ( expression: column_reference: - naked_identifier: ignore_nulls - dot: . 
- naked_identifier: v end_bracket: ) over_clause: - keyword: RESPECT - keyword: NULLS - keyword: OVER - naked_identifier: w alias_expression: keyword: AS naked_identifier: v_last_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_ignore_null alias_expression: keyword: AS naked_identifier: ignore_nulls named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id frame_clause: - keyword: range - keyword: between - interval_expression: keyword: interval interval_literal: numeric_literal: '6' date_part: days - keyword: preceding - keyword: and - keyword: current - keyword: row end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: ignore_nulls - dot: . - naked_identifier: id - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/000077500000000000000000000000001451700765000212715ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/sqlite/.sqlfluff000066400000000000000000000000341451700765000231110ustar00rootroot00000000000000[sqlfluff] dialect = sqlite sqlfluff-2.3.5/test/fixtures/dialects/sqlite/arithmetric_a.sql000066400000000000000000000013561451700765000246320ustar00rootroot00000000000000SELECT 1 + (2 * 3) >= 4 + 6+13 as val; SELECT 1 + ~(~2 * 3) >= 4 + ~6+13 as val; SELECT -1; SELECT -1 + 5; SELECT ~1; SELECT -1 + ~5; SELECT 4 & ~8 | 16; SELECT 8 + ~(3); SELECT 8 | ~ ~ ~4; SELECT 1 * -(5); SELECT 1 * -5; SELECT 1 * - - - 5; SELECT 1 * - - - (5); SELECT 1 * + + (5); SELECT 1 * - - - func(5); SELECT 1 * ~ ~ ~ func(5); SELECT 1 * +(5); SELECT 1 * +5; SELECT 1 * + + 5; SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT NOT NOT (TRUE); -- parses middle NOT as column ref SELECT FALSE AND NOT (TRUE); SELECT FALSE AND NOT func(5); SELECT 'abc' LIKE - - 5; -- PG can parse this ok, and then fail due to data type mismatch SELECT 'abc' LIKE ~ ~ 5; -- PG can parse this ok, and then fail due to data type mismatch sqlfluff-2.3.5/test/fixtures/dialects/sqlite/arithmetric_a.yml000066400000000000000000000253111451700765000246310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 68162fcf003cf96cc1361038a52f8a6da37d5bee87037883ce33a4f2c011cf6f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - bracketed: start_bracket: ( expression: - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: + - tilde: '~' - bracketed: start_bracket: ( expression: - tilde: '~' - numeric_literal: '2' - binary_operator: '*' - numeric_literal: '3' end_bracket: ) - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '4' - binary_operator: + - tilde: '~' - numeric_literal: '6' - binary_operator: + - numeric_literal: '13' alias_expression: keyword: as naked_identifier: val - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: sign_indicator: '-' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: tilde: '~' numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: sign_indicator: '-' numeric_literal: '1' - binary_operator: + - tilde: '~' - numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '4' - binary_operator: ampersand: '&' - tilde: '~' - numeric_literal: '8' - binary_operator: pipe: '|' - numeric_literal: '16' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '8' binary_operator: + tilde: '~' bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '8' - binary_operator: pipe: '|' - tilde: '~' - tilde: '~' - tilde: '~' - numeric_literal: '4' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: '-' bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: 
select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - sign_indicator: + - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: '-' - sign_indicator: '-' - sign_indicator: '-' - function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - tilde: '~' - tilde: '~' - tilde: '~' - function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: numeric_literal: '1' binary_operator: '*' sign_indicator: + bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - numeric_literal: '1' - binary_operator: '*' - sign_indicator: + - numeric_literal: sign_indicator: + numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - boolean_literal: 'FALSE' - binary_operator: AND - keyword: NOT - keyword: NOT - keyword: NOT - bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT bracketed: start_bracket: ( expression: boolean_literal: 'TRUE' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: boolean_literal: 'FALSE' binary_operator: AND keyword: NOT function: function_name: function_name_identifier: func bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: quoted_literal: "'abc'" keyword: LIKE sign_indicator: '-' numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - quoted_literal: "'abc'" - keyword: LIKE - tilde: '~' - tilde: '~' - 
numeric_literal: '5' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_index.sql000066400000000000000000000006331451700765000244460ustar00rootroot00000000000000CREATE INDEX li1 ON entries_data(id, LENGTH(chunk)); CREATE INDEX acctchng_magnitude ON account_change(acct_no, abs(amt)); CREATE INDEX t2xy ON t2(x+y); CREATE UNIQUE INDEX team_leader ON person(team_id) WHERE is_team_leader; CREATE INDEX ex1 ON tab1(a,b) WHERE a=5 OR b=6; CREATE INDEX po_parent ON purchaseorder(parent_po) WHERE parent_po IS NOT NULL; CREATE INDEX ex2 ON tab2(b,c) WHERE c IS NOT NULL; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_index.yml000066400000000000000000000116741451700765000244570ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42ff8ffbd151fb9e46fbdc962aa6479bceaf758ac2bdb4827d6f88e167e0ae88 file: - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: li1 - keyword: 'ON' - table_reference: naked_identifier: entries_data - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: id - comma: ',' - index_column_definition: expression: function: function_name: function_name_identifier: LENGTH bracketed: start_bracket: ( expression: column_reference: naked_identifier: chunk end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: acctchng_magnitude - keyword: 'ON' - table_reference: naked_identifier: account_change - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: acct_no - comma: ',' - index_column_definition: expression: function: function_name: function_name_identifier: abs bracketed: start_bracket: ( expression: column_reference: naked_identifier: amt end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: t2xy - keyword: 'ON' - table_reference: naked_identifier: t2 - bracketed: start_bracket: ( index_column_definition: expression: - column_reference: naked_identifier: x - binary_operator: + - column_reference: naked_identifier: y end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: naked_identifier: team_leader - keyword: 'ON' - table_reference: naked_identifier: person - bracketed: start_bracket: ( index_column_definition: naked_identifier: team_id end_bracket: ) - where_clause: keyword: WHERE expression: column_reference: naked_identifier: is_team_leader - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: ex1 - keyword: 'ON' - table_reference: naked_identifier: tab1 - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: a - comma: ',' - index_column_definition: naked_identifier: b - end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: a - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - binary_operator: OR - column_reference: naked_identifier: b - comparison_operator: raw_comparison_operator: '=' 
- numeric_literal: '6' - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: po_parent - keyword: 'ON' - table_reference: naked_identifier: purchaseorder - bracketed: start_bracket: ( index_column_definition: naked_identifier: parent_po end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: parent_po - keyword: IS - keyword: NOT - keyword: 'NULL' - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: INDEX - index_reference: naked_identifier: ex2 - keyword: 'ON' - table_reference: naked_identifier: tab2 - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: b - comma: ',' - index_column_definition: naked_identifier: c - end_bracket: ) - where_clause: keyword: WHERE expression: - column_reference: naked_identifier: c - keyword: IS - keyword: NOT - keyword: 'NULL' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table.sql000066400000000000000000000002641451700765000244260ustar00rootroot00000000000000CREATE TABLE users ( user_id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT NOT NULL UNIQUE, password TEXT NOT NULL COLLATE NOCASE, email TEXT NOT NULL UNIQUE ); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table.yml000066400000000000000000000034601451700765000244310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8aa612e64befa74ee94085376570eed5f55511432f801989f14dc97de88dd851 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: users - bracketed: - start_bracket: ( - column_definition: naked_identifier: user_id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: AUTOINCREMENT - comma: ',' - column_definition: - naked_identifier: username - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - comma: ',' - column_definition: - naked_identifier: password - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: COLLATE collation_reference: naked_identifier: NOCASE - comma: ',' - column_definition: - naked_identifier: email - data_type: data_type_identifier: TEXT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: UNIQUE - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_autoincrement.sql000066400000000000000000000000761451700765000273640ustar00rootroot00000000000000CREATE TABLE foo( id INTEGER PRIMARY KEY AUTOINCREMENT ); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_autoincrement.yml000066400000000000000000000015451451700765000273700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 35d22c9df0ef9cc9ea9c020085cf4f633f9ea53a48a92bfcd928bbe86df5fe0a file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_definition: naked_identifier: id data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: AUTOINCREMENT end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_check.sql000066400000000000000000000001021451700765000255520ustar00rootroot00000000000000CREATE TABLE foo( num NUMBER NOT NULL, CHECK (num > 0) ); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_check.yml000066400000000000000000000022451451700765000255660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5465c2be405342b8e453437cd27526acc01a94128274719114d7fe0e3ae8a665 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_definition: naked_identifier: num data_type: data_type_identifier: NUMBER column_constraint_segment: - keyword: NOT - keyword: 'NULL' comma: ',' table_constraint: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: num comparison_operator: raw_comparison_operator: '>' numeric_literal: '0' end_bracket: ) end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_constraint_default.sql000066400000000000000000000001641451700765000303750ustar00rootroot00000000000000BEGIN TRANSACTION; CREATE TABLE IF NOT EXISTS "tbl" ( "col" TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); COMMIT; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_constraint_default.yml000066400000000000000000000023021451700765000303730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b465661bdd47b953b078b1bbeed25eaceeb4bbc4751d064066e83f9b4561876f file: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: quoted_identifier: '"tbl"' - bracketed: start_bracket: ( column_definition: - quoted_identifier: '"col"' - data_type: data_type_identifier: TIMESTAMP - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bare_function: CURRENT_TIMESTAMP end_bracket: ) - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_constraint_regexp.sql000066400000000000000000000001341451700765000302400ustar00rootroot00000000000000CREATE TABLE colors ( css_name TEXT, rgb TEXT CHECK(rgb REGEXP '^#[0-9A-F]{6}$') ); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_constraint_regexp.yml000066400000000000000000000023011451700765000302400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 808c49097970e89f2a72ce00cfdd9faa8c70617bc11892862e5d9e2f175b7898 file: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: colors - bracketed: - start_bracket: ( - column_definition: naked_identifier: css_name data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: rgb data_type: data_type_identifier: TEXT column_constraint_segment: keyword: CHECK bracketed: start_bracket: ( expression: column_reference: naked_identifier: rgb keyword: REGEXP quoted_literal: "'^#[0-9A-F]{6}$'" end_bracket: ) - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_deferrable.sql000066400000000000000000000033721451700765000266040ustar00rootroot00000000000000-- check deferrable in table constrain segment CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) DEFERRABLE INITIALLY IMMEDIATE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE messages( msg_id TEXT, parent_id TEXT, FOREIGN KEY(parent_id) REFERENCES users(id) NOT DEFERRABLE INITIALLY IMMEDIATE ); -- check deferrable in column constrain segment CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) DEFERRABLE INITIALLY IMMEDIATE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES 
artist(artistid) NOT DEFERRABLE ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) NOT DEFERRABLE INITIALLY DEFERRED ); CREATE TABLE track( trackid INTEGER, trackname TEXT, trackartist INTEGER REFERENCES artist(artistid) NOT DEFERRABLE INITIALLY IMMEDIATE ); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_deferrable.yml000066400000000000000000000311661451700765000266100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5ceb51beba6740a7f80613179a15a4d2856cefa05e3b9b34ca1e11b4727c6973 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - 
keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: messages - bracketed: - start_bracket: ( - column_definition: naked_identifier: msg_id data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: parent_id data_type: data_type_identifier: TEXT - comma: ',' - table_constraint: - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: naked_identifier: parent_id end_bracket: ) - keyword: REFERENCES - table_reference: naked_identifier: users - bracketed: start_bracket: ( column_reference: naked_identifier: id end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: 
naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: DEFERRED - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: track - bracketed: - start_bracket: ( - column_definition: naked_identifier: trackid data_type: data_type_identifier: INTEGER - comma: ',' - column_definition: naked_identifier: trackname data_type: data_type_identifier: TEXT - comma: ',' - column_definition: naked_identifier: trackartist data_type: data_type_identifier: INTEGER column_constraint_segment: - keyword: REFERENCES - table_reference: naked_identifier: artist - bracketed: start_bracket: ( column_reference: naked_identifier: artistid end_bracket: ) - keyword: NOT - keyword: DEFERRABLE - keyword: INITIALLY - keyword: IMMEDIATE - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_table_end.sql000066400000000000000000000005471451700765000264270ustar00rootroot00000000000000CREATE TABLE foo ( id INTEGER NOT NULL PRIMARY KEY ) WITHOUT ROWID; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) WITHOUT ROWID; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) STRICT; CREATE TABLE IF NOT EXISTS wordcount( word TEXT PRIMARY KEY, cnt INTEGER ) WITHOUT ROWID, STRICT; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_table_table_end.yml000066400000000000000000000061761451700765000264350ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7a2b0510ff0246a3556ac6091c65fd281aa8097764e142abf459b299530d3456 file: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: foo - bracketed: start_bracket: ( column_definition: - naked_identifier: id - data_type: data_type_identifier: INTEGER - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: keyword: STRICT - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - keyword: IF - keyword: NOT - keyword: EXISTS - table_reference: naked_identifier: wordcount - bracketed: - start_bracket: ( - column_definition: naked_identifier: word data_type: data_type_identifier: TEXT column_constraint_segment: - keyword: PRIMARY - keyword: KEY - comma: ',' - column_definition: naked_identifier: cnt data_type: data_type_identifier: INTEGER - end_bracket: ) - table_end_clause_segment: - keyword: WITHOUT - keyword: ROWID - comma: ',' - keyword: STRICT - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_trigger.sql000066400000000000000000000016101451700765000247760ustar00rootroot00000000000000CREATE TRIGGER update_customer_address UPDATE OF address ON customers BEGIN UPDATE orders SET address = new.address WHERE customer_name = old.name; END; CREATE TRIGGER cust_addr_chng INSTEAD OF UPDATE OF cust_addr ON customer_address BEGIN UPDATE customer SET cust_addr=NEW.cust_addr WHERE cust_id=NEW.cust_id; END; CREATE TRIGGER validate_email_before_insert_leads BEFORE INSERT ON leads BEGIN SELECT 1; END; CREATE TRIGGER log_contact_after_update AFTER UPDATE ON leads BEGIN INSERT INTO lead_logs ( old_id, new_id, old_phone, new_phone, old_email, new_email, user_action, created_at ) VALUES ( old.id, new.id, old.phone, new.phone, old.email, new.email, 'UPDATE' ) ; END; CREATE TRIGGER aft_insert AFTER INSERT ON emp_details BEGIN INSERT INTO emp_log(emp_id,salary,edittime) VALUES(NEW.employee_id,NEW.salary,current_date); END; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/create_trigger.yml000066400000000000000000000161651451700765000250130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a6c6cd83844198daf4513a642932e69466dd2c90d887929b24e9d6577e8d120c file: - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: update_customer_address - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: address - keyword: 'ON' - table_reference: naked_identifier: customers - keyword: BEGIN - update_statement: keyword: UPDATE table_reference: naked_identifier: orders set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: address - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: new - dot: . - naked_identifier: address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: customer_name - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: old - dot: . - naked_identifier: name - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: cust_addr_chng - keyword: INSTEAD - keyword: OF - keyword: UPDATE - keyword: OF - column_reference: naked_identifier: cust_addr - keyword: 'ON' - table_reference: naked_identifier: customer_address - keyword: BEGIN - update_statement: keyword: UPDATE table_reference: naked_identifier: customer set_clause_list: keyword: SET set_clause: - column_reference: naked_identifier: cust_addr - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: cust_addr where_clause: keyword: WHERE expression: - column_reference: naked_identifier: cust_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: NEW - dot: . - naked_identifier: cust_id - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: validate_email_before_insert_leads - keyword: BEFORE - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: leads - keyword: BEGIN - select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: log_contact_after_update - keyword: AFTER - keyword: UPDATE - keyword: 'ON' - table_reference: naked_identifier: leads - keyword: BEGIN - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: lead_logs - bracketed: - start_bracket: ( - column_reference: naked_identifier: old_id - comma: ',' - column_reference: naked_identifier: new_id - comma: ',' - column_reference: naked_identifier: old_phone - comma: ',' - column_reference: naked_identifier: new_phone - comma: ',' - column_reference: naked_identifier: old_email - comma: ',' - column_reference: naked_identifier: new_email - comma: ',' - column_reference: naked_identifier: user_action - comma: ',' - column_reference: naked_identifier: created_at - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . 
- naked_identifier: id - comma: ',' - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: phone - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: phone - comma: ',' - expression: column_reference: - naked_identifier: old - dot: . - naked_identifier: email - comma: ',' - expression: column_reference: - naked_identifier: new - dot: . - naked_identifier: email - comma: ',' - expression: quoted_literal: "'UPDATE'" - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; - statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: aft_insert - keyword: AFTER - keyword: INSERT - keyword: 'ON' - table_reference: naked_identifier: emp_details - keyword: BEGIN - insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: emp_log - bracketed: - start_bracket: ( - column_reference: naked_identifier: emp_id - comma: ',' - column_reference: naked_identifier: salary - comma: ',' - column_reference: naked_identifier: edittime - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: NEW - dot: . - naked_identifier: employee_id - comma: ',' - expression: column_reference: - naked_identifier: NEW - dot: . - naked_identifier: salary - comma: ',' - expression: bare_function: current_date - end_bracket: ) - statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/drop_trigger.sql000066400000000000000000000000461451700765000245010ustar00rootroot00000000000000DROP TRIGGER IF EXISTS MyTestTrigger; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/drop_trigger.yml000066400000000000000000000011271451700765000245040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 56de27b71563494c78c91fd576e749086a7271b6cefb3cb331727e560eaf8b23 file: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - keyword: IF - keyword: EXISTS - trigger_reference: naked_identifier: MyTestTrigger statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/insert.sql000066400000000000000000000011721451700765000233170ustar00rootroot00000000000000INSERT INTO t1 VALUES (1, 2, 3), (4, 5, 6); INSERT INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR ABORT INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR FAIL INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR IGNORE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR REPLACE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); REPLACE INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT OR ROLLBACK INTO t1 (a, b, c) VALUES (1, 2, 3), (4, 5, 6); INSERT INTO t1 SELECT * FROM (SELECT c, c + d AS e FROM t2) AS dt; INSERT INTO t1 DEFAULT VALUES; INSERT INTO t1 (a, b, c) DEFAULT VALUES; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/insert.yml000066400000000000000000000254631451700765000233320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5f028ef635cbb9c766ad3b82ae3dffb38d47f1bb204455cd04172af9cfd4d0cc file: - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: ABORT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: FAIL - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: IGNORE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - 
bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: REPLACE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: REPLACE - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: OR - keyword: ROLLBACK - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - values_clause: - keyword: VALUES - bracketed: - start_bracket: ( - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - comma: ',' - bracketed: - start_bracket: ( - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '6' - end_bracket: ) - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: c - binary_operator: + - column_reference: naked_identifier: d alias_expression: keyword: AS naked_identifier: e from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t2 end_bracket: ) alias_expression: keyword: AS naked_identifier: dt - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; - statement: insert_statement: - 
keyword: INSERT - keyword: INTO - table_reference: naked_identifier: t1 - bracketed: - start_bracket: ( - column_reference: naked_identifier: a - comma: ',' - column_reference: naked_identifier: b - comma: ',' - column_reference: naked_identifier: c - end_bracket: ) - keyword: DEFAULT - keyword: VALUES - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/pragma.sql000066400000000000000000000010171451700765000232600ustar00rootroot00000000000000PRAGMA analysis_limit = 7; PRAGMA schema.application_id; PRAGMA schema.auto_vacuum = INCREMENTAL; PRAGMA automatic_index = TRUE; PRAGMA schema.cache_size = -500; PRAGMA collation_list; PRAGMA data_store_directory = 'directory-name'; PRAGMA encoding = 'UTF-16be'; PRAGMA schema.foreign_key_check('table-name'); PRAGMA schema.journal_mode = WAL; PRAGMA schema.locking_mode = NORMAL; PRAGMA schema.secure_delete = FAST; PRAGMA schema.synchronous = 0; PRAGMA temp_store = DEFAULT; PRAGMA schema.wal_checkpoint(FULL); sqlfluff-2.3.5/test/fixtures/dialects/sqlite/pragma.yml000066400000000000000000000102671451700765000232710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 8e46290d567297bf2f869781593433e524a9205419e5b20bf5bd978b665737f2 file: - statement: pragma_statement: keyword: PRAGMA pragma_reference: naked_identifier: analysis_limit comparison_operator: raw_comparison_operator: '=' numeric_literal: '7' - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: application_id - statement_terminator: ; - statement: pragma_statement: - keyword: PRAGMA - pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: auto_vacuum - comparison_operator: raw_comparison_operator: '=' - keyword: INCREMENTAL - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: naked_identifier: automatic_index comparison_operator: raw_comparison_operator: '=' boolean_literal: 'TRUE' - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: cache_size comparison_operator: raw_comparison_operator: '=' numeric_literal: sign_indicator: '-' numeric_literal: '500' - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: naked_identifier: collation_list - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: naked_identifier: data_store_directory comparison_operator: raw_comparison_operator: '=' quoted_literal: "'directory-name'" - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: naked_identifier: encoding comparison_operator: raw_comparison_operator: '=' quoted_literal: "'UTF-16be'" - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: foreign_key_check bracketed: start_bracket: ( quoted_literal: "'table-name'" end_bracket: ) - statement_terminator: ; - statement: pragma_statement: - keyword: PRAGMA - pragma_reference: - naked_identifier: schema - dot: . 
- naked_identifier: journal_mode - comparison_operator: raw_comparison_operator: '=' - keyword: WAL - statement_terminator: ; - statement: pragma_statement: - keyword: PRAGMA - pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: locking_mode - comparison_operator: raw_comparison_operator: '=' - keyword: NORMAL - statement_terminator: ; - statement: pragma_statement: - keyword: PRAGMA - pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: secure_delete - comparison_operator: raw_comparison_operator: '=' - keyword: FAST - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: synchronous comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' - statement_terminator: ; - statement: pragma_statement: - keyword: PRAGMA - pragma_reference: naked_identifier: temp_store - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: pragma_statement: keyword: PRAGMA pragma_reference: - naked_identifier: schema - dot: . - naked_identifier: wal_checkpoint bracketed: start_bracket: ( keyword: FULL end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/select.sql000066400000000000000000000013051451700765000232700ustar00rootroot00000000000000SELECT a FROM foo LIMIT 10; SELECT survey_time , AVG(light) AS trips FROM survey GROUP BY survey_time; WITH time_cte AS ( SELECT branch, created_at, time, cast(time - LAG (time, 1, time) OVER (ORDER BY time) as real) AS time_spent FROM heartbeats h WHERE user_id = 1 AND created_at >= DATE('now', 'start of day') ORDER BY id LIMIT 1 OFFSET 1 ) SELECT branch as name, cast(time_spent as real) as time_spent, cast(time_spent / (SELECT SUM(time_spent) FROM time_cte) as real) as time_percentage FROM ( SELECT branch, cast(SUM(time_spent) as real) AS time_spent FROM time_cte GROUP BY branch ORDER BY time_spent DESC ) ; sqlfluff-2.3.5/test/fixtures/dialects/sqlite/select.yml000066400000000000000000000250251451700765000232770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c27d3470decd42b40110e9420bda1cbf242293766cad615753cfb40875a1c45f file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: a from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: foo limit_clause: keyword: LIMIT numeric_literal: '10' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: survey_time - comma: ',' - select_clause_element: function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: light end_bracket: ) alias_expression: keyword: AS naked_identifier: trips from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: survey groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: survey_time - statement_terminator: ; - statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: time_cte keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch - comma: ',' - select_clause_element: column_reference: naked_identifier: created_at - comma: ',' - select_clause_element: column_reference: naked_identifier: time - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time binary_operator: '-' function: function_name: function_name_identifier: LAG bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: time - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: time - end_bracket: ) - expression: function: function_name: function_name_identifier: OVER bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time end_bracket: ) - keyword: as - data_type: data_type_identifier: real - end_bracket: ) alias_expression: keyword: AS naked_identifier: time_spent from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: heartbeats alias_expression: naked_identifier: h where_clause: keyword: WHERE expression: - column_reference: naked_identifier: user_id - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: created_at - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - function: function_name: function_name_identifier: DATE bracketed: - start_bracket: ( - expression: quoted_literal: "'now'" - comma: ',' - expression: quoted_literal: "'start of day'" - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: id limit_clause: - keyword: LIMIT - numeric_literal: '1' - keyword: OFFSET - numeric_literal: '1' end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch alias_expression: keyword: as naked_identifier: name - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent keyword: as data_type: 
data_type_identifier: real end_bracket: ) alias_expression: keyword: as naked_identifier: time_spent - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent binary_operator: / bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: time_cte end_bracket: ) keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: keyword: as naked_identifier: time_percentage from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: branch - comma: ',' - select_clause_element: function: function_name: function_name_identifier: cast bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: time_spent end_bracket: ) keyword: as data_type: data_type_identifier: real end_bracket: ) alias_expression: keyword: AS naked_identifier: time_spent from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: time_cte groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: branch orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: time_spent - keyword: DESC end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/000077500000000000000000000000001451700765000215555ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/teradata/.sqlfluff000066400000000000000000000000361451700765000233770ustar00rootroot00000000000000[sqlfluff] dialect = teradata sqlfluff-2.3.5/test/fixtures/dialects/teradata/bteq_stmt.sql000066400000000000000000000000401451700765000242720ustar00rootroot00000000000000.if errorcode > 0 then .quit 4; sqlfluff-2.3.5/test/fixtures/dialects/teradata/bteq_stmt.yml000066400000000000000000000014501451700765000243020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5c9454897c6fb0804ca61cd756db50740fc13aabf09a5232ea602068cc2fa713 file: statement: bteq_statement: - dot: . - bteq_key_word_segment: keyword: if - bteq_key_word_segment: keyword: errorcode - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - bteq_key_word_segment: keyword: then - bteq_key_word_segment: dot: . 
keyword: quit numeric_literal: '4' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/collect_stats.sql000066400000000000000000000011311451700765000251350ustar00rootroot00000000000000COLLECT STATISTICS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1; COLLECT STATISTICS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1; COLLECT STATISTICS COLUMN o_orderstatus ON orders; COLLECT STATISTICS USING SYSTEM THRESHOLD FOR CURRENT COLUMN (o_orderstatus, o_orderkey) ON orders; COLLECT STATS COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1; COLLECT STAT COLUMN ( IND_TIPO_TARJETA ) ON DB_1.TABLE_1; COLLECT STATS COLUMN IND_TIPO_TARJETA ON DB_1.TABLE_1; COLLECT STATS INDEX ( COD_TARJETA, COD_EST, IND_TIPO_TARJETA, FEC_ANIO_MES ) ON DB_1.TABLE_1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/collect_stats.yml000066400000000000000000000100071451700765000251410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 79c36181d2695cfe097a7f516eb4f7505073d6886f5b0f7e37b98b45374a717b file: - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATISTICS - keyword: COLUMN - bracketed: start_bracket: ( column_reference: naked_identifier: IND_TIPO_TARJETA end_bracket: ) - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . - naked_identifier: TABLE_1 - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATISTICS - keyword: INDEX - bracketed: - start_bracket: ( - column_reference: naked_identifier: COD_TARJETA - comma: ',' - column_reference: naked_identifier: COD_EST - comma: ',' - column_reference: naked_identifier: IND_TIPO_TARJETA - comma: ',' - column_reference: naked_identifier: FEC_ANIO_MES - end_bracket: ) - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . - naked_identifier: TABLE_1 - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATISTICS - keyword: COLUMN - column_reference: naked_identifier: o_orderstatus - keyword: 'ON' - table_reference: naked_identifier: orders - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATISTICS - keyword: USING - collect_stat_using_option_clause: - keyword: SYSTEM - keyword: THRESHOLD - keyword: FOR - keyword: CURRENT - keyword: COLUMN - bracketed: - start_bracket: ( - column_reference: naked_identifier: o_orderstatus - comma: ',' - column_reference: naked_identifier: o_orderkey - end_bracket: ) - keyword: 'ON' - table_reference: naked_identifier: orders - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATS - keyword: COLUMN - bracketed: start_bracket: ( column_reference: naked_identifier: IND_TIPO_TARJETA end_bracket: ) - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . - naked_identifier: TABLE_1 - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STAT - keyword: COLUMN - bracketed: start_bracket: ( column_reference: naked_identifier: IND_TIPO_TARJETA end_bracket: ) - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . 
- naked_identifier: TABLE_1 - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATS - keyword: COLUMN - column_reference: naked_identifier: IND_TIPO_TARJETA - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . - naked_identifier: TABLE_1 - statement_terminator: ; - statement: collect_statistics_statement: - keyword: COLLECT - keyword: STATS - keyword: INDEX - bracketed: - start_bracket: ( - column_reference: naked_identifier: COD_TARJETA - comma: ',' - column_reference: naked_identifier: COD_EST - comma: ',' - column_reference: naked_identifier: IND_TIPO_TARJETA - comma: ',' - column_reference: naked_identifier: FEC_ANIO_MES - end_bracket: ) - keyword: 'ON' - table_reference: - naked_identifier: DB_1 - dot: . - naked_identifier: TABLE_1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/collect_stats_2.sql000066400000000000000000000005701451700765000253640ustar00rootroot00000000000000collect statistics column (Org_Unit_Code, Org_Unit_Type, Entity_Code) as Org_Descendant_NUPI, column (Org_Unit_Type), column (Entity_Code), column (Org_Unit_Code, Entity_Code), column (Entity_Code, Parent_Org_Unit_Code, Parent_Org_Unit_Type), column (Org_Unit_Code), column (Parent_Org_Unit_Code, Parent_Org_Unit_Type, Parent_Entity_Code) on sandbox_db.Org_Descendant; sqlfluff-2.3.5/test/fixtures/dialects/teradata/collect_stats_2.yml000066400000000000000000000050011451700765000253600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4addc1b374fb1f0cc38c57b6cf399fc8b72cf7d1403b7d5b2cbf510d7e4cf0c4 file: statement: collect_statistics_statement: - keyword: collect - keyword: statistics - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - keyword: as - object_reference: naked_identifier: Org_Descendant_NUPI - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Type end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Entity_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Entity_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Type - end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Parent_Entity_Code - end_bracket: ) - keyword: 'on' - table_reference: - naked_identifier: sandbox_db - dot: . 
- naked_identifier: Org_Descendant statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comment_on_column_stmt.sql000066400000000000000000000001321451700765000270540ustar00rootroot00000000000000comment on column sandbox_db.Org_Descendant.Org_Unit_Code is 'Organisational unit code'; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comment_on_column_stmt.yml000066400000000000000000000013671451700765000270710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7ff1d0a5b3b8ac35886f6d7cdd3d98c2af949c207b8263ce999116a720d65b86 file: statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Org_Unit_Code - keyword: is - quoted_literal: "'Organisational unit code'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comment_on_table_stmt.sql000066400000000000000000000001331451700765000266470ustar00rootroot00000000000000comment on table sandbox_db.Org_Descendant is 'View with all Org_Unit_Ids on all levels'; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comment_on_table_stmt.yml000066400000000000000000000013161451700765000266550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6d6e22405c4f26a72f1a4ba75356cb6bd3ab0f8ece2336ba8ac2dfad7b4782e8 file: statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - keyword: is - quoted_literal: "'View with all Org_Unit_Ids on all levels'" statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comparison_operators.sql000066400000000000000000000010221451700765000265410ustar00rootroot00000000000000SELECT * FROM MY_TABLE WHERE A >= B; SELECT * FROM MY_TABLE WHERE A GE B; SELECT * FROM MY_TABLE WHERE A <= B; SELECT * FROM MY_TABLE WHERE A LE B; SELECT * FROM MY_TABLE WHERE A = B; SELECT * FROM MY_TABLE WHERE A EQ B; SELECT * FROM MY_TABLE WHERE A <> B; SELECT * FROM MY_TABLE WHERE A ^= B; SELECT * FROM MY_TABLE WHERE A NOT= B; SELECT * FROM MY_TABLE WHERE A NE B; SELECT * FROM MY_TABLE WHERE A GT B; SELECT * FROM MY_TABLE WHERE A > B; SELECT * FROM MY_TABLE WHERE A LT B; SELECT * FROM MY_TABLE WHERE A < B; sqlfluff-2.3.5/test/fixtures/dialects/teradata/comparison_operators.yml000066400000000000000000000233111451700765000265500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6fe3b44345923a0b3030d15821fb486005acb9e39850430f6d05fd6af81a7679 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: GE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: LE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: EQ - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: 
from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: binary_operator: ^ raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: keyword: NOT raw_comparison_operator: '=' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: NE - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: GT - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: '>' - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: LT - column_reference: naked_identifier: B - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: A - comparison_operator: raw_comparison_operator: < - column_reference: naked_identifier: B - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table.sql000066400000000000000000000030171451700765000247110ustar00rootroot00000000000000create table sandbox_db.Org_Descendant ( Org_Unit_Code char(6) character set unicode not null, Org_Unit_Type char(3) character set unicode not null, Entity_Code varchar(10) uppercase not null, Parent_Org_Unit_Code char(6) character set unicode not null, Parent_Org_Unit_Type char(3) character set unicode not null, Parent_Entity_Code varchar(10) uppercase not null ) 
primary index Org_Descendant_NUPI (Org_Unit_Code, Org_Unit_Type, Entity_Code) ; collect statistics column (Org_Unit_Code, Org_Unit_Type, Entity_Code) as Org_Descendant_NUPI, column (Org_Unit_Type), column (Entity_Code), column (Org_Unit_Code, Entity_Code), column (Entity_Code, Parent_Org_Unit_Code, Parent_Org_Unit_Type), column (Org_Unit_Code), column (Parent_Org_Unit_Code, Parent_Org_Unit_Type, Parent_Entity_Code) on sandbox_db.Org_Descendant; comment on table sandbox_db.Org_Descendant is 'View with all Org_Unit_Ids on all levels'; comment on column sandbox_db.Org_Descendant.Org_Unit_Code is 'Organisational unit code'; comment on column sandbox_db.Org_Descendant.Org_Unit_Type is 'The type of organization such as branch, region, team, call center'; comment on column sandbox_db.Org_Descendant.Entity_Code is 'Owning entity code'; comment on column sandbox_db.Org_Descendant.Parent_Org_Unit_Code is 'Organisational unit code'; comment on column sandbox_db.Org_Descendant.Parent_Org_Unit_Type is 'The type of organization such as branch, region, team, call center'; comment on column sandbox_db.Org_Descendant.Parent_Entity_Code is 'Owning entity code parent'; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table.yml000066400000000000000000000216501451700765000247160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3badd344b1e26c3368e9102fa001ff4302206b72fb4594f4dda4a6611a08fd32 file: - statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . 
- naked_identifier: Org_Descendant - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - end_bracket: ) - td_table_constraint: - keyword: primary - keyword: index - object_reference: naked_identifier: Org_Descendant_NUPI - bracketed: - start_bracket: ( - naked_identifier: Org_Unit_Code - comma: ',' - naked_identifier: Org_Unit_Type - comma: ',' - naked_identifier: Entity_Code - end_bracket: ) - statement_terminator: ; - statement: collect_statistics_statement: - keyword: collect - keyword: statistics - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - keyword: as - object_reference: naked_identifier: Org_Descendant_NUPI - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Type end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Entity_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Entity_Code - end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Entity_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - 
column_reference: naked_identifier: Parent_Org_Unit_Type - end_bracket: ) - comma: ',' - keyword: column - bracketed: start_bracket: ( column_reference: naked_identifier: Org_Unit_Code end_bracket: ) - comma: ',' - keyword: column - bracketed: - start_bracket: ( - column_reference: naked_identifier: Parent_Org_Unit_Code - comma: ',' - column_reference: naked_identifier: Parent_Org_Unit_Type - comma: ',' - column_reference: naked_identifier: Parent_Entity_Code - end_bracket: ) - keyword: 'on' - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - keyword: is - quoted_literal: "'View with all Org_Unit_Ids on all levels'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Org_Unit_Code - keyword: is - quoted_literal: "'Organisational unit code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Org_Unit_Type - keyword: is - quoted_literal: "'The type of organization such as branch, region, team, call\ \ center'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Entity_Code - keyword: is - quoted_literal: "'Owning entity code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Parent_Org_Unit_Code - keyword: is - quoted_literal: "'Organisational unit code'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Parent_Org_Unit_Type - keyword: is - quoted_literal: "'The type of organization such as branch, region, team, call\ \ center'" - statement_terminator: ; - statement: comment_clause: - keyword: comment - keyword: 'on' - keyword: column - column_reference: - naked_identifier: sandbox_db - dot: . - naked_identifier: Org_Descendant - dot: . - naked_identifier: Parent_Entity_Code - keyword: is - quoted_literal: "'Owning entity code parent'" - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt.sql000066400000000000000000000003511451700765000257560ustar00rootroot00000000000000-- Testing of the specific create table begin options CREATE MULTISET TABLE CONSUMOS, NO FALLBACK, NO BEFORE JOURNAL, NO AFTER JOURNAL, CHECKSUM = DEFAULT, DEFAULT MERGEBLOCKRATIO ( FIELD1 CHAR(9) ) PRIMARY INDEX( FIELD1 ); sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt.yml000066400000000000000000000031521451700765000257620ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 21981d268a2697bb67da8fb4a6c9b4aa9ef7d9545a7b04259adea592591d3f92 file: statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: CONSUMOS - create_table_options_statement: - comma: ',' - keyword: 'NO' - keyword: FALLBACK - comma: ',' - keyword: 'NO' - keyword: BEFORE - keyword: JOURNAL - comma: ',' - keyword: 'NO' - keyword: AFTER - keyword: JOURNAL - comma: ',' - keyword: CHECKSUM - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - comma: ',' - keyword: DEFAULT - keyword: MERGEBLOCKRATIO - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: FIELD1 data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '9' end_bracket: ) end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: start_bracket: ( naked_identifier: FIELD1 end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_2.sql000066400000000000000000000007331451700765000262030ustar00rootroot00000000000000-- Testing of the specific column options CREATE MULTISET TABLE TABLE_2 ( CHAR_FIELD CHAR(19) CHARACTER SET LATIN NOT CASESPECIFIC NOT NULL, DATE_FIELD DATE FORMAT 'YYYY-MM-DD' NOT NULL, BYTE_FIELD BYTEINT COMPRESS 0, DECIMAL_FIELD DECIMAL(15, 2) COMPRESS (50.00, 45.50, 40.00, 30.00, 27.80, 27.05, 20.00, 17.87, 17.56, 17.41, 17.26, 17.11, 16.96, 16.82, 16.68), TIMESTAMP_FIELD TIMESTAMP(6) NOT NULL ) PRIMARY INDEX( CHAR_FIELD, DATE_FIELD, BYTE_FIELD );sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_2.yml000066400000000000000000000100601451700765000261770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 8b9d1e361194f1e8fa9e08200de67e0ca3b3b581d305f7577b5c9038fe0ca3ca file: statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: TABLE_2 - bracketed: - start_bracket: ( - column_definition: - column_reference: naked_identifier: CHAR_FIELD - data_type: data_type_identifier: CHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '19' end_bracket: ) - td_column_attribute_constraint: - keyword: CHARACTER - keyword: SET - naked_identifier: LATIN - td_column_attribute_constraint: - keyword: NOT - keyword: CASESPECIFIC - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: DATE_FIELD data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYY-MM-DD'" column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: column_reference: naked_identifier: BYTE_FIELD data_type: data_type_identifier: BYTEINT td_column_attribute_constraint: keyword: COMPRESS numeric_literal: '0' - comma: ',' - column_definition: column_reference: naked_identifier: DECIMAL_FIELD data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - numeric_literal: '15' - comma: ',' - numeric_literal: '2' - end_bracket: ) td_column_attribute_constraint: keyword: COMPRESS bracketed: - start_bracket: ( - numeric_literal: '50.00' - comma: ',' - numeric_literal: '45.50' - comma: ',' - numeric_literal: '40.00' - comma: ',' - numeric_literal: '30.00' - comma: ',' - numeric_literal: '27.80' - comma: ',' - numeric_literal: '27.05' - comma: ',' - numeric_literal: '20.00' - comma: ',' - numeric_literal: '17.87' - comma: ',' - numeric_literal: '17.56' - comma: ',' - numeric_literal: '17.41' - comma: ',' - numeric_literal: '17.26' - comma: ',' - numeric_literal: '17.11' - comma: ',' - numeric_literal: '16.96' - comma: ',' - numeric_literal: '16.82' - comma: ',' - numeric_literal: '16.68' - end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: TIMESTAMP_FIELD data_type: data_type_identifier: TIMESTAMP bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: - start_bracket: ( - naked_identifier: CHAR_FIELD - comma: ',' - naked_identifier: DATE_FIELD - comma: ',' - naked_identifier: BYTE_FIELD - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_3.sql000066400000000000000000000007621451700765000262060ustar00rootroot00000000000000-- Testing of the specific create table end options CREATE MULTISET TABLE NUM_LTR_DESVINCULADOS_ADH ( DES_EVENTO VARCHAR(255) CHARACTER SET LATIN NOT CASESPECIFIC COMPRESS ('Cambio de bandera', 'Cierre'), IND_CONTINUA BYTEINT COMPRESS ) PRIMARY INDEX( COD_TARJETA, COD_EST, FEC_CIERRE_EST, IND_TIPO_TARJETA ) PARTITION BY RANGE_N (FEC_OPERACION BETWEEN DATE '2007-01-01' AND DATE '2022-01-01' EACH INTERVAL '1' MONTH, NO RANGE OR UNKNOWN) INDEX HOPR_TRN_TRAV_SIN_MP_I ( IND_TIPO_TARJETA );sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_3.yml000066400000000000000000000057201451700765000262070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 480b3302006a1e87338abb180621b4ed58094ce63919f94f281261fd2a2496ff file: statement: create_table_statement: - keyword: CREATE - keyword: MULTISET - keyword: TABLE - table_reference: naked_identifier: NUM_LTR_DESVINCULADOS_ADH - bracketed: - start_bracket: ( - column_definition: - column_reference: naked_identifier: DES_EVENTO - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '255' end_bracket: ) - td_column_attribute_constraint: - keyword: CHARACTER - keyword: SET - naked_identifier: LATIN - td_column_attribute_constraint: - keyword: NOT - keyword: CASESPECIFIC - td_column_attribute_constraint: keyword: COMPRESS bracketed: - start_bracket: ( - quoted_literal: "'Cambio de bandera'" - comma: ',' - quoted_literal: "'Cierre'" - end_bracket: ) - comma: ',' - column_definition: column_reference: naked_identifier: IND_CONTINUA data_type: data_type_identifier: BYTEINT td_column_attribute_constraint: keyword: COMPRESS - end_bracket: ) - td_table_constraint: - keyword: PRIMARY - keyword: INDEX - bracketed: - start_bracket: ( - naked_identifier: COD_TARJETA - comma: ',' - naked_identifier: COD_EST - comma: ',' - naked_identifier: FEC_CIERRE_EST - comma: ',' - naked_identifier: IND_TIPO_TARJETA - end_bracket: ) - keyword: PARTITION - keyword: BY - td_partitioning_level: function_name: function_name_identifier: RANGE_N bracketed: - start_bracket: ( - word: FEC_OPERACION - word: BETWEEN - word: DATE - single_quote: "'2007-01-01'" - word: AND - word: DATE - single_quote: "'2022-01-01'" - word: EACH - word: INTERVAL - single_quote: "'1'" - word: MONTH - comma: ',' - word: 'NO' - word: RANGE - word: OR - word: UNKNOWN - end_bracket: ) - keyword: INDEX - object_reference: naked_identifier: HOPR_TRN_TRAV_SIN_MP_I - bracketed: start_bracket: ( column_reference: naked_identifier: IND_TIPO_TARJETA end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_4.sql000066400000000000000000000007061451700765000262050ustar00rootroot00000000000000create table sandbox_db.Org_Descendant ( Org_Unit_Code char(6) character set unicode not null, Org_Unit_Type char(3) character set unicode not null, Entity_Code varchar(10) uppercase not null, Parent_Org_Unit_Code char(6) character set unicode not null, Parent_Org_Unit_Type char(3) character set unicode not null, Parent_Entity_Code varchar(10) uppercase not null ) primary index Org_Descendant_NUPI (Org_Unit_Code, Org_Unit_Type, Entity_Code) ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_stmt_4.yml000066400000000000000000000103511451700765000262040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bf62ad54c88397641a65ceae62c072d661544f2c0993695e44ea00582b7713ac file: statement: create_table_statement: - keyword: create - keyword: table - table_reference: - naked_identifier: sandbox_db - dot: . 
- naked_identifier: Org_Descendant - bracketed: - start_bracket: ( - column_definition: column_reference: naked_identifier: Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Code data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '6' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Org_Unit_Type data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '3' end_bracket: ) td_column_attribute_constraint: - keyword: character - keyword: set - naked_identifier: unicode column_constraint_segment: - keyword: not - keyword: 'null' - comma: ',' - column_definition: column_reference: naked_identifier: Parent_Entity_Code data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) td_column_attribute_constraint: keyword: uppercase column_constraint_segment: - keyword: not - keyword: 'null' - end_bracket: ) - td_table_constraint: - keyword: primary - keyword: index - object_reference: naked_identifier: Org_Descendant_NUPI - bracketed: - start_bracket: ( - naked_identifier: Org_Unit_Code - comma: ',' - naked_identifier: Org_Unit_Type - comma: ',' - naked_identifier: Entity_Code - end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_with_data.sql000066400000000000000000000010331451700765000267310ustar00rootroot00000000000000CREATE VOLATILE TABLE a AS (SELECT 'A' AS B) WITH DATA ON COMMIT PRESERVE ROWS; CREATE VOLATILE TABLE b AS (SELECT 'A' AS B) WITH DATA ON COMMIT DELETE ROWS; CREATE VOLATILE TABLE c AS (SELECT 'A' AS B) WITH NO DATA; CREATE VOLATILE TABLE e AS (SELECT 'A' AS B) WITH NO DATA AND STATS; CREATE VOLATILE TABLE f AS (SELECT 'A' AS B) WITH NO DATA AND NO STATS; CREATE VOLATILE TABLE g AS (SELECT 'A' AS B) WITH NO DATA AND STATISTICS; CREATE VOLATILE TABLE h AS (SELECT 'A' AS B) WITH NO DATA AND NO STATISTICS ON COMMIT PRESERVE ROWS; sqlfluff-2.3.5/test/fixtures/dialects/teradata/create_table_with_data.yml000066400000000000000000000117651451700765000267500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4d792b0bbbda0f7cb7064bdc3967ad78ae41b51e7ef3585d7c61d1f5507afa28 file: - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: a - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: DATA - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: b - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: DATA - keyword: 'ON' - keyword: COMMIT - keyword: DELETE - keyword: ROWS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: c - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: e - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: STATS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: f - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: 'NO' - keyword: STATS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: g - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: STATISTICS - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: VOLATILE - keyword: TABLE - table_reference: naked_identifier: h - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'A'" alias_expression: keyword: AS naked_identifier: B end_bracket: ) - td_table_constraint: - keyword: WITH - keyword: 'NO' - keyword: DATA - keyword: AND - keyword: 'NO' - keyword: STATISTICS - keyword: 'ON' - keyword: COMMIT - keyword: PRESERVE - keyword: ROWS - statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/teradata/database.sql000066400000000000000000000000241451700765000240360ustar00rootroot00000000000000DATABASE database1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/database.yml000066400000000000000000000010371451700765000240450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7cac8d54c918136598faf203938b44e330a57396dfae6efc0d65fa18f733c571 file: statement: database_statement: keyword: DATABASE database_reference: naked_identifier: database1 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/delete_stmt.sql000066400000000000000000000004541451700765000246120ustar00rootroot00000000000000DELETE FROM MY_TABLE WHERE 1=1 ; DELETE FROM MY_TABLE WHERE MY_COL > 10 ; DELETE FROM MY_TABLE WHERE ID IN (SELECT ID FROM ANOTHER_TABLE) AND ID <> 5 ; DEL FROM MY_TABLE WHERE 1=1 ; DEL FROM MY_TABLE WHERE MY_COL > 10 ; DEL FROM MY_TABLE WHERE ID IN (SELECT ID FROM ANOTHER_TABLE) AND ID <> 5 ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/delete_stmt.yml000066400000000000000000000113371451700765000246160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ee1c9bd2be40a1b3b1133e4b1c0e9a569b519ffe26d5ba228c80511b0bfb214a file: - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: column_reference: naked_identifier: MY_COL comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: delete_statement: keyword: DELETE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ID - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ANOTHER_TABLE end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ID - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '5' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - 
numeric_literal: '1' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: column_reference: naked_identifier: MY_COL comparison_operator: raw_comparison_operator: '>' numeric_literal: '10' - statement_terminator: ; - statement: delete_statement: keyword: DEL from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ID - keyword: IN - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: ANOTHER_TABLE end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: ID - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '5' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_expression.sql000066400000000000000000000000451451700765000262260ustar00rootroot00000000000000SELECT id FROM mytable qualify x = 1;sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_expression.yml000066400000000000000000000020021451700765000262230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7f386f9b109021b13e353f320b4608a2943275e84ffa68b54818c8a72afdfa49 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_expression_union.sql000066400000000000000000000001251451700765000274350ustar00rootroot00000000000000SELECT id FROM mytable qualify x = 1 UNION ALL SELECT id FROM mytable qualify x = 1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_expression_union.yml000066400000000000000000000033731451700765000274470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 7efd41312f2179e83d194313ba3e11714b6bee9f358317573b142b06ba9cbe6f file: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_over.sql000066400000000000000000000001031451700765000247750ustar00rootroot00000000000000SELECT id FROM mytable qualify count(*) over (partition by id) > 1;sqlfluff-2.3.5/test/fixtures/dialects/teradata/qualify_over.yml000066400000000000000000000030441451700765000250060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 6d7d0043b6a42db85d21b924d95054b845001892db8d0fbb84fa45e681d36206 file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: id from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable qualify_clause: keyword: qualify expression: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - expression: column_reference: naked_identifier: id end_bracket: ) comparison_operator: raw_comparison_operator: '>' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_date.sql000066400000000000000000000003341451700765000245520ustar00rootroot00000000000000SELECT DATE; CREATE TABLE t1 (f1 DATE); SELECT DATE (FORMAT 'MMMbdd,bYYYY'); -- (CHAR(12), UC); -- https://docs.teradata.com/r/S0Fw2AVH8ff3MDA0wDOHlQ/ryoeKJsEr22NqKahaktP5g -- Disabled CHAR(12, UC) for now, see #1665 sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_date.yml000066400000000000000000000025651451700765000245640ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9306659b0e9dee104de5abc043616c7d8c3592182967fedf3a6e32feead7af66 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: bare_function: DATE - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: t1 - bracketed: start_bracket: ( column_definition: column_reference: naked_identifier: f1 data_type: data_type_identifier: DATE end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: data_type: data_type_identifier: FORMAT quoted_literal: "'MMMbdd,bYYYY'" end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt.sql000066400000000000000000000003521451700765000246240ustar00rootroot00000000000000SELECT ADD_MONTHS(abandono.FEC_CIERRE_EST, -12) AS FEC_CIERRE_EST_ULT12, CAST('200010' AS DATE FORMAT 'YYYYMM') AS CAST_STATEMENT_EXAMPLE FROM EXAMPLE_TABLE; SEL * FROM CUSTOMERS; SELECT * FROM CUSTOMERS; SEL 1; SELECT 1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt.yml000066400000000000000000000060701451700765000246310ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: f1b89b3679dd8d827bcbf97affb0b08d1bbf6371a0ef9707b8a7eb4801e0582e file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ADD_MONTHS bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: abandono - dot: . 
- naked_identifier: FEC_CIERRE_EST - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '12' - end_bracket: ) alias_expression: keyword: AS naked_identifier: FEC_CIERRE_EST_ULT12 - comma: ',' - select_clause_element: function: function_name: function_name_identifier: CAST bracketed: start_bracket: ( expression: quoted_literal: "'200010'" keyword: AS data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYYMM'" end_bracket: ) alias_expression: keyword: AS naked_identifier: CAST_STATEMENT_EXAMPLE from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EXAMPLE_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SEL select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: CUSTOMERS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: CUSTOMERS - statement_terminator: ; - statement: select_statement: select_clause: keyword: SEL select_clause_element: numeric_literal: '1' - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt_cast.sql000066400000000000000000000001651451700765000256400ustar00rootroot00000000000000SELECT '9999-12-31' (DATE), '9999-12-31' (DATE FORMAT 'YYYY-MM-DD'), '100000' (SMALLINT) from test_table;sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt_cast.yml000066400000000000000000000032261451700765000256430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 46bd603de20857e0fdd71b4d28d127c5cb5e37e04369abc557ca3c8f4dc1be1b file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: quoted_literal: "'9999-12-31'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: DATE end_bracket: ) - comma: ',' - select_clause_element: expression: quoted_literal: "'9999-12-31'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: DATE keyword: FORMAT quoted_literal: "'YYYY-MM-DD'" end_bracket: ) - comma: ',' - select_clause_element: expression: quoted_literal: "'100000'" cast_expression: bracketed: start_bracket: ( data_type: data_type_identifier: SMALLINT end_bracket: ) from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: test_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt_normalize.sql000066400000000000000000000007261451700765000267110ustar00rootroot00000000000000select normalize on meets or overlaps id ,period(vld_fm, vld_to) as vld_prd from mydb.mytable where id = 12345; SELECT NORMALIZE ON MEETS OR OVERLAPS emp_id, duration FROM project; SELECT NORMALIZE project_name, duration FROM project; SELECT NORMALIZE project_name, dept_id, duration FROM project; SELECT NORMALIZE ON OVERLAPS project_name, dept_id, duration FROM project; SELECT NORMALIZE ON OVERLAPS OR MEETS project_name, dept_id, duration FROM project; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_stmt_normalize.yml000066400000000000000000000123331451700765000267100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e3ba8a2583bd3448e0e87373ae5338ec94005ee9f3273e6cd0b8bc083b273bd9 file: - statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: normalize - keyword: 'on' - keyword: meets - keyword: or - keyword: overlaps - select_clause_element: column_reference: naked_identifier: id - comma: ',' - select_clause_element: function: function_name: function_name_identifier: period bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: vld_fm - comma: ',' - expression: column_reference: naked_identifier: vld_to - end_bracket: ) alias_expression: keyword: as naked_identifier: vld_prd from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: mydb - dot: . 
- naked_identifier: mytable where_clause: keyword: where expression: column_reference: naked_identifier: id comparison_operator: raw_comparison_operator: '=' numeric_literal: '12345' - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: MEETS - keyword: OR - keyword: OVERLAPS - select_clause_element: column_reference: naked_identifier: emp_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: NORMALIZE - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: NORMALIZE - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: OVERLAPS - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: NORMALIZE - keyword: 'ON' - keyword: OVERLAPS - keyword: OR - keyword: MEETS - select_clause_element: column_reference: naked_identifier: project_name - comma: ',' - select_clause_element: column_reference: naked_identifier: dept_id - comma: ',' - select_clause_element: column_reference: naked_identifier: duration from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: project - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_top.sql000066400000000000000000000004611451700765000244400ustar00rootroot00000000000000SELECT TOP 100 * FROM MY_TABLE; SELECT * FROM MY_TABLE; SELECT TOP 100 COL_A, COL_B FROM MY_TABLE; SELECT DISTINCT * FROM MY_TABLE; SELECT TOP 10 PERCENT * FROM MY_TABLE; SELECT TOP 0.1 PERCENT COL_A FROM MY_TABLE; SELECT TOP 0.1 PERCENT WITH TIES COL_A, COL_B FROM MY_TABLE ORDER BY COL_B; sqlfluff-2.3.5/test/fixtures/dialects/teradata/select_top.yml000066400000000000000000000105211451700765000244400ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 115f038980ab8782a6a505e2371ca12f1916731277a75af335b7c61439e9327b file: - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '100' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP expression: numeric_literal: '100' - select_clause_element: column_reference: naked_identifier: COL_A - comma: ',' - select_clause_element: column_reference: naked_identifier: COL_B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: DISTINCT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: TOP - expression: numeric_literal: '10' - keyword: PERCENT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_modifier: - keyword: TOP - expression: numeric_literal: '0.1' - keyword: PERCENT select_clause_element: column_reference: naked_identifier: COL_A from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - expression: numeric_literal: '0.1' - keyword: PERCENT - keyword: WITH - keyword: TIES - select_clause_element: column_reference: naked_identifier: COL_A - comma: ',' - select_clause_element: column_reference: naked_identifier: COL_B from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: MY_TABLE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: COL_B - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/set_query_band.sql000066400000000000000000000005751451700765000253110ustar00rootroot00000000000000SET QUERY_BAND = 'cat=siamese;dog=akita;' UPDATE FOR SESSION VOLATILE; SET QUERY_BAND = 'area=west;city=sandiego;tree=maple;flower=rose;' FOR SESSION; SET QUERY_BAND = 'city=san diego;' UPDATE FOR SESSION; SET QUERY_BAND='PROXYUSER=fred;' FOR TRANSACTION; SET QUERY_BAND = NONE FOR TRANSACTION; SET QUERY_BAND=NONE FOR TRANSACTION; SET QUERY_BAND = '' FOR TRANSACTION; 
sqlfluff-2.3.5/test/fixtures/dialects/teradata/set_query_band.yml000066400000000000000000000043641451700765000253130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 06bd5bdc0a85a0086810f6436263bb2bd81376f46011bc3e08d4228aafd1baca file: - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'cat=siamese;dog=akita;'" - keyword: UPDATE - keyword: FOR - keyword: SESSION - keyword: VOLATILE - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'area=west;city=sandiego;tree=maple;flower=rose;'" - keyword: FOR - keyword: SESSION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'city=san diego;'" - keyword: UPDATE - keyword: FOR - keyword: SESSION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'PROXYUSER=fred;'" - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; - statement: set_query_band_statement: - keyword: SET - keyword: QUERY_BAND - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "''" - keyword: FOR - keyword: TRANSACTION - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/set_session.sql000066400000000000000000000000671451700765000246370ustar00rootroot00000000000000SET SESSION DATABASE database1; SS DATABASE database1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/set_session.yml000066400000000000000000000014661451700765000246450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 41c693de7ef7b9f05e2ff7ad60eb113c8afc7f8281ab1109d15d44949b99f446 file: - statement: set_session_statement: - keyword: SET - keyword: SESSION - database_statement: keyword: DATABASE database_reference: naked_identifier: database1 - statement_terminator: ; - statement: set_session_statement: keyword: SS database_statement: keyword: DATABASE database_reference: naked_identifier: database1 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/teradata/update_from.sql000066400000000000000000000003701451700765000246030ustar00rootroot00000000000000UPDATE table_name FROM ( SELECT a, b, c, d FROM t_b INNER JOIN t_c ON t_b.d = t_c.d WHERE b = 'F' -- AND SUBSTR(c, 1, 1) = 'T' ) AS t_d SET column1 = value1, column2 = 'value2' WHERE a=1; sqlfluff-2.3.5/test/fixtures/dialects/teradata/update_from.yml000066400000000000000000000070571451700765000246160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: b3ac8a3be0125b5387bea1d87047d3420798b0ec34f7f1229ed24bdfa9b8e2bf file: statement: update_statement: keyword: UPDATE table_reference: naked_identifier: table_name from_in_update_clause: keyword: FROM from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c - comma: ',' - select_clause_element: column_reference: naked_identifier: d from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_b join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: t_c - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: t_b - dot: . - naked_identifier: d - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: t_c - dot: . 
- naked_identifier: d where_clause: keyword: WHERE expression: column_reference: naked_identifier: b comparison_operator: raw_comparison_operator: '=' quoted_literal: "'F'" end_bracket: ) alias_expression: keyword: AS naked_identifier: t_d set_clause_list: - keyword: SET - set_clause: - column_reference: naked_identifier: column1 - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: value1 - comma: ',' - set_clause: column_reference: naked_identifier: column2 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value2'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: a comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/trino/000077500000000000000000000000001451700765000211235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/trino/.sqlfluff000066400000000000000000000000331451700765000227420ustar00rootroot00000000000000[sqlfluff] dialect = trino sqlfluff-2.3.5/test/fixtures/dialects/trino/bare_functions.sql000066400000000000000000000002041451700765000246410ustar00rootroot00000000000000SELECT current_date AS col1, current_timestamp AS col2, current_time as col3, localtime as col4, localtimestamp as col5 ; sqlfluff-2.3.5/test/fixtures/dialects/trino/bare_functions.yml000066400000000000000000000025201451700765000246460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 210d2e71599eaff34740b685cb392e0267bbd3ab09cf0c772df8d8388a79c123 file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: bare_function: current_date alias_expression: keyword: AS naked_identifier: col1 - comma: ',' - select_clause_element: bare_function: current_timestamp alias_expression: keyword: AS naked_identifier: col2 - comma: ',' - select_clause_element: bare_function: current_time alias_expression: keyword: as naked_identifier: col3 - comma: ',' - select_clause_element: bare_function: localtime alias_expression: keyword: as naked_identifier: col4 - comma: ',' - select_clause_element: bare_function: localtimestamp alias_expression: keyword: as naked_identifier: col5 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/trino/range_offset.sql000066400000000000000000000012061451700765000243050ustar00rootroot00000000000000-- https://trino.io/blog/2021/03/10/introducing-new-window-features.html SELECT student_id, result, count(*) OVER ( ORDER BY result RANGE BETWEEN 1 FOLLOWING AND 2 FOLLOWING) AS close_better_scores_count FROM students_results; SELECT avg(totalprice) OVER ( PARTITION BY custkey ORDER BY orderdate RANGE BETWEEN interval '1' DAY PRECEDING AND interval '1' DAY FOLLOWING) FROM orders; SELECT avg(totalprice) OVER w, sum(totalprice) OVER w, max(totalprice) OVER w FROM orders WINDOW w AS ( PARTITION BY custkey ORDER BY orderdate RANGE BETWEEN interval '1' month PRECEDING AND CURRENT ROW) sqlfluff-2.3.5/test/fixtures/dialects/trino/range_offset.yml000066400000000000000000000140731451700765000243150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 51b32d87af6e763f334ee4dda38980afe9ef256589c80dae87f4f12086e5b419 file: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: student_id - comma: ',' - select_clause_element: column_reference: naked_identifier: result - comma: ',' - select_clause_element: function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: result frame_clause: - keyword: RANGE - keyword: BETWEEN - numeric_literal: '1' - keyword: FOLLOWING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) alias_expression: keyword: AS naked_identifier: close_better_scores_count from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: students_results - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: custkey orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate frame_clause: - keyword: RANGE - keyword: BETWEEN - interval_expression: - keyword: interval - quoted_literal: "'1'" - keyword: DAY - keyword: PRECEDING - keyword: AND - interval_expression: - keyword: interval - quoted_literal: "'1'" - keyword: DAY - keyword: FOLLOWING end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: avg bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w - comma: ',' - select_clause_element: function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: totalprice end_bracket: ) over_clause: keyword: OVER naked_identifier: w from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: orders named_window: keyword: WINDOW named_window_expression: naked_identifier: w keyword: AS bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: column_reference: naked_identifier: custkey orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: orderdate frame_clause: - keyword: RANGE - keyword: BETWEEN - interval_expression: - keyword: 
interval - quoted_literal: "'1'" - keyword: month - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/trino/select_interval.sql000066400000000000000000000004111451700765000250230ustar00rootroot00000000000000SELECT CURRENT_DATE + INTERVAL '1' YEAR; ; SELECT CURRENT_DATE - INTERVAL '14' MONTH; ; SELECT CURRENT_DATE + INTERVAL '32' DAY; ; SELECT CURRENT_TIME - INTERVAL '13' HOUR; ; SELECT CURRENT_TIME + INTERVAL '64' MINUTE; ; SELECT CURRENT_TIME - INTERVAL '61' SECOND; sqlfluff-2.3.5/test/fixtures/dialects/trino/select_interval.yml000066400000000000000000000052051451700765000250330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 0016e90b5291b5fadb87daca36c1553feb56c725e4e4c21af8b31b77ba0e9786 file: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'1'" - keyword: YEAR - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'14'" - keyword: MONTH - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_DATE binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'32'" - keyword: DAY - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'13'" - keyword: HOUR - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: + interval_expression: - keyword: INTERVAL - quoted_literal: "'64'" - keyword: MINUTE - statement_terminator: ; - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: bare_function: CURRENT_TIME binary_operator: '-' interval_expression: - keyword: INTERVAL - quoted_literal: "'61'" - keyword: SECOND - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/trino/values.sql000066400000000000000000000002221451700765000231370ustar00rootroot00000000000000VALUES 42, 13; VALUES 1, 2, 3; VALUES 5, 2, 4, 1, 3; VALUES (1, 'a'), (2, 'b'), (3, 'c'); VALUES (26, 'POLAND', 3, 'no comment'); sqlfluff-2.3.5/test/fixtures/dialects/trino/values.yml000066400000000000000000000042671451700765000231560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: fd8cda2e1389d55bbf7ffe6f5e62961bdc20ebe0ebe1d630f6b708fe383a802f file: - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '42' - comma: ',' - expression: numeric_literal: '13' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: numeric_literal: '5' - comma: ',' - expression: numeric_literal: '2' - comma: ',' - expression: numeric_literal: '4' - comma: ',' - expression: numeric_literal: '1' - comma: ',' - expression: numeric_literal: '3' - statement_terminator: ; - statement: values_clause: - keyword: VALUES - expression: bracketed: start_bracket: ( numeric_literal: '1' comma: ',' quoted_literal: "'a'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '2' comma: ',' quoted_literal: "'b'" end_bracket: ) - comma: ',' - expression: bracketed: start_bracket: ( numeric_literal: '3' comma: ',' quoted_literal: "'c'" end_bracket: ) - statement_terminator: ; - statement: values_clause: keyword: VALUES expression: bracketed: - start_bracket: ( - numeric_literal: '26' - comma: ',' - quoted_literal: "'POLAND'" - comma: ',' - numeric_literal: '3' - comma: ',' - quoted_literal: "'no comment'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/000077500000000000000000000000001451700765000207535ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/dialects/tsql/.sqlfluff000066400000000000000000000000321451700765000225710ustar00rootroot00000000000000[sqlfluff] dialect = tsql sqlfluff-2.3.5/test/fixtures/dialects/tsql/add_index.sql000066400000000000000000000040411451700765000234120ustar00rootroot00000000000000IF NOT EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_VIMR_INFECTIOUS_PEOPLE') CREATE NONCLUSTERED INDEX IX_INTER_VIMR_INFECTIOUS_PEOPLE ON dbo.VIMR_INFECTIOUS_PEOPLE(DATE_LAST_INSERTED); GO IF NOT EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT') CREATE NONCLUSTERED INDEX IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT ON dbo.FOUNDATION_NICE_IC_INTAKE_COUNT(DATE_LAST_INSERTED); GO IF EXISTS(SELECT * FROM sys.indexes WHERE NAME='IX_INTER_VIMR_REPRODUCTION_NUMBER') CREATE CLUSTERED INDEX IX_INTER_VIMR_REPRODUCTION_NUMBER ON dbo.VIMR_INFECTIOUS_PEOPLE(DATE_LAST_INSERTED); GO CREATE NONCLUSTERED INDEX [ind_1] ON [schema1].[table1]([column1] ASC) INCLUDE([column2]) WHERE ([column3] IS NULL); GO CREATE NONCLUSTERED INDEX [NI_name] ON [schema1].[table1]([column1] DESC) ON [PRIMARY]; GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1], [column2]) WITH FILLFACTOR = 80; GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (PAD_INDEX = OFF, SORT_IN_TEMPDB = ON); CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (ONLINE = ON); GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH (ONLINE = ON (WAIT_AT_LOW_PRIORITY ( MAX_DURATION = 5 MINUTES, ABORT_AFTER_WAIT = NONE ) ) ); GO CREATE UNIQUE INDEX [ix_name] ON [schema1].[table1]([column1]) WITH DATA_COMPRESSION = ROW ON PARTITIONS (2, 4, 6 TO 8) GO CREATE STATISTICS Stats_Population ON [Reporting].[Population] ([ID],[Facility],[Population]) GO UPDATE STATISTICS Reporting.Population Stats_Population; GO UPDATE STATISTICS Reporting.Population (Stats_Facility, Stats_Population); GO UPDATE STATISTICS Reporting.Population 
(Stats_Facility, Stats_Population) WITH FULLSCAN; GO UPDATE STATISTICS Reporting.Population (Stats_Facility, Stats_Population) WITH RESAMPLE; GO DROP STATISTICS Reporting.Population.Stats_Population GO DROP INDEX IX_INTER_VIMR_REPRODUCTION_NUMBER ON dbo.VIMR_INFECTIOUS_PEOPLE; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/add_index.yml000066400000000000000000000405451451700765000234250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 32467a3df7496155a7a70394a04634d094a226fc21bbbc56ee5756e664acf72b file: - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_VIMR_INFECTIOUS_PEOPLE'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_INFECTIOUS_PEOPLE - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_FOUNDATION_NICE_IC_INTAKE_COUNT - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FOUNDATION_NICE_IC_INTAKE_COUNT - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . 
- naked_identifier: indexes where_clause: keyword: WHERE expression: column_reference: naked_identifier: NAME comparison_operator: raw_comparison_operator: '=' quoted_literal: "'IX_INTER_VIMR_REPRODUCTION_NUMBER'" end_bracket: ) statement: create_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_REPRODUCTION_NUMBER - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: quoted_identifier: '[ind_1]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' keyword: ASC end_bracket: ) - keyword: INCLUDE - bracketed: start_bracket: ( column_reference: quoted_identifier: '[column2]' end_bracket: ) - where_clause: keyword: WHERE bracketed: start_bracket: ( expression: - column_reference: quoted_identifier: '[column3]' - keyword: IS - keyword: 'NULL' end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: quoted_identifier: '[NI_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' keyword: DESC end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: quoted_identifier: '[column1]' - comma: ',' - index_column_definition: quoted_identifier: '[column2]' - end_bracket: ) - relational_index_options: - keyword: WITH - keyword: FILLFACTOR - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . 
- quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: PAD_INDEX - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: SORT_IN_TEMPDB - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: keyword: WITH bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - max_duration: - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - keyword: MINUTES - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: UNIQUE - keyword: INDEX - index_reference: quoted_identifier: '[ix_name]' - keyword: 'ON' - table_reference: - quoted_identifier: '[schema1]' - dot: . - quoted_identifier: '[table1]' - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: quoted_identifier: '[column1]' end_bracket: ) - relational_index_options: - keyword: WITH - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: ROW - on_partitions_clause: - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '6' - keyword: TO - numeric_literal: '8' - end_bracket: ) - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: STATISTICS - index_reference: naked_identifier: Stats_Population - keyword: 'ON' - table_reference: - quoted_identifier: '[Reporting]' - dot: . 
- quoted_identifier: '[Population]' - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: quoted_identifier: '[ID]' - comma: ',' - index_column_definition: quoted_identifier: '[Facility]' - comma: ',' - index_column_definition: quoted_identifier: '[Population]' - end_bracket: ) - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - naked_identifier: Stats_Population - statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - keyword: WITH - keyword: FULLSCAN statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statistics_statement: - keyword: UPDATE - keyword: STATISTICS - object_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - bracketed: - start_bracket: ( - naked_identifier: Stats_Facility - comma: ',' - naked_identifier: Stats_Population - end_bracket: ) - keyword: WITH - keyword: RESAMPLE statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_statement: - keyword: DROP - keyword: STATISTICS - index_reference: - naked_identifier: Reporting - dot: . - naked_identifier: Population - dot: . - naked_identifier: Stats_Population - go_statement: keyword: GO - batch: statement: drop_index_statement: - keyword: DROP - keyword: INDEX - index_reference: naked_identifier: IX_INTER_VIMR_REPRODUCTION_NUMBER - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: VIMR_INFECTIOUS_PEOPLE - statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_and_drop.sql000066400000000000000000000001771451700765000244560ustar00rootroot00000000000000ALTER TABLE [REPORTING].[UN_NEW] SWITCH to [REPORTING].[UN_BASE] WITH (TRUNCATE_TARGET = ON); DROP TABLE [REPORTING].[UN_NEW]; sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_and_drop.yml000066400000000000000000000024221451700765000244530ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 26ed370cdb2c40b13b0b5fd84c79cb0a9e30e6cc3e65047db4edd2f483509a39 file: batch: - statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: - quoted_identifier: '[REPORTING]' - dot: . - quoted_identifier: '[UN_NEW]' - keyword: SWITCH - keyword: to - object_reference: - quoted_identifier: '[REPORTING]' - dot: . 
- quoted_identifier: '[UN_BASE]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRUNCATE_TARGET - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[REPORTING]' - dot: . - quoted_identifier: '[UN_NEW]' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_index.sql000066400000000000000000000054111451700765000237730ustar00rootroot00000000000000ALTER INDEX index1 ON table1 REBUILD; ALTER INDEX ALL ON table1 REBUILD; ALTER INDEX idxcci_cci_target ON cci_target REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE PARTITION = 0 WITH (COMPRESS_ALL_ROW_GROUPS = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 REORGANIZE; ALTER INDEX cci_fact3ON ON fact3 REBUILD PARTITION = 12; ALTER INDEX cci_SimpleTable ON SimpleTable REBUILD WITH (DATA_COMPRESSION = COLUMNSTORE_ARCHIVE); ALTER INDEX cci_SimpleTable ON SimpleTable REBUILD WITH (DATA_COMPRESSION = COLUMNSTORE); ALTER INDEX PK_ProductPhoto_ProductPhotoID ON Production.ProductPhoto REORGANIZE WITH (LOB_COMPACTION = ON); ALTER INDEX IX_Employee_ManagerID ON HumanResources.Employee DISABLE; ALTER INDEX IX_INDEX1 ON T1 REBUILD WITH (XML_COMPRESSION = ON); ALTER INDEX ALL ON Production.Product REBUILD WITH (FILLFACTOR = 80, SORT_IN_TEMPDB = ON, STATISTICS_NORECOMPUTE = ON); ALTER INDEX test_idx on test_table REBUILD WITH (ONLINE = ON, MAXDOP = 1, RESUMABLE = ON); ALTER INDEX test_idx on test_table PAUSE; ALTER INDEX test_idx on test_table ABORT; ALTER INDEX test_idx on test_table REBUILD WITH (XML_COMPRESSION = ON ON PARTITIONS (2, 4, 6 TO 8)); ALTER INDEX test_idx on test_table REBUILD WITH (DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5)); ALTER INDEX test_idx on test_table REBUILD WITH (DATA_COMPRESSION = NONE ON PARTITIONS (1)); ALTER INDEX IX_TransactionHistory_TransactionDate ON Production.TransactionHistory REBUILD Partition = 5 WITH (ONLINE = ON (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF))); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (ALLOW_ROW_LOCKS = ON, ALLOW_PAGE_LOCKS = OFF); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (OPTIMIZE_FOR_SEQUENTIAL_KEY = ON, IGNORE_DUP_KEY = OFF, STATISTICS_NORECOMPUTE = ON); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 0); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 SET (COMPRESSION_DELAY = 100 minutes); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME; ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAXDOP = 100, MAX_DURATION = 500 minutes, WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF)); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (MAX_DURATION = 500); ALTER INDEX cci_FactInternetSales2 ON FactInternetSales2 RESUME WITH (WAIT_AT_LOW_PRIORITY (MAX_DURATION = 10, ABORT_AFTER_WAIT = SELF)); sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_index.yml000066400000000000000000000427431451700765000240060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # 
hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ae559c6095a99da7a6eaa94388edbca341d6980ecac15a47f886246db5ce0e8e file: batch: - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: index1 - keyword: 'ON' - table_reference: naked_identifier: table1 - keyword: REBUILD - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: 'ON' - table_reference: naked_identifier: table1 - keyword: REBUILD - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: idxcci_cci_target - keyword: 'ON' - table_reference: naked_identifier: cci_target - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '0' - keyword: WITH - bracketed: - start_bracket: ( - keyword: COMPRESS_ALL_ROW_GROUPS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: REORGANIZE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_fact3ON - keyword: 'ON' - table_reference: naked_identifier: fact3 - keyword: REBUILD - keyword: PARTITION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '12' - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_SimpleTable - keyword: 'ON' - table_reference: naked_identifier: SimpleTable - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: 
raw_comparison_operator: '=' - keyword: COLUMNSTORE_ARCHIVE - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_SimpleTable - keyword: 'ON' - table_reference: naked_identifier: SimpleTable - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: COLUMNSTORE - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: PK_ProductPhoto_ProductPhotoID - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductPhoto - keyword: REORGANIZE - keyword: WITH - bracketed: - start_bracket: ( - keyword: LOB_COMPACTION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_Employee_ManagerID - keyword: 'ON' - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee - keyword: DISABLE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_INDEX1 - keyword: 'ON' - table_reference: naked_identifier: T1 - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - keyword: ALL - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: Product - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: FILLFACTOR - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '80' - comma: ',' - keyword: SORT_IN_TEMPDB - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: STATISTICS_NORECOMPUTE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: MAXDOP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - comma: ',' - keyword: RESUMABLE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: PAUSE - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: ABORT - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '2' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '6' - keyword: TO - numeric_literal: '8' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: PAGE - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: test_idx - keyword: 'on' - table_reference: naked_identifier: test_table - keyword: REBUILD - keyword: WITH - bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: NONE - keyword: 'ON' - keyword: PARTITIONS - bracketed: start_bracket: ( numeric_literal: '1' end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: IX_TransactionHistory_TransactionDate - keyword: 'ON' - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: TransactionHistory - keyword: REBUILD - keyword: Partition - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '5' - keyword: WITH - bracketed: - start_bracket: ( - keyword: ONLINE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: ALLOW_ROW_LOCKS - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: ALLOW_PAGE_LOCKS - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: OPTIMIZE_FOR_SEQUENTIAL_KEY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: IGNORE_DUP_KEY - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: STATISTICS_NORECOMPUTE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: start_bracket: ( keyword: COMPRESSION_DELAY comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: SET - bracketed: - start_bracket: ( - keyword: COMPRESSION_DELAY - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - keyword: minutes - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: - start_bracket: ( - keyword: MAXDOP - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '100' - comma: ',' - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '500' - keyword: minutes - comma: ',' - keyword: WAIT_AT_LOW_PRIORITY - bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - 
comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: start_bracket: ( keyword: MAX_DURATION comparison_operator: raw_comparison_operator: '=' numeric_literal: '500' end_bracket: ) - statement_terminator: ; - statement: alter_index_statement: - keyword: ALTER - keyword: INDEX - object_reference: naked_identifier: cci_FactInternetSales2 - keyword: 'ON' - table_reference: naked_identifier: FactInternetSales2 - keyword: RESUME - keyword: WITH - bracketed: start_bracket: ( keyword: WAIT_AT_LOW_PRIORITY bracketed: - start_bracket: ( - keyword: MAX_DURATION - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: ABORT_AFTER_WAIT - comparison_operator: raw_comparison_operator: '=' - keyword: SELF - end_bracket: ) end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_table.sql000066400000000000000000000056451451700765000237640ustar00rootroot00000000000000CREATE TABLE dbo.doc_exa (column_a INT) ; GO ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL ; GO CREATE TABLE dbo.doc_exc (column_a INT) ; GO ALTER TABLE dbo.doc_exc ADD column_b VARCHAR(20) NULL CONSTRAINT exb_unique UNIQUE, DROP COLUMN column_a, DROP COLUMN IF EXISTS column_c ; GO EXEC sp_help doc_exc ; GO DROP TABLE dbo.doc_exc ; GO CREATE TABLE dbo.doc_exz (column_a INT, column_b INT) ; GO INSERT INTO dbo.doc_exz (column_a) VALUES (7) ; GO ALTER TABLE dbo.doc_exz ADD CONSTRAINT col_b_def DEFAULT 50 FOR column_b ; GO INSERT INTO dbo.doc_exz (column_a) VALUES (10) ; GO SELECT * FROM dbo.doc_exz ; GO DROP TABLE dbo.doc_exz ; GO ALTER TABLE Production.TransactionHistoryArchive ADD CONSTRAINT PK_TransactionHistoryArchive_TransactionID PRIMARY KEY CLUSTERED (TransactionID) GO ALTER TABLE Production.TransactionHistoryArchive ALTER COLUMN rec_number VARCHAR(36) GO ALTER TABLE Production.TransactionHistoryArchive DROP CONSTRAINT PK_TransactionHistoryArchive_TransactionID ALTER TABLE [Production].[ProductCostHistory] WITH CHECK ADD CONSTRAINT [FK_ProductCostHistory_Product_ProductID] FOREIGN KEY([ProductID]) REFERENCES [Production].[Product] ([ProductID]) GO ALTER TABLE [Production].[ProductCostHistory] CHECK CONSTRAINT [FK_ProductCostHistory_Product_ProductID] GO ALTER TABLE my_table ADD my_col_1 INT , my_col_2 INT GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = ON); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = INFINITE )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = 1 YEAR )); GO ALTER TABLE TestTable SET (SYSTEM_VERSIONING = OFF ( HISTORY_TABLE = TestTableHistory, DATA_CONSISTENCY_CHECK = ON, HISTORY_RETENTION_PERIOD = 7 MONTHS )); GO ALTER TABLE TestTable SET (FILESTREAM_ON = "NULL"); GO ALTER TABLE TestTable SET (FILESTREAM_ON = "default"); GO ALTER TABLE TestTable SET 
(FILESTREAM_ON = PartitionSchemeName); GO ALTER TABLE TestTable SET (DATA_DELETION = ON); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 1 YEAR)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = INFINITE)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 YEARS)); GO ALTER TABLE TestTable SET (DATA_DELETION = OFF(FILTER_COLUMN = ColumnName, RETENTION_PERIOD = 7 DAYS)); GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_table.yml000066400000000000000000000537251451700765000237700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bc7ad7040e8454374ad40070f2b85fdb9ddb06ffcabe49cd8111153db4f39862 file: - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exa - bracketed: start_bracket: ( column_definition: naked_identifier: column_a data_type: data_type_identifier: INT end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exa - keyword: ADD - column_definition: naked_identifier: column_b data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) column_constraint_segment: keyword: 'NULL' statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exc - bracketed: start_bracket: ( column_definition: naked_identifier: column_a data_type: data_type_identifier: INT end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exc - keyword: ADD - column_definition: - naked_identifier: column_b - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) - column_constraint_segment: keyword: 'NULL' - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: exb_unique - keyword: UNIQUE - comma: ',' - keyword: DROP - keyword: COLUMN - column_reference: naked_identifier: column_a - comma: ',' - keyword: DROP - keyword: COLUMN - keyword: IF - keyword: EXISTS - column_reference: naked_identifier: column_c statement_terminator: ; - go_statement: keyword: GO - batch: statement: execute_script_statement: keyword: EXEC object_reference: naked_identifier: sp_help naked_identifier: doc_exc statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: doc_exc - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: - start_bracket: ( - column_definition: naked_identifier: column_a data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: column_b data_type: data_type_identifier: INT - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: start_bracket: ( column_reference: naked_identifier: column_a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '7' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - keyword: ADD - column_constraint_segment: - keyword: CONSTRAINT - object_reference: naked_identifier: col_b_def - keyword: DEFAULT - numeric_literal: '50' - keyword: FOR - column_reference: naked_identifier: column_b statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - bracketed: start_bracket: ( column_reference: naked_identifier: column_a end_bracket: ) - values_clause: keyword: VALUES bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: doc_exz - statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: naked_identifier: PK_TransactionHistoryArchive_TransactionID - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: TransactionID end_bracket: ) - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: TransactionHistoryArchive - keyword: ALTER - keyword: COLUMN - column_definition: naked_identifier: rec_number data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '36' end_bracket: ) - go_statement: keyword: GO - batch: - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: TransactionHistoryArchive - keyword: DROP - keyword: CONSTRAINT - object_reference: naked_identifier: PK_TransactionHistoryArchive_TransactionID - statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[Production]' - dot: . - quoted_identifier: '[ProductCostHistory]' - keyword: WITH - keyword: CHECK - keyword: ADD - table_constraint: - keyword: CONSTRAINT - object_reference: quoted_identifier: '[FK_ProductCostHistory_Product_ProductID]' - keyword: FOREIGN - keyword: KEY - bracketed: start_bracket: ( column_reference: quoted_identifier: '[ProductID]' end_bracket: ) - references_constraint_grammar: keyword: REFERENCES table_reference: - quoted_identifier: '[Production]' - dot: . - quoted_identifier: '[Product]' bracketed: start_bracket: ( column_reference: quoted_identifier: '[ProductID]' end_bracket: ) - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: - quoted_identifier: '[Production]' - dot: . - quoted_identifier: '[ProductCostHistory]' - keyword: CHECK - keyword: CONSTRAINT - object_reference: quoted_identifier: '[FK_ProductCostHistory_Product_ProductID]' - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: my_table - keyword: ADD - column_definition: naked_identifier: my_col_1 data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: my_col_2 data_type: data_type_identifier: INT - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: naked_identifier: TestTableHistory end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET 
- bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - date_part: YEAR - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: naked_identifier: TestTableHistory - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: MONTHS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: quoted_identifier: '"NULL"' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: quoted_identifier: '"default"' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: start_bracket: ( keyword: FILESTREAM_ON comparison_operator: raw_comparison_operator: '=' filegroup_name: naked_identifier: PartitionSchemeName end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) statement_terminator: ; - 
go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: start_bracket: ( keyword: FILTER_COLUMN comparison_operator: raw_comparison_operator: '=' column_reference: naked_identifier: ColumnName end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - date_part: YEAR - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: YEARS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: alter_table_statement: - keyword: ALTER - keyword: TABLE - table_reference: naked_identifier: TestTable - keyword: SET - bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnName - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '7' - date_part: DAYS - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_table_switch.sql000066400000000000000000000002241451700765000253310ustar00rootroot00000000000000--TRUNCATE_TARGET is Azure Synapse Analytics specific ALTER TABLE [Facility].[PL_stage] SWITCH TO [Facility].[PL_BASE] WITH (TRUNCATE_TARGET = ON); sqlfluff-2.3.5/test/fixtures/dialects/tsql/alter_table_switch.yml000066400000000000000000000020351451700765000253350ustar00rootroot00000000000000# YML test files are auto-generated 
from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a40fabce576738fd3cf6906914c39c675c61a6f105bd4e3e642815c931db1b3e file: batch: statement: alter_table_switch_statement: - keyword: ALTER - keyword: TABLE - object_reference: - quoted_identifier: '[Facility]' - dot: . - quoted_identifier: '[PL_stage]' - keyword: SWITCH - keyword: TO - object_reference: - quoted_identifier: '[Facility]' - dot: . - quoted_identifier: '[PL_BASE]' - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRUNCATE_TARGET - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end.sql000066400000000000000000000001741451700765000234100ustar00rootroot00000000000000BEGIN SELECT 'Weekend'; END BEGIN; SELECT 'Weekend'; END; BEGIN; SELECT 'Weekend'; END BEGIN SELECT 'Weekend'; END; sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end.yml000066400000000000000000000033071451700765000234130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 89e27b9db8605baae4659e1ecea24f550f5326f3cf87fb886482d9fbac8d8f6c file: batch: - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement: begin_end_block: - keyword: BEGIN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end_nested.sql000066400000000000000000000000531451700765000247460ustar00rootroot00000000000000BEGIN BEGIN SELECT 'Weekend'; END END; sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end_nested.yml000066400000000000000000000015341451700765000247550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 39641013f110b42ad9666ff4c19a1588fac779efd1a5302f7d26735c28f3184e file: batch: statement: begin_end_block: - keyword: BEGIN - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - keyword: END - keyword: END statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end_no_semicolon.sql000066400000000000000000000000251451700765000261470ustar00rootroot00000000000000BEGIN SELECT 1 END sqlfluff-2.3.5/test/fixtures/dialects/tsql/begin_end_no_semicolon.yml000066400000000000000000000012271451700765000261560ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5fb241b3172412eea845ce528dfdcda03e3470d89a9411bacbacd61898db3b8f file: batch: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/bulk_insert.sql000066400000000000000000000003741451700765000240210ustar00rootroot00000000000000-- Plain BULK insert BULK INSERT my_schema.my_table FROM 'data.csv'; -- BULK insert with options BULK INSERT my_schema.my_table FROM 'data.csv' WITH ( BATCHSIZE = 1024, CHECK_CONSTRAINTS, ORDER (col1 ASC, col2 DESC), FORMAT = 'CSV' ); sqlfluff-2.3.5/test/fixtures/dialects/tsql/bulk_insert.yml000066400000000000000000000034711451700765000240240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 052ffa5a983c0b3920c31222e9477343d61d42569770c2ff4e8b70e407065140 file: batch: - statement: bulk_insert_statement: - keyword: BULK - keyword: INSERT - table_reference: - naked_identifier: my_schema - dot: . - naked_identifier: my_table - keyword: FROM - quoted_literal: "'data.csv'" - statement_terminator: ; - statement: bulk_insert_statement: - keyword: BULK - keyword: INSERT - table_reference: - naked_identifier: my_schema - dot: . 
- naked_identifier: my_table - keyword: FROM - quoted_literal: "'data.csv'" - bulk_insert_with_segment: keyword: WITH bracketed: - start_bracket: ( - keyword: BATCHSIZE - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1024' - comma: ',' - keyword: CHECK_CONSTRAINTS - comma: ',' - keyword: ORDER - bracketed: - start_bracket: ( - column_reference: naked_identifier: col1 - keyword: ASC - comma: ',' - column_reference: naked_identifier: col2 - keyword: DESC - end_bracket: ) - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/cast_variable.sql000066400000000000000000000003721451700765000242750ustar00rootroot00000000000000DECLARE @DateNow date = ISNULL(Shared.GetESTDateTime(GETDATE()), GETDATE()) select enc.personid as personid, cast('1900-01-01' as datetime2(7)) as DataRefreshDate from encounter enc; declare @sample nvarchar(max) = cast(100 as nvarchar(max)) sqlfluff-2.3.5/test/fixtures/dialects/tsql/cast_variable.yml000066400000000000000000000075711451700765000243070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 935199c7ac89c3a42afe824da3a00b3541e19ed276476b2ef8cd50e05ab7c023 file: batch: - statement: declare_segment: keyword: DECLARE parameter: '@DateNow' data_type: data_type_identifier: date comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ISNULL bracketed: - start_bracket: ( - expression: function: function_name: naked_identifier: Shared dot: . function_name_identifier: GetESTDateTime bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: enc - dot: . 
                  - naked_identifier: personid
                alias_expression:
                  keyword: as
                  naked_identifier: personid
            - comma: ','
            - select_clause_element:
                function:
                  function_name:
                    keyword: cast
                  bracketed:
                    start_bracket: (
                    expression:
                      quoted_literal: "'1900-01-01'"
                    keyword: as
                    data_type:
                      data_type_identifier: datetime2
                      bracketed_arguments:
                        bracketed:
                          start_bracket: (
                          expression:
                            numeric_literal: '7'
                          end_bracket: )
                    end_bracket: )
                alias_expression:
                  keyword: as
                  naked_identifier: DataRefreshDate
          from_clause:
            keyword: from
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: encounter
                alias_expression:
                  naked_identifier: enc
          statement_terminator: ;
    - statement:
        declare_segment:
          keyword: declare
          parameter: '@sample'
          data_type:
            data_type_identifier: nvarchar
            bracketed_arguments:
              bracketed:
                start_bracket: (
                keyword: max
                end_bracket: )
          comparison_operator:
            raw_comparison_operator: '='
          expression:
            function:
              function_name:
                keyword: cast
              bracketed:
                start_bracket: (
                expression:
                  numeric_literal: '100'
                keyword: as
                data_type:
                  data_type_identifier: nvarchar
                  bracketed_arguments:
                    bracketed:
                      start_bracket: (
                      keyword: max
                      end_bracket: )
                end_bracket: )

sqlfluff-2.3.5/test/fixtures/dialects/tsql/collate.sql

-- `COLLATE` in JOIN condition
SELECT table1.col
FROM table1
INNER JOIN table2
    ON table1.col = table2.col COLLATE Latin1_GENERAL_CS_AS;

SELECT table1.col
FROM table1
INNER JOIN table2
    ON table1.col COLLATE Latin1_GENERAL_CS_AS = table2.col;

-- `COLLATE` in ORDER BY clause
SELECT col
FROM my_table
ORDER BY col COLLATE Latin1_General_CS_AS_KS_WS DESC;

-- `COLLATE` in SELECT
SELECT col COLLATE Latin1_General_CS_AS_KS_WS FROM my_table;

SELECT col COLLATE database_default FROM my_table;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/collate.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: dca7d5478d7b46625258401d664c36b95e94cffc6a4e046d2f72e1dc54ce2b34
file:
  batch:
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              column_reference:
                - naked_identifier: table1
                - dot: .
                - naked_identifier: col
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: table1
              join_clause:
                - keyword: INNER
                - keyword: JOIN
                - from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: table2
                - join_on_condition:
                    keyword: 'ON'
                    expression:
                      - column_reference:
                          - naked_identifier: table1
                          - dot: .
                          - naked_identifier: col
                      - comparison_operator:
                          raw_comparison_operator: '='
                      - column_reference:
                          - naked_identifier: table2
                          - dot: .
                          - naked_identifier: col
                      - keyword: COLLATE
                      - collation_reference:
                          naked_identifier: Latin1_GENERAL_CS_AS
          statement_terminator: ;
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              column_reference:
                - naked_identifier: table1
                - dot: .
                - naked_identifier: col
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: table1
              join_clause:
                - keyword: INNER
                - keyword: JOIN
                - from_expression_element:
                    table_expression:
                      table_reference:
                        naked_identifier: table2
                - join_on_condition:
                    keyword: 'ON'
                    expression:
                      - column_reference:
                          - naked_identifier: table1
                          - dot: .
                          - naked_identifier: col
                      - keyword: COLLATE
                      - collation_reference:
                          naked_identifier: Latin1_GENERAL_CS_AS
                      - comparison_operator:
                          raw_comparison_operator: '='
                      - column_reference:
                          - naked_identifier: table2
                          - dot: .
                          - naked_identifier: col
          statement_terminator: ;
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              column_reference:
                naked_identifier: col
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: my_table
          orderby_clause:
            - keyword: ORDER
            - keyword: BY
            - expression:
                column_reference:
                  naked_identifier: col
                keyword: COLLATE
                collation_reference:
                  naked_identifier: Latin1_General_CS_AS_KS_WS
            - keyword: DESC
          statement_terminator: ;
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              expression:
                column_reference:
                  naked_identifier: col
                keyword: COLLATE
                collation_reference:
                  naked_identifier: Latin1_General_CS_AS_KS_WS
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: my_table
          statement_terminator: ;
    - statement:
        select_statement:
          select_clause:
            keyword: SELECT
            select_clause_element:
              expression:
                column_reference:
                  naked_identifier: col
                keyword: COLLATE
                collation_reference:
                  naked_identifier: database_default
          from_clause:
            keyword: FROM
            from_expression:
              from_expression_element:
                table_expression:
                  table_reference:
                    naked_identifier: my_table
          statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/comment_blocks.sql

/* birds /* live /* in /* nests */ */ */fdsfdlsjf */
/* although /* so /**/ do */ wasps */

SELECT foo /*nest/*/*nest/*/*/* nest nest /* */*/*/*/nest*/*/*/
FROM bar

-- A recursive block comment (fun patternwise - no actual recursion going on ofc)
/* A block comment looks like
/* A block comment looks like
/* A block comment looks like
/* A block comment looks like
/* A block comment looks like
/* ... */
*/
*/
*/
*/
*/

-- Test cases from #2086.
/** **/
/** ( **/
/** ' **/

sqlfluff-2.3.5/test/fixtures/dialects/tsql/comment_blocks.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: fcb82f7553b7e5c50a26a1af66d146eed4778441a2332740b9f8388837cc56d9
file:
  batch:
    statement:
      select_statement:
        select_clause:
          keyword: SELECT
          select_clause_element:
            column_reference:
              naked_identifier: foo
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: bar

sqlfluff-2.3.5/test/fixtures/dialects/tsql/convert.sql

SELECT
    CONVERT(nvarchar(100), first_column) as first,
    TRY_CONVERT(float, second_column) as second
FROM some_table

sqlfluff-2.3.5/test/fixtures/dialects/tsql/convert.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4473a6434c9f0ce4b91fae0d3d548d74f7374294e65e6d08f3e5d5c60f2bdd62
file:
  batch:
    statement:
      select_statement:
        select_clause:
          - keyword: SELECT
          - select_clause_element:
              function:
                function_name:
                  keyword: CONVERT
                bracketed:
                  start_bracket: (
                  data_type:
                    data_type_identifier: nvarchar
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '100'
                        end_bracket: )
                  comma: ','
                  expression:
                    column_reference:
                      naked_identifier: first_column
                  end_bracket: )
              alias_expression:
                keyword: as
                naked_identifier: first
          - comma: ','
          - select_clause_element:
              function:
                function_name:
                  keyword: TRY_CONVERT
                bracketed:
                  start_bracket: (
                  data_type:
                    data_type_identifier: float
                  comma: ','
                  expression:
                    column_reference:
                      naked_identifier: second_column
                  end_bracket: )
              alias_expression:
                keyword: as
                naked_identifier: second
        from_clause:
          keyword: FROM
          from_expression:
            from_expression_element:
              table_expression:
                table_reference:
                  naked_identifier: some_table

sqlfluff-2.3.5/test/fixtures/dialects/tsql/copy.sql

COPY INTO dbo.[lineitem]
FROM 'https://unsecureaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.csv';

COPY INTO test_1
FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/'
WITH (
    FILE_TYPE = 'CSV',
    CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET=''),
    --CREDENTIAL should look something like this:
    --CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET='?sv=2018-03-28&ss=bfqt&srt=sco&sp=rl&st=2016-10-17T20%3A14%3A55Z&se=2021-10-18T20%3A19%3A00Z&sig=IEoOdmeYnE9%2FKiJDSHFSYsz4AkNa%2F%2BTx61FuQ%2FfKHefqoBE%3D'),
    FIELDQUOTE = '"',
    FIELDTERMINATOR=';',
    ROWTERMINATOR='0X0A',
    ENCODING = 'UTF8',
    DATEFORMAT = 'ymd',
    MAXERRORS = 10,
    ERRORFILE = '/errorsfolder',--path starting from the storage container
    IDENTITY_INSERT = 'ON'
);

COPY INTO test_parquet
FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/*.parquet'
WITH (
    FILE_FORMAT = myFileFormat,
    CREDENTIAL=(IDENTITY= 'Shared Access Signature', SECRET='')
);

COPY INTO t1
FROM
    'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt',
    'https://myaccount.blob.core.windows.net/myblobcontainer/folder1'
WITH (
    FILE_TYPE = 'CSV',
    CREDENTIAL=(IDENTITY= '@',SECRET=''),
    FIELDTERMINATOR = '|'
);

COPY INTO dbo.myCOPYDemoTable
FROM 'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt'
WITH (
    FILE_TYPE = 'CSV',
    CREDENTIAL = (IDENTITY = 'Managed Identity'),
    FIELDQUOTE = '"',
    FIELDTERMINATOR=','
);

COPY INTO [myCOPYDemoTable]
FROM 'https://myaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.parquet'
WITH (
    FILE_TYPE = 'Parquet',
    CREDENTIAL = ( IDENTITY = 'Shared Access Signature', SECRET=''),
    AUTO_CREATE_TABLE = 'ON'
);

sqlfluff-2.3.5/test/fixtures/dialects/tsql/copy.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 6b04459bc8222f88cba2e2cb188c4f633a375cef08e86ff05d4179aa1344a216
file:
  batch:
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              naked_identifier: dbo
              dot: .
              quoted_identifier: '[lineitem]'
          - from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    storage_location:
                      external_location: "'https://unsecureaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.csv'"
        statement_terminator: ;
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              naked_identifier: test_1
          - from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    storage_location:
                      external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/'"
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: FILE_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'CSV'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - bracketed:
                  - start_bracket: (
                  - keyword: IDENTITY
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "'Shared Access Signature'"
                  - comma: ','
                  - keyword: SECRET
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "''"
                  - end_bracket: )
              - comma: ','
              - keyword: FIELDQUOTE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'\"'"
              - comma: ','
              - keyword: FIELDTERMINATOR
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "';'"
              - comma: ','
              - keyword: ROWTERMINATOR
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'0X0A'"
              - comma: ','
              - keyword: ENCODING
              - comparison_operator:
                  raw_comparison_operator: '='
              - file_encoding: "'UTF8'"
              - comma: ','
              - keyword: DATEFORMAT
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'ymd'"
              - comma: ','
              - keyword: MAXERRORS
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '10'
              - comma: ','
              - keyword: ERRORFILE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'/errorsfolder'"
              - comma: ','
              - keyword: IDENTITY_INSERT
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'ON'"
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              naked_identifier: test_parquet
          - from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    storage_location:
                      external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1/*.parquet'"
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: FILE_FORMAT
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: myFileFormat
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - bracketed:
                  - start_bracket: (
                  - keyword: IDENTITY
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "'Shared Access Signature'"
                  - comma: ','
                  - keyword: SECRET
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "''"
                  - end_bracket: )
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              naked_identifier: t1
          - from_clause:
              - keyword: FROM
              - from_expression:
                  from_expression_element:
                    table_expression:
                      storage_location:
                        external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt'"
              - comma: ','
              - from_expression:
                  from_expression_element:
                    table_expression:
                      storage_location:
                        external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder1'"
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: FILE_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'CSV'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - bracketed:
                  - start_bracket: (
                  - keyword: IDENTITY
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "'@'"
                  - comma: ','
                  - keyword: SECRET
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "''"
                  - end_bracket: )
              - comma: ','
              - keyword: FIELDTERMINATOR
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'|'"
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              - naked_identifier: dbo
              - dot: .
              - naked_identifier: myCOPYDemoTable
          - from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    storage_location:
                      external_location: "'https://myaccount.blob.core.windows.net/myblobcontainer/folder0/*.txt'"
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: FILE_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'CSV'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - bracketed:
                  start_bracket: (
                  keyword: IDENTITY
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "'Managed Identity'"
                  end_bracket: )
              - comma: ','
              - keyword: FIELDQUOTE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'\"'"
              - comma: ','
              - keyword: FIELDTERMINATOR
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "','"
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        copy_into_table_statement:
          - keyword: COPY
          - keyword: INTO
          - table_reference:
              quoted_identifier: '[myCOPYDemoTable]'
          - from_clause:
              keyword: FROM
              from_expression:
                from_expression_element:
                  table_expression:
                    storage_location:
                      external_location: "'https://myaccount.blob.core.windows.net/customerdatasets/folder1/lineitem.parquet'"
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: FILE_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'Parquet'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - bracketed:
                  - start_bracket: (
                  - keyword: IDENTITY
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "'Shared Access Signature'"
                  - comma: ','
                  - keyword: SECRET
                  - comparison_operator:
                      raw_comparison_operator: '='
                  - quoted_literal: "''"
                  - end_bracket: )
              - comma: ','
              - keyword: AUTO_CREATE_TABLE
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'ON'"
              - end_bracket: )
          - statement_terminator: ;
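Editorial note: the copy.sql fixture above exercises each COPY INTO option largely in isolation. As a minimal illustrative sketch only (not part of the fixture set, and not covered by any parse hash), the same options compose in a single statement; the account, container, and target table names below are invented for the example:

-- Illustrative sketch: combines options already parsed individually in copy.sql.
-- dbo.demo_target and the storage URL are hypothetical.
COPY INTO dbo.demo_target
FROM 'https://myaccount.blob.core.windows.net/mycontainer/folder1/*.csv'
WITH (
    FILE_TYPE = 'CSV',
    CREDENTIAL = (IDENTITY = 'Managed Identity'),
    FIELDQUOTE = '"',
    FIELDTERMINATOR = ',',
    MAXERRORS = 10,
    ERRORFILE = '/errorsfolder'
);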
sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_database_scoped_credential.sql

CREATE DATABASE SCOPED CREDENTIAL AppCred WITH IDENTITY = 'Mary5';

CREATE DATABASE SCOPED CREDENTIAL AppCred
WITH IDENTITY = 'Mary5', SECRET = '';

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_database_scoped_credential.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 990a242afa79affe3e73f08022a569ce596765b1a3151a411810c97e5680db92
file:
  batch:
    - statement:
        create_database_scoped_credential_statement:
          - keyword: CREATE
          - keyword: DATABASE
          - keyword: SCOPED
          - keyword: CREDENTIAL
          - object_reference:
              naked_identifier: AppCred
          - keyword: WITH
          - keyword: IDENTITY
          - comparison_operator:
              raw_comparison_operator: '='
          - quoted_literal: "'Mary5'"
    - statement_terminator: ;
    - statement:
        create_database_scoped_credential_statement:
          - keyword: CREATE
          - keyword: DATABASE
          - keyword: SCOPED
          - keyword: CREDENTIAL
          - object_reference:
              naked_identifier: AppCred
          - keyword: WITH
          - keyword: IDENTITY
          - comparison_operator:
              raw_comparison_operator: '='
          - quoted_literal: "'Mary5'"
          - comma: ','
          - keyword: SECRET
          - comparison_operator:
              raw_comparison_operator: '='
          - quoted_literal: "''"
    - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_data_source.sql

CREATE EXTERNAL DATA SOURCE MyOracleServer
WITH (
    LOCATION = 'oracle://145.145.145.145:1521',
    CREDENTIAL = OracleProxyAccount,
    PUSHDOWN = ON
);

CREATE EXTERNAL DATA SOURCE [OracleSalesSrvr]
WITH (
    LOCATION = 'oracle://145.145.145.145:1521',
    CONNECTION_OPTIONS = 'ImpersonateUser=%CURRENT_USER',
    CREDENTIAL = [OracleProxyCredential]
);

CREATE EXTERNAL DATA SOURCE [external_data_source_name]
WITH (
    LOCATION = N'oracle://XE',
    CREDENTIAL = [OracleCredentialTest],
    CONNECTION_OPTIONS = N'TNSNamesFile=C:\Temp\tnsnames.ora;ServerName=XE'
);

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_data_source.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 4ee9e14852d24196e56654034afae16e6b08f5929988bd7701cd477429ca1e3a
file:
  batch:
    - statement:
        create_external_data_source_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: DATA
          - keyword: SOURCE
          - object_reference:
              naked_identifier: MyOracleServer
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - table_location_clause:
                  keyword: LOCATION
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "'oracle://145.145.145.145:1521'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: OracleProxyAccount
              - comma: ','
              - keyword: PUSHDOWN
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: 'ON'
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_data_source_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: DATA
          - keyword: SOURCE
          - object_reference:
              quoted_identifier: '[OracleSalesSrvr]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - table_location_clause:
                  keyword: LOCATION
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "'oracle://145.145.145.145:1521'"
              - comma: ','
              - keyword: CONNECTION_OPTIONS
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'ImpersonateUser=%CURRENT_USER'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  quoted_identifier: '[OracleProxyCredential]'
              - end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_data_source_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: DATA
          - keyword: SOURCE
          - object_reference:
              quoted_identifier: '[external_data_source_name]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - table_location_clause:
                  keyword: LOCATION
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "N'oracle://XE'"
              - comma: ','
              - keyword: CREDENTIAL
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  quoted_identifier: '[OracleCredentialTest]'
              - comma: ','
              - keyword: CONNECTION_OPTIONS
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "N'TNSNamesFile=C:\\Temp\\tnsnames.ora;ServerName=XE'"
              - end_bracket: )
          - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_file_format.sql

/*
https://learn.microsoft.com/en-us/sql/t-sql/statements/create-external-file-format-transact-sql?view=sql-server-ver16&tabs=delimited#examples
*/
CREATE EXTERNAL FILE FORMAT textdelimited1
WITH (
    FORMAT_TYPE = DELIMITEDTEXT,
    FORMAT_OPTIONS (
        FIELD_TERMINATOR = '|',
        DATE_FORMAT = 'MM/dd/yyyy'
    ),
    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.GzipCodec'
);

CREATE EXTERNAL FILE FORMAT skipHeader_CSV
WITH (
    FORMAT_TYPE = DELIMITEDTEXT,
    FORMAT_OPTIONS (
        FIELD_TERMINATOR = ',',
        STRING_DELIMITER = '"',
        FIRST_ROW = 2,
        USE_TYPE_DEFAULT = True
    )
);

CREATE EXTERNAL FILE FORMAT [rcfile1]
WITH (
    FORMAT_TYPE = RCFILE,
    SERDE_METHOD = 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe',
    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.DefaultCodec'
);

CREATE EXTERNAL FILE FORMAT orcfile1
WITH (
    FORMAT_TYPE = ORC,
    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
);

CREATE EXTERNAL FILE FORMAT parquetfile1
WITH (
    FORMAT_TYPE = PARQUET,
    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
);

CREATE EXTERNAL FILE FORMAT jsonFileFormat
WITH (
    FORMAT_TYPE = JSON,
    DATA_COMPRESSION = 'org.apache.hadoop.io.compress.SnappyCodec'
);

CREATE EXTERNAL FILE FORMAT DeltaFileFormat
WITH (
    FORMAT_TYPE = DELTA
);

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_file_format.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: c3c9eb5f60f14a124a441e27bdcdfed2cd507bd99e30502ab64ce6abf3c0d8bc
file:
  batch:
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: textdelimited1
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_delimited_text_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: DELIMITEDTEXT
                - comma: ','
                - keyword: FORMAT_OPTIONS
                - bracketed:
                    - start_bracket: (
                    - external_file_delimited_text_format_options_clause:
                        keyword: FIELD_TERMINATOR
                        comparison_operator:
                          raw_comparison_operator: '='
                        quoted_literal: "'|'"
                    - comma: ','
                    - external_file_delimited_text_format_options_clause:
                        keyword: DATE_FORMAT
                        comparison_operator:
                          raw_comparison_operator: '='
                        quoted_literal: "'MM/dd/yyyy'"
                    - end_bracket: )
                - comma: ','
                - keyword: DATA_COMPRESSION
                - comparison_operator:
                    raw_comparison_operator: '='
                - file_compression: "'org.apache.hadoop.io.compress.GzipCodec'"
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: skipHeader_CSV
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_delimited_text_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: DELIMITEDTEXT
                - comma: ','
                - keyword: FORMAT_OPTIONS
                - bracketed:
                    - start_bracket: (
                    - external_file_delimited_text_format_options_clause:
                        keyword: FIELD_TERMINATOR
                        comparison_operator:
                          raw_comparison_operator: '='
                        quoted_literal: "','"
                    - comma: ','
                    - external_file_delimited_text_format_options_clause:
                        keyword: STRING_DELIMITER
                        comparison_operator:
                          raw_comparison_operator: '='
                        quoted_literal: "'\"'"
                    - comma: ','
                    - external_file_delimited_text_format_options_clause:
                        keyword: FIRST_ROW
                        comparison_operator:
                          raw_comparison_operator: '='
                        numeric_literal: '2'
                    - comma: ','
                    - external_file_delimited_text_format_options_clause:
                        keyword: USE_TYPE_DEFAULT
                        comparison_operator:
                          raw_comparison_operator: '='
                        boolean_literal: 'True'
                    - end_bracket: )
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              quoted_identifier: '[rcfile1]'
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_rc_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: RCFILE
                - comma: ','
                - keyword: SERDE_METHOD
                - comparison_operator:
                    raw_comparison_operator: '='
                - serde_method: "'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'"
                - comma: ','
                - keyword: DATA_COMPRESSION
                - comparison_operator:
                    raw_comparison_operator: '='
                - file_compression: "'org.apache.hadoop.io.compress.DefaultCodec'"
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: orcfile1
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_orc_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: ORC
                - comma: ','
                - keyword: DATA_COMPRESSION
                - comparison_operator:
                    raw_comparison_operator: '='
                - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: parquetfile1
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_parquet_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: PARQUET
                - comma: ','
                - keyword: DATA_COMPRESSION
                - comparison_operator:
                    raw_comparison_operator: '='
                - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: jsonFileFormat
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_json_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: JSON
                - comma: ','
                - keyword: DATA_COMPRESSION
                - comparison_operator:
                    raw_comparison_operator: '='
                - file_compression: "'org.apache.hadoop.io.compress.SnappyCodec'"
              end_bracket: )
          - statement_terminator: ;
    - statement:
        create_external_file_format:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: FILE
          - keyword: FORMAT
          - object_reference:
              naked_identifier: DeltaFileFormat
          - keyword: WITH
          - bracketed:
              start_bracket: (
              external_file_delta_clause:
                - keyword: FORMAT_TYPE
                - comparison_operator:
                    raw_comparison_operator: '='
                - keyword: DELTA
              end_bracket: )
          - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_table.sql

CREATE EXTERNAL TABLE schema_name.table_name (
    column_name_1 VARCHAR(50),
    column_name_2 VARCHAR(50) NULL,
    column_name_3 VARCHAR(50) NOT NULL
)
WITH (
    LOCATION = N'/path/to/folder/',
    DATA_SOURCE = external_data_source,
    FILE_FORMAT = parquetfileformat,
    REJECT_TYPE = VALUE,
    REJECT_VALUE = 0,
    REJECTED_ROW_LOCATION = '/REJECT_Directory'
)

CREATE EXTERNAL TABLE schema_name.table_name (
    column_name_1 VARCHAR(50),
    column_name_2 VARCHAR(50) NULL,
    column_name_3 VARCHAR(50) NOT NULL
)
WITH (
    LOCATION = N'/path/to/folder/',
    DATA_SOURCE = external_data_source,
    FILE_FORMAT = parquetfileformat,
    REJECT_TYPE = PERCENTAGE,
    REJECT_VALUE = 0,
    REJECT_SAMPLE_VALUE = 0,
    REJECTED_ROW_LOCATION = '/REJECT_DIRECTORY'
)

CREATE EXTERNAL TABLE customers (
    o_orderkey DECIMAL(38) NOT NULL,
    o_custkey DECIMAL(38) NOT NULL,
    o_orderstatus CHAR COLLATE latin1_general_bin NOT NULL,
    o_totalprice DECIMAL(15, 2) NOT NULL,
    o_orderdate DATETIME2(0) NOT NULL,
    o_orderpriority CHAR(15) COLLATE latin1_general_bin NOT NULL,
    o_clerk CHAR(15) COLLATE latin1_general_bin NOT NULL,
    o_shippriority DECIMAL(38) NOT NULL,
    o_comment VARCHAR(79) COLLATE latin1_general_bin NOT NULL
)
WITH (
    LOCATION = 'DB1.mySchema.customer',
    DATA_SOURCE = external_data_source_name
);
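Editorial note: the three statements in create_external_table.sql exercise the VALUE and PERCENTAGE reject modes separately. As a minimal illustrative sketch only (not a fixture, not hash-checked, with invented object names ext_src and ext_fmt), the reject options attach to an external table like this:

-- Illustrative sketch: mirrors the reject-option pattern parsed above.
-- dbo.demo_ext, ext_src, and ext_fmt are hypothetical names.
CREATE EXTERNAL TABLE dbo.demo_ext (
    id INT NOT NULL
)
WITH (
    LOCATION = '/path/to/folder/',
    DATA_SOURCE = ext_src,
    FILE_FORMAT = ext_fmt,
    REJECT_TYPE = VALUE,
    REJECT_VALUE = 0
);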
sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_external_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 12537be730d551297489fb91ede9e13b1067815a7a5b1a94924a28445c14838a
file:
  batch:
    - statement:
        create_external_table_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: TABLE
          - object_reference:
              - naked_identifier: schema_name
              - dot: .
              - naked_identifier: table_name
          - bracketed:
              - start_bracket: (
              - column_definition:
                  naked_identifier: column_name_1
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
              - comma: ','
              - column_definition:
                  naked_identifier: column_name_2
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
                  column_constraint_segment:
                    keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: column_name_3
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - end_bracket: )
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - table_location_clause:
                  keyword: LOCATION
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "N'/path/to/folder/'"
              - comma: ','
              - keyword: DATA_SOURCE
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: external_data_source
              - comma: ','
              - keyword: FILE_FORMAT
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: parquetfileformat
              - comma: ','
              - keyword: REJECT_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: VALUE
              - comma: ','
              - keyword: REJECT_VALUE
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '0'
              - comma: ','
              - keyword: REJECTED_ROW_LOCATION
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'/REJECT_Directory'"
              - end_bracket: )
    - statement:
        create_external_table_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: TABLE
          - object_reference:
              - naked_identifier: schema_name
              - dot: .
              - naked_identifier: table_name
          - bracketed:
              - start_bracket: (
              - column_definition:
                  naked_identifier: column_name_1
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
              - comma: ','
              - column_definition:
                  naked_identifier: column_name_2
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
                  column_constraint_segment:
                    keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: column_name_3
                  data_type:
                    data_type_identifier: VARCHAR
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '50'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - end_bracket: )
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - table_location_clause:
                  keyword: LOCATION
                  comparison_operator:
                    raw_comparison_operator: '='
                  quoted_literal: "N'/path/to/folder/'"
              - comma: ','
              - keyword: DATA_SOURCE
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: external_data_source
              - comma: ','
              - keyword: FILE_FORMAT
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  naked_identifier: parquetfileformat
              - comma: ','
              - keyword: REJECT_TYPE
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: PERCENTAGE
              - comma: ','
              - keyword: REJECT_VALUE
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '0'
              - comma: ','
              - keyword: REJECT_SAMPLE_VALUE
              - comparison_operator:
                  raw_comparison_operator: '='
              - numeric_literal: '0'
              - comma: ','
              - keyword: REJECTED_ROW_LOCATION
              - comparison_operator:
                  raw_comparison_operator: '='
              - quoted_literal: "'/REJECT_DIRECTORY'"
              - end_bracket: )
    - statement:
        create_external_table_statement:
          - keyword: CREATE
          - keyword: EXTERNAL
          - keyword: TABLE
          - object_reference:
              naked_identifier: customers
          - bracketed:
              - start_bracket: (
              - column_definition:
                  naked_identifier: o_orderkey
                  data_type:
                    data_type_identifier: DECIMAL
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '38'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: o_custkey
                  data_type:
                    data_type_identifier: DECIMAL
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '38'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  - naked_identifier: o_orderstatus
                  - data_type:
                      data_type_identifier: CHAR
                  - column_constraint_segment:
                      keyword: COLLATE
                      collation_reference:
                        naked_identifier: latin1_general_bin
                  - column_constraint_segment:
                      - keyword: NOT
                      - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: o_totalprice
                  data_type:
                    data_type_identifier: DECIMAL
                    bracketed_arguments:
                      bracketed:
                        - start_bracket: (
                        - expression:
                            numeric_literal: '15'
                        - comma: ','
                        - expression:
                            numeric_literal: '2'
                        - end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: o_orderdate
                  data_type:
                    data_type_identifier: DATETIME2
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '0'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  - naked_identifier: o_orderpriority
                  - data_type:
                      data_type_identifier: CHAR
                      bracketed_arguments:
                        bracketed:
                          start_bracket: (
                          expression:
                            numeric_literal: '15'
                          end_bracket: )
                  - column_constraint_segment:
                      keyword: COLLATE
                      collation_reference:
                        naked_identifier: latin1_general_bin
                  - column_constraint_segment:
                      - keyword: NOT
                      - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  - naked_identifier: o_clerk
                  - data_type:
                      data_type_identifier: CHAR
                      bracketed_arguments:
                        bracketed:
                          start_bracket: (
                          expression:
                            numeric_literal: '15'
                          end_bracket: )
                  - column_constraint_segment:
                      keyword: COLLATE
                      collation_reference:
                        naked_identifier: latin1_general_bin
                  - column_constraint_segment:
                      - keyword: NOT
                      - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  naked_identifier: o_shippriority
                  data_type:
                    data_type_identifier: DECIMAL
                    bracketed_arguments:
                      bracketed:
                        start_bracket: (
                        expression:
                          numeric_literal: '38'
                        end_bracket: )
                  column_constraint_segment:
                    - keyword: NOT
                    - keyword: 'NULL'
              - comma: ','
              - column_definition:
                  - naked_identifier: o_comment
                  - data_type:
                      data_type_identifier: VARCHAR
                      bracketed_arguments:
                        bracketed:
                          start_bracket: (
                          expression:
                            numeric_literal: '79'
                          end_bracket: )
                  - column_constraint_segment:
                      keyword: COLLATE
                      collation_reference:
                        naked_identifier: latin1_general_bin
                  - column_constraint_segment:
                      - keyword: NOT
                      - keyword: 'NULL'
              - end_bracket: )
          - keyword: WITH
          - bracketed:
              start_bracket: (
              table_location_clause:
                keyword: LOCATION
                comparison_operator:
                  raw_comparison_operator: '='
                quoted_literal: "'DB1.mySchema.customer'"
              comma: ','
              keyword: DATA_SOURCE
              comparison_operator:
                raw_comparison_operator: '='
              object_reference:
                naked_identifier: external_data_source_name
              end_bracket: )
    - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_fulltext_index.sql

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062,
    [test] STATISTICAL_SEMANTICS
) KEY INDEX [KEY_INDEX];

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 'french'
) KEY INDEX [KEY_INDEX];

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] STATISTICAL_SEMANTICS
) KEY INDEX [KEY_INDEX];

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE
) KEY INDEX [KEY_INDEX];

-- catalog_filegroup_options
CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name];

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name], FILEGROUP [filegroup_name];

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON FILEGROUP [filegroup_name], [ft_catalog_name],;

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON FILEGROUP [filegroup_name];

-- change_tracking (MANUAL | AUTO | OFF | OFF, NO POPULATION)
CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING MANUAL );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING = MANUAL );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING AUTO );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] ON [ft_catalog_name] WITH ( CHANGE_TRACKING = AUTO );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING OFF );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING = OFF );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING OFF, NO POPULATION );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( CHANGE_TRACKING = OFF, NO POPULATION );

-- stoplist (OFF | SYSTEM | stoplist_name)
CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST OFF );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = OFF );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST SYSTEM );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = SYSTEM );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST [custom_stoplist_name] );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( STOPLIST = [custom_stoplist_name] );

-- search property list
CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( SEARCH PROPERTY LIST [property_list_name] );

CREATE FULLTEXT INDEX ON [dbo].[TEST_FULLTEXT_INDEX] (
    [id] LANGUAGE 1062
) KEY INDEX [PK_IDENTIFIER] WITH ( SEARCH PROPERTY LIST = [property_list_name] );

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_fulltext_index.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: ccf02e4d1d84a8730ca5821ebc9007db4236efdb9ff039882d245c0b9ff8d53d
file:
  batch:
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              - start_bracket: (
              - column_reference:
                  quoted_identifier: '[id]'
              - keyword: LANGUAGE
              - numeric_literal: '1062'
              - comma: ','
              - column_reference:
                  quoted_identifier: '[test]'
              - keyword: STATISTICAL_SEMANTICS
              - end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[KEY_INDEX]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              quoted_literal: "'french'"
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[KEY_INDEX]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: STATISTICAL_SEMANTICS
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[KEY_INDEX]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[KEY_INDEX]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - comma: ','
          - keyword: FILEGROUP
          - object_reference:
              quoted_identifier: '[filegroup_name]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - keyword: FILEGROUP
          - object_reference:
              quoted_identifier: '[filegroup_name]'
          - comma: ','
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - comma: ','
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - keyword: FILEGROUP
          - object_reference:
              quoted_identifier: '[filegroup_name]'
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - keyword: MANUAL
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: MANUAL
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - keyword: AUTO
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: 'ON'
          - object_reference:
              quoted_identifier: '[ft_catalog_name]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: AUTO
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - keyword: 'OFF'
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: 'OFF'
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - keyword: 'OFF'
              - comma: ','
              - keyword: 'NO'
              - keyword: POPULATION
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: CHANGE_TRACKING
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: 'OFF'
              - comma: ','
              - keyword: 'NO'
              - keyword: POPULATION
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: STOPLIST
              - keyword: 'OFF'
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: STOPLIST
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: 'OFF'
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: STOPLIST
              - keyword: SYSTEM
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: STOPLIST
              - comparison_operator:
                  raw_comparison_operator: '='
              - keyword: SYSTEM
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              start_bracket: (
              keyword: STOPLIST
              object_reference:
                quoted_identifier: '[custom_stoplist_name]'
              end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              start_bracket: (
              keyword: STOPLIST
              comparison_operator:
                raw_comparison_operator: '='
              object_reference:
                quoted_identifier: '[custom_stoplist_name]'
              end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: SEARCH
              - keyword: PROPERTY
              - keyword: LIST
              - object_reference:
                  quoted_identifier: '[property_list_name]'
              - end_bracket: )
    - statement_terminator: ;
    - statement:
        create_fulltext_index_statement:
          - keyword: CREATE
          - keyword: FULLTEXT
          - keyword: INDEX
          - keyword: 'ON'
          - table_reference:
              - quoted_identifier: '[dbo]'
              - dot: .
              - quoted_identifier: '[TEST_FULLTEXT_INDEX]'
          - bracketed:
              start_bracket: (
              column_reference:
                quoted_identifier: '[id]'
              keyword: LANGUAGE
              numeric_literal: '1062'
              end_bracket: )
          - keyword: KEY
          - keyword: INDEX
          - object_reference:
              quoted_identifier: '[PK_IDENTIFIER]'
          - keyword: WITH
          - bracketed:
              - start_bracket: (
              - keyword: SEARCH
              - keyword: PROPERTY
              - keyword: LIST
              - comparison_operator:
                  raw_comparison_operator: '='
              - object_reference:
                  quoted_identifier: '[property_list_name]'
              - end_bracket: )
    - statement_terminator: ;

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_function.sql

CREATE FUNCTION dbo.ISOweek (@DATE datetime)
RETURNS int
WITH EXECUTE AS CALLER
AS
BEGIN
    DECLARE @ISOweek int;
    SET @ISOweek= DATEPART(wk,@DATE)+1
        -DATEPART(wk,CAST(DATEPART(yy,@DATE) as CHAR(4))+'0104');
    --Special cases: Jan 1-3 may belong to the previous year
    IF (@ISOweek=0)
        SET @ISOweek=dbo.ISOweek(CAST(DATEPART(yy,@DATE)-1
            AS CHAR(4))+'12'+ CAST(24+DATEPART(DAY,@DATE) AS CHAR(2)))+1;
    --Special case: Dec 29-31 may belong to the next year
    IF ((DATEPART(mm,@DATE)=12) AND
        ((DATEPART(dd,@DATE)-DATEPART(dw,@DATE))>= 28))
        SET @ISOweek=1;
    RETURN(@ISOweek);
END;
GO

CREATE FUNCTION f () RETURNS @t TABLE (i int)
AS
BEGIN
    INSERT INTO @t SELECT 1;
    RETURN;
END;
GO

CREATE OR ALTER FUNCTION F (@DATE as datetime)
RETURNS INT
AS
BEGIN
    RETURN 1
END;
GO

ALTER FUNCTION F (@DATE as datetime)
RETURNS INT
AS
BEGIN
    RETURN 0
END;
GO

CREATE FUNCTION [UTIL].[getItemList]
(
    @list ItemList READONLY
)
RETURNS nvarchar(max)
AS
BEGIN
    DECLARE @str nvarchar(max) = ''
    SELECT @str = @str + [item]
    FROM (
        SELECT TOP (9999) [item]
        FROM @list
        ORDER BY [order]
    ) i
    RETURN @str
END;
GO

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_function.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 9ded27ea23a5a527e9a235ea24249e64b61d4711444cf4b648973734d3618169
file:
  - batch:
      statement:
        create_function_statement:
          - keyword: CREATE
          - keyword: FUNCTION
          - object_reference:
              - naked_identifier: dbo
              - dot: .
              - naked_identifier: ISOweek
          - function_parameter_list:
              bracketed:
                start_bracket: (
                parameter: '@DATE'
                data_type:
                  data_type_identifier: datetime
                end_bracket: )
          - keyword: RETURNS
          - data_type:
              data_type_identifier: int
          - function_option_segment:
              keyword: WITH
              execute_as_clause:
                - keyword: EXECUTE
                - keyword: AS
                - keyword: CALLER
          - keyword: AS
          - procedure_statement:
              statement:
                begin_end_block:
                  - keyword: BEGIN
                  - statement:
                      declare_segment:
                        keyword: DECLARE
                        parameter: '@ISOweek'
                        data_type:
                          data_type_identifier: int
                        statement_terminator: ;
                  - statement:
                      set_segment:
                        keyword: SET
                        parameter: '@ISOweek'
                        assignment_operator:
                          raw_comparison_operator: '='
                        expression:
                          - function:
                              function_name:
                                function_name_identifier: DATEPART
                              bracketed:
                                start_bracket: (
                                date_part: wk
                                comma: ','
                                expression:
                                  parameter: '@DATE'
                                end_bracket: )
                          - binary_operator: +
                          - numeric_literal: '1'
                          - binary_operator: '-'
                          - function:
                              function_name:
                                function_name_identifier: DATEPART
                              bracketed:
                                start_bracket: (
                                date_part: wk
                                comma: ','
                                expression:
                                  function:
                                    function_name:
                                      keyword: CAST
                                    bracketed:
                                      start_bracket: (
                                      expression:
                                        function:
                                          function_name:
                                            function_name_identifier: DATEPART
                                          bracketed:
                                            start_bracket: (
                                            date_part: yy
                                            comma: ','
                                            expression:
                                              parameter: '@DATE'
                                            end_bracket: )
                                      keyword: as
                                      data_type:
                                        data_type_identifier: CHAR
                                        bracketed_arguments:
                                          bracketed:
                                            start_bracket: (
                                            expression:
                                              numeric_literal: '4'
                                            end_bracket: )
                                      end_bracket: )
                                  binary_operator: +
                                  quoted_literal: "'0104'"
                                end_bracket: )
                        statement_terminator: ;
                  - statement:
                      if_then_statement:
                        if_clause:
                          keyword: IF
                          expression:
                            bracketed:
                              start_bracket: (
                              expression:
                                parameter: '@ISOweek'
                                comparison_operator:
                                  raw_comparison_operator: '='
                                numeric_literal: '0'
                              end_bracket: )
                        statement:
                          set_segment:
                            keyword: SET
                            parameter: '@ISOweek'
                            assignment_operator:
                              raw_comparison_operator: '='
                            expression:
                              function:
                                function_name:
                                  naked_identifier: dbo
                                  dot: .
                                  function_name_identifier: ISOweek
                                bracketed:
                                  start_bracket: (
                                  expression:
                                    - function:
                                        function_name:
                                          keyword: CAST
                                        bracketed:
                                          start_bracket: (
                                          expression:
                                            function:
                                              function_name:
                                                function_name_identifier: DATEPART
                                              bracketed:
                                                start_bracket: (
                                                date_part: yy
                                                comma: ','
                                                expression:
                                                  parameter: '@DATE'
                                                end_bracket: )
                                            binary_operator: '-'
                                            numeric_literal: '1'
                                          keyword: AS
                                          data_type:
                                            data_type_identifier: CHAR
                                            bracketed_arguments:
                                              bracketed:
                                                start_bracket: (
                                                expression:
                                                  numeric_literal: '4'
                                                end_bracket: )
                                          end_bracket: )
                                    - binary_operator: +
                                    - quoted_literal: "'12'"
                                    - binary_operator: +
                                    - function:
                                        function_name:
                                          keyword: CAST
                                        bracketed:
                                          start_bracket: (
                                          expression:
                                            numeric_literal: '24'
                                            binary_operator: +
                                            function:
                                              function_name:
                                                function_name_identifier: DATEPART
                                              bracketed:
                                                start_bracket: (
                                                date_part: DAY
                                                comma: ','
                                                expression:
                                                  parameter: '@DATE'
                                                end_bracket: )
                                          keyword: AS
                                          data_type:
                                            data_type_identifier: CHAR
                                            bracketed_arguments:
                                              bracketed:
                                                start_bracket: (
                                                expression:
                                                  numeric_literal: '2'
                                                end_bracket: )
                                          end_bracket: )
                                  end_bracket: )
                              binary_operator: +
                              numeric_literal: '1'
                            statement_terminator: ;
                  - statement:
                      if_then_statement:
                        if_clause:
                          keyword: IF
                          expression:
                            bracketed:
                              start_bracket: (
                              expression:
                                - bracketed:
                                    start_bracket: (
                                    expression:
                                      function:
                                        function_name:
                                          function_name_identifier: DATEPART
                                        bracketed:
                                          start_bracket: (
                                          date_part: mm
                                          comma: ','
                                          expression:
                                            parameter: '@DATE'
                                          end_bracket: )
                                      comparison_operator:
                                        raw_comparison_operator: '='
                                      numeric_literal: '12'
                                    end_bracket: )
                                - binary_operator: AND
                                - bracketed:
                                    start_bracket: (
                                    expression:
                                      bracketed:
                                        start_bracket: (
                                        expression:
                                          - function:
                                              function_name:
                                                function_name_identifier: DATEPART
                                              bracketed:
                                                start_bracket: (
                                                date_part: dd
                                                comma: ','
                                                expression:
                                                  parameter: '@DATE'
                                                end_bracket: )
                                          - binary_operator: '-'
                                          - function:
                                              function_name:
                                                function_name_identifier: DATEPART
                                              bracketed:
                                                start_bracket: (
                                                date_part: dw
                                                comma: ','
                                                expression:
                                                  parameter: '@DATE'
                                                end_bracket: )
                                        end_bracket: )
                                      comparison_operator:
                                        - raw_comparison_operator: '>'
                                        - raw_comparison_operator: '='
                                      numeric_literal: '28'
                                    end_bracket: )
                              end_bracket: )
                        statement:
                          set_segment:
                            keyword: SET
                            parameter: '@ISOweek'
                            assignment_operator:
                              raw_comparison_operator: '='
                            expression:
                              numeric_literal: '1'
                            statement_terminator: ;
                  - statement:
                      return_segment:
                        keyword: RETURN
                        expression:
                          bracketed:
                            start_bracket: (
                            expression:
                              parameter: '@ISOweek'
                            end_bracket: )
                        statement_terminator: ;
                  - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO
  - batch:
      statement:
        create_function_statement:
          - keyword: CREATE
          - keyword: FUNCTION
          - object_reference:
              naked_identifier: f
          - function_parameter_list:
              bracketed:
                start_bracket: (
                end_bracket: )
          - keyword: RETURNS
          - parameter: '@t'
          - keyword: TABLE
          - bracketed:
              start_bracket: (
              column_definition:
                naked_identifier: i
                data_type:
                  data_type_identifier: int
              end_bracket: )
          - keyword: AS
          - procedure_statement:
              statement:
                begin_end_block:
                  - keyword: BEGIN
                  - statement:
                      insert_statement:
                        - keyword: INSERT
                        - keyword: INTO
                        - table_reference:
                            parameter: '@t'
                        - select_statement:
                            select_clause:
                              keyword: SELECT
                              select_clause_element:
                                numeric_literal: '1'
                            statement_terminator: ;
                  - statement:
                      return_segment:
                        keyword: RETURN
                        statement_terminator: ;
                  - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO
  - batch:
      statement:
        create_function_statement:
          - keyword: CREATE
          - keyword: OR
          - keyword: ALTER
          - keyword: FUNCTION
          - object_reference:
              naked_identifier: F
          - function_parameter_list:
              bracketed:
                start_bracket: (
                parameter: '@DATE'
                keyword: as
                data_type:
                  data_type_identifier: datetime
                end_bracket: )
          - keyword: RETURNS
          - data_type:
              data_type_identifier: INT
          - keyword: AS
          - procedure_statement:
              statement:
                begin_end_block:
                  - keyword: BEGIN
                  - statement:
                      return_segment:
                        keyword: RETURN
                        expression:
                          numeric_literal: '1'
                  - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO
  - batch:
      statement:
        create_function_statement:
          - keyword: ALTER
          - keyword: FUNCTION
          - object_reference:
              naked_identifier: F
          - function_parameter_list:
              bracketed:
                start_bracket: (
                parameter: '@DATE'
                keyword: as
                data_type:
                  data_type_identifier: datetime
                end_bracket: )
          - keyword: RETURNS
          - data_type:
              data_type_identifier: INT
          - keyword: AS
          - procedure_statement:
              statement:
                begin_end_block:
                  - keyword: BEGIN
                  - statement:
                      return_segment:
                        keyword: RETURN
                        expression:
                          numeric_literal: '0'
                  - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO
  - batch:
      statement:
        create_function_statement:
          - keyword: CREATE
          - keyword: FUNCTION
          - object_reference:
              - quoted_identifier: '[UTIL]'
              - dot: .
              - quoted_identifier: '[getItemList]'
          - function_parameter_list:
              bracketed:
                start_bracket: (
                parameter: '@list'
                data_type:
                  data_type_identifier: ItemList
                keyword: READONLY
                end_bracket: )
          - keyword: RETURNS
          - data_type:
              data_type_identifier: nvarchar
              bracketed_arguments:
                bracketed:
                  start_bracket: (
                  keyword: max
                  end_bracket: )
          - keyword: AS
          - procedure_statement:
              statement:
                begin_end_block:
                  - keyword: BEGIN
                  - statement:
                      declare_segment:
                        keyword: DECLARE
                        parameter: '@str'
                        data_type:
                          data_type_identifier: nvarchar
                          bracketed_arguments:
                            bracketed:
                              start_bracket: (
                              keyword: max
                              end_bracket: )
                        comparison_operator:
                          raw_comparison_operator: '='
                        expression:
                          quoted_literal: "''"
                  - statement:
                      select_statement:
                        select_clause:
                          keyword: SELECT
                          select_clause_element:
                            alias_expression:
                              parameter: '@str'
                              raw_comparison_operator: '='
                            expression:
                              parameter: '@str'
                              binary_operator: +
                              column_reference:
                                quoted_identifier: '[item]'
                        from_clause:
                          keyword: FROM
                          from_expression:
                            from_expression_element:
                              table_expression:
                                bracketed:
                                  start_bracket: (
                                  select_statement:
                                    select_clause:
                                      keyword: SELECT
                                      select_clause_modifier:
                                        keyword: TOP
                                        bracketed:
                                          start_bracket: (
                                          expression:
                                            numeric_literal: '9999'
                                          end_bracket: )
                                      select_clause_element:
                                        column_reference:
                                          quoted_identifier: '[item]'
                                    from_clause:
                                      keyword: FROM
                                      from_expression:
                                        from_expression_element:
                                          table_expression:
                                            table_reference:
                                              parameter: '@list'
                                    orderby_clause:
                                      - keyword: ORDER
                                      - keyword: BY
                                      - column_reference:
                                          quoted_identifier: '[order]'
                                  end_bracket: )
                              alias_expression:
                                naked_identifier: i
                  - statement:
                      return_segment:
                        keyword: RETURN
                        expression:
                          parameter: '@str'
                  - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_procedure.sql

-- Minimal stored procedure
CREATE PROC [PROCEDURE_NAME]
AS
BEGIN
    SELECT 1;
END;
GO

CREATE PROCEDURE [dbo].[TEST]
AS
BEGIN
    SELECT 1;
END;
GO

ALTER PROC [PROCEDURE_NAME]
AS
BEGIN
    SELECT 1;
END;
GO

ALTER PROCEDURE [PROCEDURE_NAME]
AS
BEGIN
    SELECT 1;
END;
GO

CREATE OR ALTER PROC [PROCEDURE_NAME]
AS
BEGIN
    SELECT 1;
END;
GO

CREATE OR ALTER PROCEDURE [PROCEDURE_NAME]
AS
BEGIN
    SELECT 1;
END;
GO

-- Stored procedure with parameters
CREATE PROCEDURE [dbo].[TEST] (@id UNIQUEIDENTIFIER)
AS
SELECT 1;
GO

CREATE PROCEDURE [dbo].[TEST] (
    @id UNIQUEIDENTIFIER NULL = NULL,
    @fooReadonly NVARCHAR(42) = N'foo' READONLY,
    @bar BIT VARYING NULL = NULL OUTPUT,
    @output TINYINT OUT
)
AS
BEGIN
    SET @output = (
        SELECT tinyint_value
        FROM dbo.TEST
    );
    IF @id IS NULL
    BEGIN
        SELECT @bar, @fooReadonly;
    END;
END;
GO

CREATE PROCEDURE [dbo].[TEST] (
    @id UNIQUEIDENTIFIER NULL = NULL,
    @bar NVARCHAR(32) NULL = NULL
)
WITH ENCRYPTION, RECOMPILE, EXECUTE AS 'sa'
AS
BEGIN
    SELECT 1;
END;
GO

CREATE PROCEDURE [dbo].[TEST] (
    @id UNIQUEIDENTIFIER NULL = NULL,
    @bar NVARCHAR(32) NULL = NULL
)
WITH ENCRYPTION, RECOMPILE, EXECUTE AS 'sa'
FOR REPLICATION
AS
BEGIN
    SELECT @id, @bar;
END;
GO

-- Natively compiled stored procedure
CREATE OR ALTER PROCEDURE [dbo].[TEST] (@id INT NOT NULL)
WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER
AS
BEGIN ATOMIC WITH (
    LANGUAGE = N'us_english',
    TRANSACTION ISOLATION LEVEL = SERIALIZABLE,
    DATEFIRST = 10,
    DATEFORMAT = dym,
    DELAYED_DURABILITY = ON
)
    SELECT 1;
END;
GO

CREATE OR ALTER PROCEDURE [dbo].[TEST] (@id INT NOT NULL)
WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER
AS
BEGIN ATOMIC WITH (
    TRANSACTION ISOLATION LEVEL = SNAPSHOT,
    LANGUAGE = 'us_english'
)
    SELECT 1;
END;
GO

CREATE OR ALTER PROCEDURE [dbo].[TEST] (@id INT NOT NULL)
WITH NATIVE_COMPILATION, SCHEMABINDING, EXECUTE AS OWNER
AS
BEGIN ATOMIC WITH (
    TRANSACTION ISOLATION LEVEL = REPEATABLE READ,
    LANGUAGE = N'us_english',
    DELAYED_DURABILITY = OFF,
    DATEFORMAT = myd
)
    SELECT 1;
END;
GO

-- CLR stored procedure
CREATE PROCEDURE [dbo].[TEST]
AS EXTERNAL NAME [dbo].[class_name].[static_method];
GO

CREATE PROCEDURE [dbo].[TEST]; 1064
AS EXTERNAL NAME [dbo].[class_name].[static_method];
GO

CREATE OR ALTER PROCEDURE [dbo].[TEST] (
    @id UNIQUEIDENTIFIER = NEWID(),
    @output NVARCHAR(32) OUTPUT,
    @activated BIT OUT READONLY
)
WITH EXECUTE AS 'sa'
AS EXTERNAL NAME [dbo].[class_name].[static_method];
GO

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_procedure.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. To help enforce this, the "hash" field in the file must match a hash
# computed by SQLFluff when running the tests. Please run
# `python test/generate_parse_fixture_yml.py` to generate them after adding or
# altering SQL files.
_hash: 675c65a45426a06227ba37b639eb5625154210effad6819e2e9071d5b61f066a
file:
  - batch:
      create_procedure_statement:
        - keyword: CREATE
        - keyword: PROC
        - object_reference:
            quoted_identifier: '[PROCEDURE_NAME]'
        - keyword: AS
        - procedure_statement:
            statement:
              begin_end_block:
                - keyword: BEGIN
                - statement:
                    select_statement:
                      select_clause:
                        keyword: SELECT
                        select_clause_element:
                          numeric_literal: '1'
                      statement_terminator: ;
                - keyword: END
      statement_terminator: ;
  - go_statement:
      keyword: GO
  - batch:
      create_procedure_statement:
        - keyword: CREATE
        - keyword: PROCEDURE
        - object_reference:
            - quoted_identifier: '[dbo]'
            - dot: .
- quoted_identifier: '[TEST]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROC - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROC - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: quoted_identifier: '[PROCEDURE_NAME]' - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: start_bracket: ( parameter: '@id' data_type: data_type_identifier: UNIQUEIDENTIFIER end_bracket: ) - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@fooReadonly' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '42' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "N'foo'" - keyword: READONLY - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: BIT keyword: VARYING - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - keyword: OUTPUT - comma: ',' - parameter: '@output' - data_type: data_type_identifier: TINYINT - keyword: OUT - end_bracket: ) - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: set_segment: keyword: SET parameter: '@output' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: tinyint_value from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: TEST end_bracket: ) statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: - parameter: '@id' - keyword: IS - keyword: 'NULL' statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@bar' - comma: ',' - select_clause_element: parameter: '@fooReadonly' statement_terminator: ; - keyword: END statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - end_bracket: ) - keyword: WITH - keyword: ENCRYPTION - comma: ',' - keyword: RECOMPILE - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - comma: ',' - parameter: '@bar' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: 'NULL' - comparison_operator: raw_comparison_operator: '=' - expression: null_literal: 'NULL' - end_bracket: ) - keyword: WITH - keyword: ENCRYPTION - comma: ',' - keyword: RECOMPILE - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: FOR - keyword: REPLICATION - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@id' - comma: ',' - select_clause_element: parameter: '@bar' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'us_english'" - comma: ',' - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: SERIALIZABLE - comma: ',' - keyword: DATEFIRST - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - comma: ',' - keyword: DATEFORMAT - comparison_operator: raw_comparison_operator: '=' - date_format: dym - comma: ',' - keyword: DELAYED_DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: SNAPSHOT - comma: ',' - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'us_english'" - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: INT - keyword: NOT - keyword: 'NULL' - end_bracket: ) - keyword: WITH - keyword: NATIVE_COMPILATION - comma: ',' - keyword: SCHEMABINDING - comma: ',' - execute_as_clause: - keyword: EXECUTE - keyword: AS - keyword: OWNER - keyword: AS - procedure_statement: statement: atomic_begin_end_block: - keyword: BEGIN - keyword: ATOMIC - keyword: WITH - bracketed: - start_bracket: ( - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - comparison_operator: raw_comparison_operator: '=' - keyword: REPEATABLE - keyword: READ - comma: ',' - keyword: LANGUAGE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'us_english'" - comma: ',' - keyword: DELAYED_DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - comma: ',' - keyword: DATEFORMAT - comparison_operator: raw_comparison_operator: '=' - date_format: myd - end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - statement_terminator: ; - numeric_literal: '1064' - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST]' - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@id' - data_type: data_type_identifier: UNIQUEIDENTIFIER - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: NEWID bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: '@output' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '32' end_bracket: ) - keyword: OUTPUT - comma: ',' - parameter: '@activated' - data_type: data_type_identifier: BIT - keyword: OUT - keyword: READONLY - end_bracket: ) - keyword: WITH - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_literal: "'sa'" - keyword: AS - procedure_statement: - keyword: EXTERNAL - keyword: NAME - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[class_name]' - dot: . - quoted_identifier: '[static_method]' - statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_role.sql000066400000000000000000000000761451700765000237630ustar00rootroot00000000000000CREATE ROLE testuser; CREATE ROLE testuser AUTHORIZATION dbo; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_role.yml000066400000000000000000000015201451700765000237600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1830cd153260fb341d79a750dc0c6191650a02e59b7e76acdc5081597c844c89 file: batch: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: testuser - statement_terminator: ; - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: testuser - keyword: AUTHORIZATION - role_reference: naked_identifier: dbo - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_schema.sql000066400000000000000000000001201451700765000242500ustar00rootroot00000000000000CREATE SCHEMA [Reporting] GO CREATE SCHEMA [Extracts] AUTHORIZATION [dbo]; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_schema.yml000066400000000000000000000016351451700765000242660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
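Each of the .sql fixtures above can be checked against the tsql dialect directly with the sqlfluff command-line tool. A minimal sanity check might look like the following (standard `sqlfluff parse` usage; the printed tree is the CLI's human-readable form, not the YAML stored alongside the fixture):

    # Print the parse tree for one fixture using the tsql dialect.
    # Any unparsable section is flagged in the output, which is exactly
    # what these dialect fixtures are designed to catch.
    sqlfluff parse test/fixtures/dialects/tsql/create_role.sql --dialect tsql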
sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_schema.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: c308748e09e74b7c6c4e3a3cdbf45e63a4a756b19637c7fc16cf053dabb11c88
file: [auto-generated parse tree for create_schema.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table.sql

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)

-- Test various forms of quoted data types
CREATE TABLE foo (
    pk int PRIMARY KEY,
    quoted_name [custom udt],
    qualified_name sch.qualified,
    quoted_qualified "my schema".qualified,
    more_quoted "my schema"."custom udt",
    quoted_udt sch.[custom udt]
);

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: cfcae343a1e7dffe16887fb99acfec87e64823d1dbbe32c94bd4a971757073cb
file: [auto-generated parse tree for create_table.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_as_select.sql

--Azure Synapse Analytics specific
CREATE TABLE [dbo].[PL_stage]
WITH (DISTRIBUTION = HASH([ID]), HEAP)
AS
WITH CommentsTracking AS (
    SELECT 'Program' AS Program
)
SELECT
    e.[ID]
    ,e.[ArriveDate]
    ,e.[Contribution]
    ,e.[DischargeDate]
    ,e.[Encounter]
    ,e.[Facility]
    ,e.[Region]
    ,e.[LOS]
FROM dbo.Encounter e
JOIN dbo.Finance f ON e.[ID] = f.[ID]

DROP TABLE [dbo].[PL_stage]

CREATE TABLE [dbo].[PL_stage]
WITH (DISTRIBUTION = HASH([ID]), HEAP)
AS
SELECT
    e.[ID]
    ,e.[ArriveDate]
    ,e.[Contribution]
    ,e.[DischargeDate]
    ,e.[Encounter]
    ,e.[Facility]
    ,e.[Region]
    ,e.[LOS]
FROM dbo.Encounter e
JOIN dbo.Finance f ON e.[ID] = f.[ID];

DROP TABLE [dbo].[PL_stage];

CREATE TABLE [dbo].[PL_stage]
WITH (DISTRIBUTION = HASH([ID]), HEAP)
AS
(
    SELECT
        e.[ID]
        ,e.[ArriveDate]
        ,e.[Contribution]
        ,e.[DischargeDate]
        ,e.[Encounter]
        ,e.[Facility]
        ,e.[Region]
        ,e.[LOS]
    FROM dbo.Encounter e
    JOIN dbo.Finance f ON e.[ID] = f.[ID]
)
OPTION (LABEL = 'Test_Label')

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_as_select.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: d283c172dcd9eb54ec2eabecafdedf10a50b417d0aef72fa8f6d88751b259ac6
file: [auto-generated parse tree for create_table_as_select.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_constraints.sql

CREATE TABLE [dbo].[example](
    [Column A] [int] IDENTITY,
    [Column B] [int] IDENTITY(1, 1) NOT NULL,
    [ColumnC] varchar(100) DEFAULT 'mydefault',
    [ColumnDecimal] DATE DEFAULT GETDATE(),
    [col1] int default ((-1)) not null,
    [col1] int default (-1) not null,
    [col1] int default -1 not null,
    [col1] INT DEFAULT (NULL) NULL
)
GO

create table [schema1].[table1] (
    [col1] INT
    , PRIMARY KEY CLUSTERED ([col1] ASC)
)
GO

create table [schema1].[table1] (
    [col1] INT
    , CONSTRAINT [Pk_Id] PRIMARY KEY NONCLUSTERED ([col1] DESC)
)
GO

CREATE TABLE [dbo].[table1] (
    [ColumnB] [varchar](100) FILESTREAM MASKED WITH (FUNCTION = 'my_func'),
    [ColumnC] varchar(100) NULL NOT FOR REPLICATION,
    [ColumnDecimal] decimal(10,3) GENERATED ALWAYS AS ROW START HIDDEN,
    [columnE] varchar(100) ENCRYPTED WITH (
        COLUMN_ENCRYPTION_KEY = key_name,
        ENCRYPTION_TYPE = RANDOMIZED,
        ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256'
    ),
    [column1] varchar (100) collate Latin1_General_BIN
)
GO
sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_constraints.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: 653c3331eb481f4fe465aa9aa63fb4d83bf7515eb19bd756bd6d0dfde3ba0184
file: [auto-generated parse tree for create_table_constraints.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_on_filegroup.sql

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
) ON MyFileGroup

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_on_filegroup.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: e98064d324c4b081392469410dbfab5dddbc12b258a107db6cd00d212abb8f2e
file: [auto-generated parse tree for create_table_on_filegroup.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_distribution.sql

--Azure Synapse Analytics specific
CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (CLUSTERED COLUMNSTORE INDEX, DISTRIBUTION = ROUND_ROBIN);
GO

DROP TABLE [dbo].[EC DC]
GO

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (HEAP, DISTRIBUTION = REPLICATE);
GO

DROP TABLE [dbo].[EC DC]
GO

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (LOCATION = USER_DB, DISTRIBUTION = HASH([Column B]));
GO

DROP TABLE [dbo].[EC DC]
GO

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (CLUSTERED COLUMNSTORE INDEX, LOCATION = USER_DB, DISTRIBUTION = HASH([Column B]));
GO

DROP TABLE [dbo].[EC DC]
GO

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (CLUSTERED INDEX ([Column B]), DISTRIBUTION = HASH([Column B]));
GO

DROP TABLE [dbo].[EC DC];
GO

CREATE TABLE [dbo].[EC DC] (
    [Column B] [varchar](100),
    [ColumnC] varchar(100),
    [ColumnDecimal] decimal(10,3)
)
WITH (CLUSTERED COLUMNSTORE INDEX ORDER ([Column B]), DISTRIBUTION = HASH([Column B]));
GO

DROP TABLE [dbo].[EC DC];
GO

CREATE TABLE [dbo].[table] (
    [name] [varchar](100) NOT NULL,
    [month_num] [int] NULL
)
WITH (
    DISTRIBUTION = REPLICATE,
    CLUSTERED INDEX (
        [name] ASC,
        [month_num] ASC
    )
)
GO
sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_distribution.yml

# YML test files are auto-generated from SQL files and should not be edited by
# hand. (Standard auto-generation header, as above.)
_hash: 2b5704dab85931563d9a29f1d454c0ee1fbb3a2278481306089d4be3505ddc96
file: [auto-generated parse tree for create_table_with_distribution.sql]

sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_sequence.sql

CREATE TABLE DEST.MELDER(
    [ID] INT PRIMARY KEY NOT NULL DEFAULT NEXT VALUE FOR [dbo].[SEQ_MELDER]
    ,[DOWNLOADED_TOTAL] INT NULL
    ,[WARNED_DAILY] INT NULL
    ,[DATE_OF_REPORT] DATETIME NULL
    ,DATE_LAST_INSERTED DATETIME DEFAULT GETDATE()
);
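The hash check described in the YAML headers is enforced when the test suite runs, so after touching any of these .sql files it is worth re-running the dialect tests. A sketch, assuming the repository's tests run under pytest (the path and the `-k` filter are assumptions about the layout, not taken from the fixture files):

    # Re-run the dialect fixture tests; a stale _hash in a .yml file
    # will be reported as a failure here.
    pytest test -k "tsql"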
- quoted_identifier: '[SEQ_MELDER]' - comma: ',' - column_definition: quoted_identifier: '[DOWNLOADED_TOTAL]' data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[WARNED_DAILY]' data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: quoted_identifier: '[DATE_OF_REPORT]' data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.sql000066400000000000000000000045471451700765000310470ustar00rootroot00000000000000IF NOT EXISTS(SELECT * FROM sys.sequences WHERE object_id = OBJECT_ID(N'[dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]') AND type = 'SO') CREATE SEQUENCE SEQ_SCHEMA_NAME_TABLE_NAME START WITH 1 INCREMENT BY 1; GO CREATE TABLE SCHEMA_NAME.TABLE_NAME( [ID] INT PRIMARY KEY NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]), [WEEK_UNIX] BIGINT, GMCODE VARCHAR(100), AVERAGE_RNA_FLOW_PER_100000 DECIMAL(16,2) NULL, NUMBER_OF_MEASUREMENTS INT NULL, NUMBER_OF_LOCATIONS INT NULL, TOTAL_LOCATIONS INT NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE() ); IF NOT EXISTS(SELECT * FROM sys.sequences WHERE object_id = OBJECT_ID(N'[dbo].[SEQ_STAGE_CBS_POPULATION_BASE]') AND type = 'SO') CREATE SEQUENCE SEQ_STAGE_CBS_POPULATION_BASE START WITH 1 INCREMENT BY 1; GO CREATE TABLE STAGE.CBS_POPULATION_BASE( [ID] INT PRIMARY KEY NONCLUSTERED NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_STAGE_CBS_POPULATION_BASE]), GEMEENTE_CODE VARCHAR(100) NULL, GEMEENTE VARCHAR(100) NULL, LEEFTIJD VARCHAR(100) NULL, GESLACHT VARCHAR(100) NULL, DATUM_PEILING VARCHAR(100) NULL, POPULATIE VARCHAR(100) NULL, VEILIGHEIDSREGIO_CODE VARCHAR(100) NULL, VEILIGHEIDSREGIO_NAAM VARCHAR(100) NULL, PROVINCIE_CODE VARCHAR(100) NULL, PROVINCIE_NAAM VARCHAR(100) NULL, GGD_CODE VARCHAR(100) NULL, GGD_NAAM VARCHAR(100) NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE() ); GO CREATE CLUSTERED INDEX CI_DLI_STAGE_CBS_POPULATION_BASE ON STAGE.CBS_POPULATION_BASE (DATE_LAST_INSERTED) GO CREATE NONCLUSTERED INDEX NCI_DLI_STAGE_CIMS_VACCINATED_AGE_GROUP ON STAGE.CBS_POPULATION_BASE (DATE_LAST_INSERTED, GEMEENTE_CODE, GEMEENTE, LEEFTIJD, GESLACHT, DATUM_PEILING, POPULATIE, VEILIGHEIDSREGIO_CODE, VEILIGHEIDSREGIO_NAAM, PROVINCIE_CODE, PROVINCIE_NAAM, GGD_CODE, GGD_NAAM); CREATE TABLE DEST.POSITIVE_TESTED_PEOPLE( [ID] INT PRIMARY KEY NOT NULL DEFAULT (NEXT VALUE FOR [dbo].[SEQ_DEST_POSITIVE_TESTED_PEOPLE]), DATE_OF_REPORT DATETIME NULL, DATE_OF_REPORT_UNIX BIGINT NULL, INFECTED_DAILY_INCREASE DECIMAL(16, 1) NULL, INFECTED_DAILY_TOTAL INT NULL, DATE_LAST_INSERTED DATETIME DEFAULT GETDATE(), [DATE_RANGE_START] datetime, [DATE_OF_REPORTS_LAG] datetime, [DATE_RANGE_START_LAG] datetime, [7D_AVERAGE_INFECTED_DAILY_INCREASE_TOTAL] decimal (16,2), [7D_AVERAGE_INFECTED_DAILY_INCREASE_LAG] decimal (16,2), [7D_AVERAGE_INFECTED_DAILY_INCREASE_ABSOLUTE] decimal (16,2) ); sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_sequence_bracketed.yml000066400000000000000000000535771451700765000310600ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: bd8e5b608fd5cd901c0f50ac677f1ae452f8d065fe2abcb5f38423944df59dae file: - batch: statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: sequences where_clause: keyword: WHERE expression: - column_reference: naked_identifier: object_id - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: OBJECT_ID bracketed: start_bracket: ( expression: quoted_literal: "N'[dbo].[SEQ_SCHEMA_NAME_TABLE_NAME]'" end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SO'" end_bracket: ) statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_SCHEMA_NAME_TABLE_NAME - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: SCHEMA_NAME - dot: . - naked_identifier: TABLE_NAME - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_SCHEMA_NAME_TABLE_NAME]' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[WEEK_UNIX]' data_type: data_type_identifier: BIGINT - comma: ',' - column_definition: naked_identifier: GMCODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: naked_identifier: AVERAGE_RNA_FLOW_PER_100000 data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: NUMBER_OF_MEASUREMENTS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: NUMBER_OF_LOCATIONS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: TOTAL_LOCATIONS data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: - keyword: NOT - keyword: EXISTS - bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: sequences where_clause: keyword: WHERE expression: - column_reference: naked_identifier: object_id - comparison_operator: raw_comparison_operator: '=' - function: function_name: function_name_identifier: OBJECT_ID bracketed: start_bracket: ( expression: quoted_literal: "N'[dbo].[SEQ_STAGE_CBS_POPULATION_BASE]'" end_bracket: ) - binary_operator: AND - column_reference: naked_identifier: type - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SO'" end_bracket: ) statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_STAGE_CBS_POPULATION_BASE - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: NONCLUSTERED - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_STAGE_CBS_POPULATION_BASE]' end_bracket: ) - comma: ',' - column_definition: naked_identifier: GEMEENTE_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GEMEENTE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: LEEFTIJD data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GESLACHT data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATUM_PEILING data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: POPULATIE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: VEILIGHEIDSREGIO_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: VEILIGHEIDSREGIO_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PROVINCIE_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PROVINCIE_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GGD_CODE data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: GGD_NAAM data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_index_statement: - keyword: CREATE - keyword: CLUSTERED - keyword: INDEX - index_reference: naked_identifier: CI_DLI_STAGE_CBS_POPULATION_BASE - 
keyword: 'ON' - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: DATE_LAST_INSERTED end_bracket: ) - go_statement: keyword: GO - batch: - statement: create_index_statement: - keyword: CREATE - keyword: NONCLUSTERED - keyword: INDEX - index_reference: naked_identifier: NCI_DLI_STAGE_CIMS_VACCINATED_AGE_GROUP - keyword: 'ON' - table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: CBS_POPULATION_BASE - bracketed_index_column_list_grammar: bracketed: - start_bracket: ( - index_column_definition: naked_identifier: DATE_LAST_INSERTED - comma: ',' - index_column_definition: naked_identifier: GEMEENTE_CODE - comma: ',' - index_column_definition: naked_identifier: GEMEENTE - comma: ',' - index_column_definition: naked_identifier: LEEFTIJD - comma: ',' - index_column_definition: naked_identifier: GESLACHT - comma: ',' - index_column_definition: naked_identifier: DATUM_PEILING - comma: ',' - index_column_definition: naked_identifier: POPULATIE - comma: ',' - index_column_definition: naked_identifier: VEILIGHEIDSREGIO_CODE - comma: ',' - index_column_definition: naked_identifier: VEILIGHEIDSREGIO_NAAM - comma: ',' - index_column_definition: naked_identifier: PROVINCIE_CODE - comma: ',' - index_column_definition: naked_identifier: PROVINCIE_NAAM - comma: ',' - index_column_definition: naked_identifier: GGD_CODE - comma: ',' - index_column_definition: naked_identifier: GGD_NAAM - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - naked_identifier: DEST - dot: . - naked_identifier: POSITIVE_TESTED_PEOPLE - bracketed: - start_bracket: ( - column_definition: - quoted_identifier: '[ID]' - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: keyword: DEFAULT bracketed: start_bracket: ( sequence_next_value: - keyword: NEXT - keyword: VALUE - keyword: FOR - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[SEQ_DEST_POSITIVE_TESTED_PEOPLE]' end_bracket: ) - comma: ',' - column_definition: naked_identifier: DATE_OF_REPORT data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_OF_REPORT_UNIX data_type: data_type_identifier: BIGINT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: INFECTED_DAILY_INCREASE data_type: data_type_identifier: DECIMAL bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '1' - end_bracket: ) column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: INFECTED_DAILY_TOTAL data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: DATE_LAST_INSERTED data_type: data_type_identifier: DATETIME column_constraint_segment: keyword: DEFAULT function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[DATE_RANGE_START]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[DATE_OF_REPORTS_LAG]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[DATE_RANGE_START_LAG]' data_type: data_type_identifier: datetime - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_TOTAL]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_LAG]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[7D_AVERAGE_INFECTED_DAILY_INCREASE_ABSOLUTE]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '16' - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_table_option_segment.sql000066400000000000000000000020541451700765000314230ustar00rootroot00000000000000CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = INFINITE ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 1 DAYS, DATA_CONSISTENCY_CHECK = ON ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 10 WEEKS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 4 MONTHS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = [dbo].[TEST_HISTORY], HISTORY_RETENTION_PERIOD = 1 YEARS ) ); CREATE TABLE [dbo].[TEST] ( [id] INT NOT NULL ) ON [PRIMARY] WITH ( SYSTEM_VERSIONING = ON ( HISTORY_TABLE = 
[dbo].[TEST_HISTORY] ) ); sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_table_option_segment.yml000066400000000000000000000226421451700765000314320ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a78aab88572e4946b8b3c63814c0754aa063e66d94dc1dffc3ea9dcdf7659559 file: batch: - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - keyword: INFINITE - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: DAYS - comma: ',' - keyword: DATA_CONSISTENCY_CHECK - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '10' - keyword: WEEKS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '4' - keyword: MONTHS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: HISTORY_TABLE - comparison_operator: raw_comparison_operator: '=' - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST_HISTORY]' - comma: ',' - keyword: HISTORY_RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: YEARS - end_bracket: ) - end_bracket: ) - statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[TEST]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[id]' data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' end_bracket: ) - on_partition_or_filegroup_statement: filegroup_clause: keyword: 'ON' filegroup_name: quoted_identifier: '[PRIMARY]' - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[TEST_HISTORY]' end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_trailing_comma.sql000066400000000000000000000000751451700765000302100ustar00rootroot00000000000000CREATE TABLE [dbo].[test] ( [Column B] [varchar](100), ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_table_with_trailing_comma.yml000066400000000000000000000020331451700765000302060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 1741f41c3ad5d4387b08448fc575cf531f48835b024826f368d22069ecf693d9 file: batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[test]' - bracketed: start_bracket: ( column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) comma: ',' end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_type.sql000066400000000000000000000002001451700765000237700ustar00rootroot00000000000000CREATE TYPE person AS TABLE ( name nvarchar(10), height int, favorite_color int ); CREATE TYPE weird_int FROM int; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_type.yml000066400000000000000000000031001451700765000237740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1873d9ddf12a690dee729b24d8f6678ce9686f646e8321db57a10d32d349aadb file: batch: - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: person - keyword: AS - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: name data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comma: ',' - column_definition: naked_identifier: height data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: favorite_color data_type: data_type_identifier: int - end_bracket: ) - statement_terminator: ; - statement: create_type_statement: - keyword: CREATE - keyword: TYPE - object_reference: naked_identifier: weird_int - keyword: FROM - object_reference: naked_identifier: int - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view.sql000066400000000000000000000030401451700765000237660ustar00rootroot00000000000000CREATE OR ALTER VIEW Sales.SalesPersonPerform AS SELECT TOP (100) SalesPersonID, SUM(TotalDue) AS TotalSales FROM Sales.SalesOrderHeader WHERE OrderDate > CONVERT(DATETIME, '20001231', 101) GROUP BY SalesPersonID; CREATE OR ALTER VIEW Sales.SalesPersonPerform AS SELECT TOP (100) SalesPersonID, SUM(TotalDue) AS TotalSales FROM Sales.SalesOrderHeader WHERE OrderDate > CONVERT(DATETIME, '20001231', 101) GROUP BY SalesPersonID; CREATE VIEW Purchasing.PurchaseOrderReject WITH SCHEMABINDING AS SELECT PurchaseOrderID, ReceivedQty, RejectedQty, RejectedQty / ReceivedQty AS RejectRatio, DueDate FROM Purchasing.PurchaseOrderDetail WHERE RejectedQty / ReceivedQty > 0 AND DueDate > CONVERT(DATETIME,'20010630',101) ; CREATE VIEW dbo.SeattleOnly AS SELECT p.LastName, p.FirstName, e.JobTitle, a.City, sp.StateProvinceCode FROM HumanResources.Employee e INNER JOIN Person.Person p ON p.BusinessEntityID = e.BusinessEntityID INNER JOIN Person.BusinessEntityAddress bea ON bea.BusinessEntityID = e.BusinessEntityID INNER JOIN Person.Address a ON a.AddressID = bea.AddressID INNER JOIN Person.StateProvince sp ON sp.StateProvinceID = a.StateProvinceID WHERE a.City = 'Seattle' WITH CHECK OPTION ; CREATE VIEW dbo.all_supplier_view WITH SCHEMABINDING AS SELECT supplyID, supplier FROM dbo.SUPPLY1 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY2 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY3 UNION ALL SELECT supplyID, supplier FROM dbo.SUPPLY4; create view vw_view with schemabinding, view_metadata as select A.ID from A sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view.yml000066400000000000000000000420651451700765000240020ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a1c91938da11ffd5936f7cae72bc57c3e7cd19437f6a0335210c8ab2b9dd3d72 file: batch: - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesPersonPerform - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: TotalDue end_bracket: ) alias_expression: keyword: AS naked_identifier: TotalSales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: column_reference: naked_identifier: OrderDate comparison_operator: raw_comparison_operator: '>' function: function_name: keyword: CONVERT bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20001231'" - comma: ',' - numeric_literal: '101' - end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonPerform - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: TotalDue end_bracket: ) alias_expression: keyword: AS naked_identifier: TotalSales from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: column_reference: naked_identifier: OrderDate comparison_operator: raw_comparison_operator: '>' function: function_name: keyword: CONVERT bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20001231'" - comma: ',' - numeric_literal: '101' - end_bracket: ) groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: Purchasing - dot: . 
- naked_identifier: PurchaseOrderReject - keyword: WITH - keyword: SCHEMABINDING - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: PurchaseOrderID - comma: ',' - select_clause_element: column_reference: naked_identifier: ReceivedQty - comma: ',' - select_clause_element: column_reference: naked_identifier: RejectedQty - comma: ',' - select_clause_element: expression: - column_reference: naked_identifier: RejectedQty - binary_operator: / - column_reference: naked_identifier: ReceivedQty alias_expression: keyword: AS naked_identifier: RejectRatio - comma: ',' - select_clause_element: column_reference: naked_identifier: DueDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail where_clause: keyword: WHERE expression: - column_reference: naked_identifier: RejectedQty - binary_operator: / - column_reference: naked_identifier: ReceivedQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '0' - binary_operator: AND - column_reference: naked_identifier: DueDate - comparison_operator: raw_comparison_operator: '>' - function: function_name: keyword: CONVERT bracketed: - start_bracket: ( - data_type: data_type_identifier: DATETIME - comma: ',' - expression: quoted_literal: "'20010630'" - comma: ',' - numeric_literal: '101' - end_bracket: ) statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SeattleOnly - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: p - dot: . - naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: City - comma: ',' - select_clause_element: column_reference: - naked_identifier: sp - dot: . - naked_identifier: StateProvinceCode from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: naked_identifier: e - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Person alias_expression: naked_identifier: p - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: p - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: BusinessEntityAddress alias_expression: naked_identifier: bea - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: bea - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . 
- naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address alias_expression: naked_identifier: a - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: AddressID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: bea - dot: . - naked_identifier: AddressID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: StateProvince alias_expression: naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sp - dot: . - naked_identifier: StateProvinceID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: a - dot: . - naked_identifier: StateProvinceID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: a - dot: . - naked_identifier: City comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Seattle'" - keyword: WITH - keyword: CHECK - keyword: OPTION - statement_terminator: ; - statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: all_supplier_view - keyword: WITH - keyword: SCHEMABINDING - keyword: AS - set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY1 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY2 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY3 - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: supplyID - comma: ',' - select_clause_element: column_reference: naked_identifier: supplier from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SUPPLY4 statement_terminator: ; - statement: create_view_statement: - keyword: create - keyword: view - object_reference: naked_identifier: vw_view - keyword: with - keyword: schemabinding - comma: ',' - keyword: view_metadata - keyword: as - select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: A - dot: . 
- naked_identifier: ID from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: A sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_columns.sql000066400000000000000000000001331451700765000265610ustar00rootroot00000000000000CREATE VIEW my_view ( col1, col2 ) AS SELECT col1, col2 FROM source_table; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_columns.yml000066400000000000000000000025621451700765000265730ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2db271fceda6ccd8c8d621d9b6d8774a19de4d4c85bd20d81494d816ed3249f4 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: my_view - bracketed: - start_bracket: ( - index_column_definition: naked_identifier: col1 - comma: ',' - index_column_definition: naked_identifier: col2 - end_bracket: ) - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_cte.sql000066400000000000000000000007011451700765000256550ustar00rootroot00000000000000CREATE VIEW vwCTE AS --Creates an infinite loop WITH cte (EmployeeID, ManagerID, Title) AS ( SELECT EmployeeID, ManagerID, Title FROM HumanResources.Employee WHERE ManagerID IS NOT NULL UNION ALL SELECT cte.EmployeeID, cte.ManagerID, cte.Title FROM cte JOIN HumanResources.Employee AS e ON cte.ManagerID = e.EmployeeID ) -- Notice the MAXRECURSION option is removed SELECT EmployeeID, ManagerID, Title FROM cte GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_cte.yml000066400000000000000000000125451451700765000256700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 951d51cf8b09c685fcd1dd86f94137588be1f2bda875d0bee4c132f58f7035a6 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: vwCTE - keyword: AS - with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: EmployeeID - comma: ',' - naked_identifier: ManagerID - comma: ',' - naked_identifier: Title end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ManagerID - keyword: IS - keyword: NOT - keyword: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: keyword: AS naked_identifier: e join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cte - dot: . - naked_identifier: ManagerID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: EmployeeID end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmployeeID - comma: ',' - select_clause_element: column_reference: naked_identifier: ManagerID - comma: ',' - select_clause_element: column_reference: naked_identifier: Title from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_pivot.sql000066400000000000000000000006071451700765000262500ustar00rootroot00000000000000CREATE OR ALTER VIEW DEST.V_HOSPITAL_ADMISSIONS_OVERTIME_BYAGEGROUP AS -- Pivot table with one row and five columns SELECT 'AverageCost' AS Cost_Sorted_By_Production_Days, [0], [1], [2], [3], [4] FROM ( SELECT DaysToManufacture, StandardCost FROM Production.Product ) AS SourceTable PIVOT ( AVG(StandardCost) FOR DaysToManufacture IN ([0], [1], [2], [3], [4]) ) AS PivotTable; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_pivot.yml000066400000000000000000000104241451700765000262500ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: df4715f63f5dd0a56000567de395eb865dc0e1eeef7e2570651814e8e78f023c file: batch: statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - naked_identifier: DEST - dot: . - naked_identifier: V_HOSPITAL_ADMISSIONS_OVERTIME_BYAGEGROUP - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: quoted_literal: "'AverageCost'" alias_expression: keyword: AS naked_identifier: Cost_Sorted_By_Production_Days - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[0]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[4]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DaysToManufacture - comma: ',' - select_clause_element: column_reference: naked_identifier: StandardCost from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: Product end_bracket: ) alias_expression: keyword: AS naked_identifier: SourceTable from_pivot_expression: - keyword: PIVOT - bracketed: - start_bracket: ( - function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: StandardCost end_bracket: ) - keyword: FOR - column_reference: naked_identifier: DaysToManufacture - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[0]' - comma: ',' - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - comma: ',' - pivot_column_reference: quoted_identifier: '[4]' - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: PivotTable statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_set_statements.sql000066400000000000000000000015211451700765000301450ustar00rootroot00000000000000SET ANSI_NULLS ON GO SET QUOTED_IDENTIFIER ON GO CREATE VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT TOP 1 DATE_OF_REPORT FROM BASE_CTE; GO CREATE OR ALTER VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT TOP 1 DATE_OF_REPORT FROM BASE_CTE ORDER BY DATE_OF_REPORT; GO ALTER VIEW [DEST].[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION] AS SELECT DATE_OF_REPORT ,NEW_DATE_OF_REPORT_UNIX AS NEW_DATE_UNIX ,OLD_DATE_OF_REPORT_UNIX AS OLD_DATE_UNIX ,T1.VRCODE ,CASE WHEN OLD_VALUE IS NULL THEN 0 ELSE OLD_VALUE END AS OLD_VALUE ,CASE WHEN [DIFFERENCE] IS NULL THEN 0 ELSE [DIFFERENCE] END AS [DIFFERENCE] FROM BASE_CTE T1 LEFT JOIN LAST_DATE_OF_REPORT T2 ON T1.[VRCODE] = T2.[VRCODE] WHERE DATE_OF_REPORT = LAST_DATE_OF_REPORT; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_set_statements.yml000066400000000000000000000157761451700765000301700ustar00rootroot00000000000000# YML test files are auto-generated from 
SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3c4376ca88135168a4d7c082dc67d118bb7bf79ea8f244951072a2c4c99409e7 file: - batch: statement: set_segment: - keyword: SET - keyword: ANSI_NULLS - keyword: 'ON' - go_statement: keyword: GO - batch: statement: set_segment: - keyword: SET - keyword: QUOTED_IDENTIFIER - keyword: 'ON' - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . - quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . - quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DATE_OF_REPORT statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_view_statement: - keyword: ALTER - keyword: VIEW - object_reference: - quoted_identifier: '[DEST]' - dot: . - quoted_identifier: '[V_DIFFERENCE_NURSING_HOME__INFECTED_LOCATIONS_TOTAL_PER_REGION]' - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DATE_OF_REPORT - comma: ',' - select_clause_element: column_reference: naked_identifier: NEW_DATE_OF_REPORT_UNIX alias_expression: keyword: AS naked_identifier: NEW_DATE_UNIX - comma: ',' - select_clause_element: column_reference: naked_identifier: OLD_DATE_OF_REPORT_UNIX alias_expression: keyword: AS naked_identifier: OLD_DATE_UNIX - comma: ',' - select_clause_element: column_reference: - naked_identifier: T1 - dot: . 
- naked_identifier: VRCODE - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: naked_identifier: OLD_VALUE - keyword: IS - keyword: 'NULL' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: naked_identifier: OLD_VALUE - keyword: END alias_expression: keyword: AS naked_identifier: OLD_VALUE - comma: ',' - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - column_reference: quoted_identifier: '[DIFFERENCE]' - keyword: IS - keyword: 'NULL' - keyword: THEN - expression: numeric_literal: '0' - else_clause: keyword: ELSE expression: column_reference: quoted_identifier: '[DIFFERENCE]' - keyword: END alias_expression: keyword: AS quoted_identifier: '[DIFFERENCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BASE_CTE alias_expression: naked_identifier: T1 join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: LAST_DATE_OF_REPORT alias_expression: naked_identifier: T2 - join_on_condition: keyword: 'ON' expression: - column_reference: naked_identifier: T1 dot: . quoted_identifier: '[VRCODE]' - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: T2 dot: . quoted_identifier: '[VRCODE]' where_clause: keyword: WHERE expression: - column_reference: naked_identifier: DATE_OF_REPORT - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: LAST_DATE_OF_REPORT statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_unpivot.sql000066400000000000000000000003631451700765000266120ustar00rootroot00000000000000CREATE VIEW UnpivotView AS -- Unpivot the table. SELECT VendorID, Employee, Orders FROM (SELECT VendorID, Emp1, Emp2, Emp3, Emp4, Emp5 FROM pvt) p UNPIVOT (Orders FOR Employee IN (Emp1, Emp2, Emp3, Emp4, Emp5) ) AS unpvt; sqlfluff-2.3.5/test/fixtures/dialects/tsql/create_view_with_unpivot.yml000066400000000000000000000076551451700765000266270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ff9accf3326e5c3ff779931121cfa95f74c8d8e9ad0ffd8ecc256fc44f14798 file: batch: statement: create_view_statement: - keyword: CREATE - keyword: VIEW - object_reference: naked_identifier: UnpivotView - keyword: AS - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: VendorID - comma: ',' - select_clause_element: column_reference: naked_identifier: Employee - comma: ',' - select_clause_element: column_reference: naked_identifier: Orders from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: VendorID - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp1 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp2 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp3 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp4 - comma: ',' - select_clause_element: column_reference: naked_identifier: Emp5 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: pvt end_bracket: ) alias_expression: naked_identifier: p from_pivot_expression: - keyword: UNPIVOT - bracketed: - start_bracket: ( - column_reference: naked_identifier: Orders - keyword: FOR - column_reference: naked_identifier: Employee - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: naked_identifier: Emp1 - comma: ',' - pivot_column_reference: naked_identifier: Emp2 - comma: ',' - pivot_column_reference: naked_identifier: Emp3 - comma: ',' - pivot_column_reference: naked_identifier: Emp4 - comma: ',' - pivot_column_reference: naked_identifier: Emp5 - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: unpvt statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/cte_s.sql000066400000000000000000000004011451700765000225640ustar00rootroot00000000000000WITH Sales_CTE (SalesPersonID, NumberOfOrders) AS ( SELECT SalesPersonID, COUNT(*) FROM Sales.SalesOrderHeader WHERE SalesPersonID IS NOT NULL GROUP BY SalesPersonID ) SELECT AVG(NumberOfOrders) AS "Average Sales Per Person" FROM Sales_CTE; sqlfluff-2.3.5/test/fixtures/dialects/tsql/cte_s.yml000066400000000000000000000057721451700765000226060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 00cee9d2c0d7a5946f8c3b726fc6c88810c061a192cdcd90fa1c23f3f64a5f28 file: batch: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: Sales_CTE cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: SalesPersonID - comma: ',' - naked_identifier: NumberOfOrders end_bracket: ) keyword: AS bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesPersonID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader where_clause: keyword: WHERE expression: - column_reference: naked_identifier: SalesPersonID - keyword: IS - keyword: NOT - keyword: 'NULL' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: SalesPersonID end_bracket: ) select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: AVG bracketed: start_bracket: ( expression: column_reference: naked_identifier: NumberOfOrders end_bracket: ) alias_expression: keyword: AS quoted_identifier: '"Average Sales Per Person"' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Sales_CTE statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/cursor.sql000066400000000000000000000004151451700765000230110ustar00rootroot00000000000000DECLARE pointy CURSOR LOCAL FORWARD_ONLY READ_ONLY FOR SELECT column_a, column_b FROM some_table WHERE column_a IS NOT NULL ORDER BY column_b OPEN pointy; FETCH FIRST FROM @pointy into @result; FETCH NEXT FROM GLOBAL pointy; CLOSE GLOBAL pointy; DEALLOCATE pointy; sqlfluff-2.3.5/test/fixtures/dialects/tsql/cursor.yml000066400000000000000000000045371451700765000230240ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 9575b8d9a12401ef837abd4a8a7621be306d389f2908bf191fc06a6646ecc962 file: batch: - statement: declare_segment: - keyword: DECLARE - naked_identifier: pointy - keyword: CURSOR - keyword: LOCAL - keyword: FORWARD_ONLY - keyword: READ_ONLY - keyword: FOR - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: column_a - comma: ',' - select_clause_element: column_reference: naked_identifier: column_b from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: some_table where_clause: keyword: WHERE expression: - column_reference: naked_identifier: column_a - keyword: IS - keyword: NOT - keyword: 'NULL' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: column_b - statement: open_cursor_statement: keyword: OPEN naked_identifier: pointy - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: FIRST - keyword: FROM - parameter: '@pointy' - keyword: into - parameter: '@result' - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: NEXT - keyword: FROM - keyword: GLOBAL - naked_identifier: pointy - statement_terminator: ; - statement: close_cursor_statement: - keyword: CLOSE - keyword: GLOBAL - naked_identifier: pointy - statement_terminator: ; - statement: deallocate_cursor_statement: keyword: DEALLOCATE naked_identifier: pointy - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/date_functions.sql000066400000000000000000000006041451700765000245010ustar00rootroot00000000000000select convert( date, dateadd( month, datediff( month, 0, t.valid_from_date ), 0 ) ) as valid_from_date from t as t where t.activity_month >= convert( date, dateadd( yy, datediff(yy, 0, getdate() ) - 1, 0) ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/date_functions.yml000066400000000000000000000104151451700765000245040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 76ffb6c247bc1ab4862e89a0b33e56c584e6a7f02a4c60a987ee3b4c748402b0 file: batch: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: keyword: convert bracketed: start_bracket: ( data_type: data_type_identifier: date comma: ',' expression: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: valid_from_date - end_bracket: ) - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: valid_from_date from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t alias_expression: keyword: as naked_identifier: t where_clause: keyword: where expression: column_reference: - naked_identifier: t - dot: . 
- naked_identifier: activity_month comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' function: function_name: keyword: convert bracketed: start_bracket: ( data_type: data_type_identifier: date comma: ',' expression: function: function_name: function_name_identifier: dateadd bracketed: - start_bracket: ( - date_part: yy - comma: ',' - expression: function: function_name: function_name_identifier: datediff bracketed: - start_bracket: ( - date_part: yy - comma: ',' - expression: numeric_literal: '0' - comma: ',' - expression: function: function_name: function_name_identifier: getdate bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) binary_operator: '-' numeric_literal: '1' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/datepart.sql000066400000000000000000000000771451700765000233040ustar00rootroot00000000000000SELECT DATEPART(DW, my_table.date) AS dayofweek FROM my_table; sqlfluff-2.3.5/test/fixtures/dialects/tsql/datepart.yml000066400000000000000000000024301451700765000233010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3d8b1b8b053f8cc176ef243b44288a0b382680912425f31cb6e90a7832855ee2 file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DATEPART bracketed: start_bracket: ( date_part: DW comma: ',' expression: column_reference: - naked_identifier: my_table - dot: . - naked_identifier: date end_bracket: ) alias_expression: keyword: AS naked_identifier: dayofweek from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/declare_table_type.sql000066400000000000000000000002351451700765000253030ustar00rootroot00000000000000DECLARE @MyTableVar TABLE( EmpID INT NOT NULL, OldVacationHours INT, NewVacationHours INT, ModifiedDate DATETIME, PRIMARY KEY (EmpID) ); sqlfluff-2.3.5/test/fixtures/dialects/tsql/declare_table_type.yml000066400000000000000000000031661451700765000253130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 82f903b527bab32939d1faabb97886364d5cc3ad173215ad572f30e922c54460 file: batch: statement: declare_segment: - keyword: DECLARE - parameter: '@MyTableVar' - keyword: TABLE - bracketed: - start_bracket: ( - column_definition: naked_identifier: EmpID data_type: data_type_identifier: INT column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: OldVacationHours data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: NewVacationHours data_type: data_type_identifier: INT - comma: ',' - column_definition: naked_identifier: ModifiedDate data_type: data_type_identifier: DATETIME - comma: ',' - table_constraint: - keyword: PRIMARY - keyword: KEY - bracketed_index_column_list_grammar: bracketed: start_bracket: ( index_column_definition: naked_identifier: EmpID end_bracket: ) - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/declare_with_following_statements.sql000066400000000000000000000006761451700765000304660ustar00rootroot00000000000000CREATE PROC Reporting.DeclareProblem AS BEGIN DECLARE @startdate AS DATE; DECLARE @DateNow DATE = GETDATE(); DECLARE @DateStart DATETIME2 = GETDATE() ,@DateEnd DATETIME2 = GETDATE() DECLARE @EOMONTH DATE = ('1900-01-01') DECLARE @USER DATE = SYSTEM_USER; DECLARE @CURRENTTIME DATE = CURRENT_TIMESTAMP; SET @EOMONTH = ('2000-01-01') SET @EOMONTH = ('2001-01-01'); IF OBJECT_ID('tempdb..#UP') IS NOT NULL DROP TABLE #UP; END sqlfluff-2.3.5/test/fixtures/dialects/tsql/declare_with_following_statements.yml000066400000000000000000000127401451700765000304630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 965bf0479aba3f070f232c24f5cd5972ded3a634a9f5752eead4702eb1548291 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: - naked_identifier: Reporting - dot: . 
- naked_identifier: DeclareProblem - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: - keyword: DECLARE - parameter: '@startdate' - keyword: AS - data_type: data_type_identifier: DATE - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@DateNow' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@DateStart' - data_type: data_type_identifier: DATETIME2 - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - comma: ',' - parameter: '@DateEnd' - data_type: data_type_identifier: DATETIME2 - comparison_operator: raw_comparison_operator: '=' - expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - statement: declare_segment: keyword: DECLARE parameter: '@EOMONTH' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'1900-01-01'" end_bracket: ) - statement: declare_segment: keyword: DECLARE parameter: '@USER' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bare_function: SYSTEM_USER statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@CURRENTTIME' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: bare_function: CURRENT_TIMESTAMP statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@EOMONTH' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'2000-01-01'" end_bracket: ) - statement: set_segment: keyword: SET parameter: '@EOMONTH' assignment_operator: raw_comparison_operator: '=' expression: bracketed: start_bracket: ( expression: quoted_literal: "'2001-01-01'" end_bracket: ) statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: - function: function_name: function_name_identifier: OBJECT_ID bracketed: start_bracket: ( expression: quoted_literal: "'tempdb..#UP'" end_bracket: ) - keyword: IS - keyword: NOT - keyword: 'NULL' statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#UP' - statement_terminator: ; - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/delete.sql000066400000000000000000000054471451700765000227500ustar00rootroot00000000000000DELETE FROM Sales.SalesPersonQuotaHistory; GO DELETE FROM Production.ProductCostHistory WHERE StandardCost > 1000.00; GO DELETE Production.ProductCostHistory WHERE StandardCost BETWEEN 12.00 AND 14.00 AND EndDate IS NULL; PRINT 'Number of rows deleted is ' + CAST(@@ROWCOUNT as char(3)); GO DECLARE complex_cursor CURSOR FOR SELECT a.BusinessEntityID FROM HumanResources.EmployeePayHistory AS a WHERE RateChangeDate <> (SELECT MAX(RateChangeDate) FROM HumanResources.EmployeePayHistory AS b WHERE a.BusinessEntityID = b.BusinessEntityID) ; OPEN complex_cursor; FETCH FROM complex_cursor; DELETE FROM HumanResources.EmployeePayHistory WHERE CURRENT OF complex_cursor; CLOSE complex_cursor; DEALLOCATE complex_cursor; GO -- SQL-2003 Standard subquery DELETE 
FROM Sales.SalesPersonQuotaHistory WHERE BusinessEntityID IN (SELECT BusinessEntityID FROM Sales.SalesPerson WHERE SalesYTD > 2500000.00); GO -- Transact-SQL extension DELETE FROM Sales.SalesPersonQuotaHistory FROM Sales.SalesPersonQuotaHistory AS spqh INNER JOIN Sales.SalesPerson AS sp ON spqh.BusinessEntityID = sp.BusinessEntityID WHERE sp.SalesYTD > 2500000.00; GO -- No need to mention target table more than once. DELETE spqh FROM Sales.SalesPersonQuotaHistory AS spqh INNER JOIN Sales.SalesPerson AS sp ON spqh.BusinessEntityID = sp.BusinessEntityID WHERE sp.SalesYTD > 2500000.00; DELETE TOP (20) FROM Purchasing.PurchaseOrderDetail WHERE DueDate < '20020701'; GO DELETE FROM Purchasing.PurchaseOrderDetail WHERE PurchaseOrderDetailID IN (SELECT TOP 10 PurchaseOrderDetailID FROM Purchasing.PurchaseOrderDetail ORDER BY DueDate ASC); GO -- Specify the remote data source using a four-part name -- in the form linked_server.catalog.schema.object. DELETE MyLinkServer.AdventureWorks2012.HumanResources.Department WHERE DepartmentID > 16; GO DELETE OPENQUERY (MyLinkServer, 'SELECT Name, GroupName FROM AdventureWorks2012.HumanResources.Department WHERE DepartmentID = 18'); GO DELETE OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', Department) GO DELETE FROM OPENDATASOURCE('SQLNCLI', 'Data Source= ; Integrated Security=SSPI') .AdventureWorks2012.HumanResources.Department WHERE DepartmentID = 17; DELETE Sales.ShoppingCartItem OUTPUT DELETED.* WHERE ShoppingCartID = 20621; DECLARE @MyTableVar table ( ProductID int NOT NULL, ProductName nvarchar(50)NOT NULL, ProductModelID int NOT NULL, PhotoID int NOT NULL); DELETE Production.ProductProductPhoto OUTPUT DELETED.ProductID, p.Name, p.ProductModelID, DELETED.ProductPhotoID INTO @MyTableVar FROM Production.ProductProductPhoto AS ph JOIN Production.Product as p ON ph.ProductID = p.ProductID WHERE p.ProductModelID BETWEEN 120 and 130; sqlfluff-2.3.5/test/fixtures/dialects/tsql/delete.yml000066400000000000000000000506271451700765000227520ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 499e9012f5cb95c4d85b318e244cf97db07a0782422e2f77aa327eb635e1e8a9 file: - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductCostHistory - where_clause: keyword: WHERE expression: column_reference: naked_identifier: StandardCost comparison_operator: raw_comparison_operator: '>' numeric_literal: '1000.00' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductCostHistory where_clause: keyword: WHERE expression: - column_reference: naked_identifier: StandardCost - keyword: BETWEEN - numeric_literal: '12.00' - keyword: AND - numeric_literal: '14.00' - binary_operator: AND - column_reference: naked_identifier: EndDate - keyword: IS - keyword: 'NULL' statement_terminator: ; - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Number of rows deleted is '" binary_operator: + function: function_name: keyword: CAST bracketed: start_bracket: ( expression: system_variable: '@@ROWCOUNT' keyword: as data_type: data_type_identifier: char bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: - statement: declare_segment: - keyword: DECLARE - naked_identifier: complex_cursor - keyword: CURSOR - keyword: FOR - select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: a - dot: . - naked_identifier: BusinessEntityID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: keyword: AS naked_identifier: a where_clause: keyword: WHERE expression: column_reference: naked_identifier: RateChangeDate comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: column_reference: naked_identifier: RateChangeDate end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: keyword: AS naked_identifier: b where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: BusinessEntityID end_bracket: ) statement_terminator: ; - statement: open_cursor_statement: keyword: OPEN naked_identifier: complex_cursor - statement_terminator: ; - statement: fetch_cursor_statement: - keyword: FETCH - keyword: FROM - naked_identifier: complex_cursor - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory - keyword: WHERE - keyword: CURRENT - keyword: OF - naked_identifier: complex_cursor - statement_terminator: ; - statement: close_cursor_statement: keyword: CLOSE naked_identifier: complex_cursor - statement_terminator: ; - statement: deallocate_cursor_statement: keyword: DEALLOCATE naked_identifier: complex_cursor - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesPersonQuotaHistory - where_clause: keyword: WHERE expression: column_reference: naked_identifier: BusinessEntityID keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: BusinessEntityID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson where_clause: keyword: WHERE expression: column_reference: naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory - from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory alias_expression: keyword: AS naked_identifier: spqh join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson alias_expression: keyword: AS naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: spqh - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sp - dot: . - naked_identifier: BusinessEntityID - where_clause: keyword: WHERE expression: column_reference: - naked_identifier: sp - dot: . - naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' - statement_terminator: ; - go_statement: keyword: GO - batch: - statement: delete_statement: keyword: DELETE table_reference: naked_identifier: spqh from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPersonQuotaHistory alias_expression: keyword: AS naked_identifier: spqh join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesPerson alias_expression: keyword: AS naked_identifier: sp - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: spqh - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: sp - dot: . - naked_identifier: BusinessEntityID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: sp - dot: . - naked_identifier: SalesYTD comparison_operator: raw_comparison_operator: '>' numeric_literal: '2500000.00' statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) - keyword: FROM - table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail - where_clause: keyword: WHERE expression: column_reference: naked_identifier: DueDate comparison_operator: raw_comparison_operator: < quoted_literal: "'20020701'" - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: Purchasing - dot: . 
- naked_identifier: PurchaseOrderDetail - where_clause: keyword: WHERE expression: column_reference: naked_identifier: PurchaseOrderDetailID keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '10' select_clause_element: column_reference: naked_identifier: PurchaseOrderDetailID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderDetail orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: DueDate - keyword: ASC end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: MyLinkServer - dot: . - naked_identifier: AdventureWorks2012 - dot: . - naked_identifier: HumanResources - dot: . - naked_identifier: Department where_clause: keyword: WHERE expression: column_reference: naked_identifier: DepartmentID comparison_operator: raw_comparison_operator: '>' numeric_literal: '16' statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: - keyword: DELETE - keyword: OPENQUERY - bracketed: start_bracket: ( naked_identifier: MyLinkServer comma: ',' quoted_literal: "'SELECT Name, GroupName\nFROM AdventureWorks2012.HumanResources.Department\n\ WHERE DepartmentID = 18'" end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: delete_statement: keyword: DELETE openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: naked_identifier: Department - end_bracket: ) - go_statement: keyword: GO - batch: - statement: delete_statement: - keyword: DELETE - keyword: FROM - keyword: OPENDATASOURCE - bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Data Source= ; Integrated Security=SSPI'" - end_bracket: ) - dot: . - table_reference: - naked_identifier: AdventureWorks2012 - dot: . - naked_identifier: HumanResources - dot: . - naked_identifier: Department - where_clause: keyword: WHERE expression: column_reference: naked_identifier: DepartmentID comparison_operator: raw_comparison_operator: '=' numeric_literal: '17' - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Sales - dot: . - naked_identifier: ShoppingCartItem output_clause: keyword: OUTPUT wildcard_expression: wildcard_identifier: naked_identifier: DELETED dot: . 
star: '*' where_clause: keyword: WHERE expression: column_reference: naked_identifier: ShoppingCartID comparison_operator: raw_comparison_operator: '=' numeric_literal: '20621' statement_terminator: ; - statement: declare_segment: - keyword: DECLARE - parameter: '@MyTableVar' - keyword: table - bracketed: - start_bracket: ( - column_definition: naked_identifier: ProductID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ProductName data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ProductModelID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: PhotoID data_type: data_type_identifier: int column_constraint_segment: - keyword: NOT - keyword: 'NULL' - end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductProductPhoto output_clause: - keyword: OUTPUT - column_reference: - naked_identifier: DELETED - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: p - dot: . - naked_identifier: Name - comma: ',' - column_reference: - naked_identifier: p - dot: . - naked_identifier: ProductModelID - comma: ',' - column_reference: - naked_identifier: DELETED - dot: . - naked_identifier: ProductPhotoID - keyword: INTO - table_reference: parameter: '@MyTableVar' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductProductPhoto alias_expression: keyword: AS naked_identifier: ph join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: Product alias_expression: keyword: as naked_identifier: p join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: ph - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: p - dot: . - naked_identifier: ProductID where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: p - dot: . 
- naked_identifier: ProductModelID - keyword: BETWEEN - numeric_literal: '120' - keyword: and - numeric_literal: '130' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/delete_azure_synapse_analytics.sql000066400000000000000000000015071451700765000277600ustar00rootroot00000000000000DELETE dbo.Table2 FROM dbo.Table2 INNER JOIN dbo.Table1 ON (dbo.Table2.ColA = dbo.Table1.ColA) WHERE dboTable2.ColA = 1; DELETE FROM dodos WITH(NOLOCK) OUTPUT age INTO ages DELETE FROM Table1; DELETE FROM Table1 WHERE StandardCost > 1000.00; DELETE FROM Table1 OPTION ( LABEL = N'label1' ); DELETE FROM dbo.FactInternetSales WHERE ProductKey IN ( SELECT T1.ProductKey FROM dbo.DimProduct T1 JOIN dbo.DimProductSubcategory T2 ON T1.ProductSubcategoryKey = T2.ProductSubcategoryKey WHERE T2.EnglishProductSubcategoryName = 'Road Bikes' ) OPTION ( LABEL = N'CustomJoin', HASH JOIN ) ; DELETE tableA WHERE EXISTS ( SELECT TOP 1 1 FROM tableB tb WHERE tb.col1 = tableA.col1 ) DELETE dbo.Table2 FROM dbo.Table2 INNER JOIN dbo.Table1 ON (dbo.Table2.ColA = dbo.Table1.ColA) WHERE dboTable2.ColA = 1; sqlfluff-2.3.5/test/fixtures/dialects/tsql/delete_azure_synapse_analytics.yml000066400000000000000000000241341451700765000277630ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 263d90711353d53f5f77479efc5bd525ccbf67b45fc4382aba7ee621eb1d836d file: batch: - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 - dot: . - naked_identifier: ColA - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - dot: . - naked_identifier: ColA end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: dboTable2 - dot: . 
- naked_identifier: ColA comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: dodos - post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: NOLOCK end_bracket: ) - output_clause: - keyword: OUTPUT - column_reference: naked_identifier: age - keyword: INTO - table_reference: naked_identifier: ages - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - where_clause: keyword: WHERE expression: column_reference: naked_identifier: StandardCost comparison_operator: raw_comparison_operator: '>' numeric_literal: '1000.00' - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: naked_identifier: Table1 - option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: LABEL comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'label1'" end_bracket: ) - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: FactInternetSales - where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey keyword: IN bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: T1 - dot: . - naked_identifier: ProductKey from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DimProduct alias_expression: naked_identifier: T1 join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: DimProductSubcategory alias_expression: naked_identifier: T2 join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: T1 - dot: . - naked_identifier: ProductSubcategoryKey - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: T2 - dot: . - naked_identifier: ProductSubcategoryKey where_clause: keyword: WHERE expression: column_reference: - naked_identifier: T2 - dot: . - naked_identifier: EnglishProductSubcategoryName comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Road Bikes'" end_bracket: ) - option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: LABEL comparison_operator: raw_comparison_operator: '=' quoted_literal: "N'CustomJoin'" - comma: ',' - query_hint_segment: - keyword: HASH - keyword: JOIN - end_bracket: ) - statement_terminator: ; - statement: delete_statement: keyword: DELETE table_reference: naked_identifier: tableA where_clause: keyword: WHERE expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_modifier: keyword: TOP expression: numeric_literal: '1' select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tableB alias_expression: naked_identifier: tb where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: tb - dot: . 
- naked_identifier: col1 - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: tableA - dot: . - naked_identifier: col1 end_bracket: ) - statement: delete_statement: keyword: DELETE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table2 - dot: . - naked_identifier: ColA - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dbo - dot: . - naked_identifier: Table1 - dot: . - naked_identifier: ColA end_bracket: ) where_clause: keyword: WHERE expression: column_reference: - naked_identifier: dboTable2 - dot: . - naked_identifier: ColA comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_external_table.sql000066400000000000000000000003711451700765000255120ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/t-sql/statements/drop-external-table-transact-sql?view=sql-server-ver16#examples */ DROP EXTERNAL TABLE SalesPerson; DROP EXTERNAL TABLE dbo.SalesPerson; DROP EXTERNAL TABLE EasternDivision.dbo.SalesPerson; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_external_table.yml000066400000000000000000000022721451700765000255160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7d546bbd3f07452542aa466df7d6a782e74f556f200487bafe3bf1a8c3af2dc3 file: batch: - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: naked_identifier: SalesPerson - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SalesPerson - statement_terminator: ; - statement: drop_external_table_statement: - keyword: DROP - keyword: EXTERNAL - keyword: TABLE - table_reference: - naked_identifier: EasternDivision - dot: . - naked_identifier: dbo - dot: . - naked_identifier: SalesPerson - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_function.sql000066400000000000000000000001141451700765000243410ustar00rootroot00000000000000DROP FUNCTION Sales.fn_SalesByStore; DROP FUNCTION IF EXISTS sales, sales2; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_function.yml000066400000000000000000000017171451700765000243550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 53f098315280e3585eb8fd5446073bad29baa39a2b9c800df558b87c597cc397 file: batch: - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - function_name: naked_identifier: Sales dot: . function_name_identifier: fn_SalesByStore - statement_terminator: ; - statement: drop_function_statement: - keyword: DROP - keyword: FUNCTION - keyword: IF - keyword: EXISTS - function_name: function_name_identifier: sales - comma: ',' - function_name: function_name_identifier: sales2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_procedure.sql000066400000000000000000000002261451700765000245100ustar00rootroot00000000000000DROP PROCEDURE procedure_name; DROP PROC procedure_name; DROP PROCEDURE IF EXISTS procedure_name; DROP PROCEDURE procedure_name1, procedure_name2; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_procedure.yml000066400000000000000000000024501451700765000245130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 59e007e67128ce2a44db60701bcfb4572bcd6224e924c6eca1c0c5910e1c3092 file: batch: - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROC - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - keyword: IF - keyword: EXISTS - object_reference: naked_identifier: procedure_name - statement_terminator: ; - statement: drop_procedure_statement: - keyword: DROP - keyword: PROCEDURE - object_reference: naked_identifier: procedure_name1 - comma: ',' - object_reference: naked_identifier: procedure_name2 - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_table.sql000066400000000000000000000003171451700765000236100ustar00rootroot00000000000000drop table some_table; drop table if exists some_table; drop table some_table restrict; drop table if exists some_table restrict; drop table some_table cascade; drop table if exists some_table cascade; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_table.yml000066400000000000000000000033151451700765000236130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 3ff08804605c6d9e21042415241ff0949254b85c9f87268efc3d28094f880ad3 file: batch: - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - keyword: restrict - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - keyword: restrict - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - table_reference: naked_identifier: some_table - keyword: cascade - statement_terminator: ; - statement: drop_table_statement: - keyword: drop - keyword: table - keyword: if - keyword: exists - table_reference: naked_identifier: some_table - keyword: cascade - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_user.sql000066400000000000000000000000651451700765000234770ustar00rootroot00000000000000drop user some_user; drop user if exists some_user; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_user.yml000066400000000000000000000014471451700765000235060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5595c4db5e8e74996759f138fd01eb37611cddb31ababccad98cda3094b05676 file: batch: - statement: drop_user_statement: - keyword: drop - keyword: user - role_reference: naked_identifier: some_user - statement_terminator: ; - statement: drop_user_statement: - keyword: drop - keyword: user - keyword: if - keyword: exists - role_reference: naked_identifier: some_user - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_view.sql000066400000000000000000000003031451700765000234660ustar00rootroot00000000000000drop view some_view; drop view if exists some_view; drop view some_view restrict; drop view if exists some_view restrict; drop view some_view cascade; drop view if exists some_view cascade; sqlfluff-2.3.5/test/fixtures/dialects/tsql/drop_view.yml000066400000000000000000000032731451700765000235010ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f976367f46436f7644ecd237eb486bc3e16deb73403f9ff1886f6e683f190589 file: batch: - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - keyword: restrict - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - keyword: restrict - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - table_reference: naked_identifier: some_view - keyword: cascade - statement_terminator: ; - statement: drop_view_statement: - keyword: drop - keyword: view - keyword: if - keyword: exists - table_reference: naked_identifier: some_view - keyword: cascade - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/execute.sql000066400000000000000000000025461451700765000231450ustar00rootroot00000000000000EXEC [Reporting].[Load_CLL] -- Specifying a value only for one parameter (@p2). EXECUTE dbo.ProcTestDefaults @p2 = 'A'; -- Specifying a value for the first two parameters. EXECUTE dbo.ProcTestDefaults 68, 'B'; -- Specifying a value for all three parameters. EXECUTE dbo.ProcTestDefaults 68, 'C', 'House'; -- Using the DEFAULT keyword for the first parameter. EXECUTE dbo.ProcTestDefaults @p1 = DEFAULT, @p2 = 'D'; -- Specifying the parameters in an order different from the order defined in the procedure. EXECUTE dbo.ProcTestDefaults DEFAULT, @p3 = 'Local', @p2 = 'E'; -- Using the DEFAULT keyword for the first and third parameters. EXECUTE dbo.ProcTestDefaults DEFAULT, 'H', DEFAULT; EXECUTE dbo.ProcTestDefaults DEFAULT, 'I', @p3 = DEFAULT; EXECUTE sp_addextendedproperty @name = N'MS_Description', @value = 'my text description', @level0type = N'SCHEMA', @level0name = N'my_schema_name', @level1type = N'my_object_type', @level1name = N'my_object_name' -- Executing a stored procedure and capturing the RETURN value in a variable EXEC @pRes = dbo.ProcTestDefaults; EXEC @pRes = dbo.ProcTestDefaults @p1 = DEFAULT; EXECUTE @pRes = dbo.ProcTestDefaults; EXECUTE @pRes = dbo.ProcTestDefaults @p1 = DEFAULT; -- Executing statement from a variable DECLARE @statement nvarchar(max) = 'SELECT 1' EXEC (@statement); EXEC ('DROP TABLE BoardInventory.BoardInventoryFact_Stage;'); sqlfluff-2.3.5/test/fixtures/dialects/tsql/execute.yml000066400000000000000000000154121451700765000231430ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 84fadab934cf174f454d0fa9b518ddb7164fa80fe55111cd14f33d8c97b93fb7 file: batch: - statement: execute_script_statement: keyword: EXEC object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[Load_CLL]' - statement: execute_script_statement: keyword: EXECUTE object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ProcTestDefaults parameter: '@p2' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'A'" statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults numeric_literal: '68' comma: ',' quoted_literal: "'B'" statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - numeric_literal: '68' - comma: ',' - quoted_literal: "'C'" - comma: ',' - quoted_literal: "'House'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - comma: ',' - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'D'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - keyword: DEFAULT - comma: ',' - parameter: '@p3' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Local'" - comma: ',' - parameter: '@p2' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'E'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - keyword: DEFAULT - comma: ',' - quoted_literal: "'H'" - comma: ',' - keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - keyword: DEFAULT - comma: ',' - quoted_literal: "'I'" - comma: ',' - parameter: '@p3' - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: naked_identifier: sp_addextendedproperty - parameter: '@name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'MS_Description'" - comma: ',' - parameter: '@value' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'my text description'" - comma: ',' - parameter: '@level0type' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'SCHEMA'" - comma: ',' - parameter: '@level0name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'my_schema_name'" - comma: ',' - parameter: '@level1type' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'my_object_type'" - comma: ',' - parameter: '@level1name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'my_object_name'" - statement: execute_script_statement: keyword: EXEC parameter: '@pRes' comparison_operator: raw_comparison_operator: '=' object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults statement_terminator: ; - statement: execute_script_statement: - keyword: EXEC - parameter: '@pRes' - comparison_operator: raw_comparison_operator: '=' - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE parameter: '@pRes' comparison_operator: raw_comparison_operator: '=' object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - parameter: '@pRes' - comparison_operator: raw_comparison_operator: '=' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: ProcTestDefaults - parameter: '@p1' - comparison_operator: raw_comparison_operator: '=' - keyword: DEFAULT - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@statement' data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( keyword: max end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'SELECT 1'" - statement: execute_script_statement: keyword: EXEC bracketed: start_bracket: ( object_reference: parameter: '@statement' end_bracket: ) statement_terminator: ; - statement: execute_script_statement: keyword: EXEC bracketed: start_bracket: ( quoted_literal: "'DROP TABLE BoardInventory.BoardInventoryFact_Stage;'" end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_default_params.sql000066400000000000000000000001541451700765000263700ustar00rootroot00000000000000create or alter procedure name @param1 nvarchar(10) = 'test', @param2 int = 21 as begin return 1; end sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_default_params.yml000066400000000000000000000030241451700765000263710ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: d7b3e7a4024bf5794510cdb9fe85e46231c6e7f4a658d817d6396cd865f1b5b3 file: batch: create_procedure_statement: - keyword: create - keyword: or - keyword: alter - keyword: procedure - object_reference: naked_identifier: name - procedure_parameter_list: - parameter: '@param1' - data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - expression: quoted_literal: "'test'" - comma: ',' - parameter: '@param2' - data_type: data_type_identifier: int - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '21' - keyword: as - procedure_statement: statement: begin_end_block: - keyword: begin - statement: return_segment: keyword: return expression: numeric_literal: '1' statement_terminator: ; - keyword: end sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_no_return.sql000066400000000000000000000004661451700765000254220ustar00rootroot00000000000000CREATE PROCEDURE findjobs @nm sysname = NULL AS IF @nm IS NULL BEGIN PRINT 'You must give a user name' RETURN END ELSE BEGIN SELECT o.name, o.id, o.uid FROM sysobjects o INNER JOIN master..syslogins l ON o.uid = l.sid WHERE l.name = @nm END; sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_no_return.yml000066400000000000000000000105501451700765000254170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d1b3caca4f104b93e196ae36b12749b06d10cc6e41868e5c89903c3c5273bc51 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: naked_identifier: findjobs - procedure_parameter_list: parameter: '@nm' data_type: data_type_identifier: sysname comparison_operator: raw_comparison_operator: '=' expression: null_literal: 'NULL' - keyword: AS - procedure_statement: statement: if_then_statement: - if_clause: keyword: IF expression: - parameter: '@nm' - keyword: IS - keyword: 'NULL' - statement: begin_end_block: - keyword: BEGIN - statement: print_statement: keyword: PRINT expression: quoted_literal: "'You must give a user name'" - statement: return_segment: keyword: RETURN - keyword: END - keyword: ELSE - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: name - comma: ',' - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: id - comma: ',' - select_clause_element: column_reference: - naked_identifier: o - dot: . - naked_identifier: uid from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sysobjects alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: master - dot: . - dot: . - naked_identifier: syslogins alias_expression: naked_identifier: l - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: uid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: l - dot: . 
- naked_identifier: sid where_clause: keyword: WHERE expression: column_reference: - naked_identifier: l - dot: . - naked_identifier: name comparison_operator: raw_comparison_operator: '=' parameter: '@nm' - keyword: END - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_with_variable.sql000066400000000000000000000003211451700765000262150ustar00rootroot00000000000000CREATE OR ALTER FUNCTION [dbo].[CONVERT_ISO_WEEK_TO_UNIX] (@year INT, @week INT) RETURNS BIGINT AS BEGIN DECLARE @result BIGINT SET @result=4 RETURN @result + @year + @week END sqlfluff-2.3.5/test/fixtures/dialects/tsql/function_with_variable.yml000066400000000000000000000040041451700765000262210ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: a4f778729ae8ba6dcd284c585c2f3167abdc3a32fadc7682c3c620a00161f1db file: batch: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[CONVERT_ISO_WEEK_TO_UNIX]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@year' - data_type: data_type_identifier: INT - comma: ',' - parameter: '@week' - data_type: data_type_identifier: INT - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: BIGINT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: declare_segment: keyword: DECLARE parameter: '@result' data_type: data_type_identifier: BIGINT - statement: set_segment: keyword: SET parameter: '@result' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '4' - statement: return_segment: keyword: RETURN expression: - parameter: '@result' - binary_operator: + - parameter: '@year' - binary_operator: + - parameter: '@week' - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/functions_a.sql000066400000000000000000000011661451700765000240100ustar00rootroot00000000000000SELECT DATE(t) AS t_date, ROUND(b, 2) AS b_round, LEFT(RIGHT(s, 5), LEN(s + 6)) AS compound, DATEADD(month, -1, column1) AS column1_lastmonth, convert(varchar, tbl_b.column1, 23) AS column1_varchar FROM tbl_b GO CREATE FUNCTION dbo.RandDate ( @admit DATE ) RETURNS TABLE AS RETURN ( SELECT @admit FROM dbo.[RandomDate] ); GO CREATE FUNCTION dbo.no_paramters() RETURNS INT AS BEGIN RETURN 2; END GO /* SQLFluff should parse this FROM as a keyword and not as a function name */ SELECT a.* FROM ( SELECT FIN FROM enc ) AS a LEFT JOIN b ON a.FIN = b.FIN WHERE b.FIN IS NULL ; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/functions_a.yml000066400000000000000000000210611451700765000240060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 35529ba8804ad93991b801f34ea63c8abc1f6ece22ba11cd1d91ce801ef6f64a file: - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: DATE bracketed: start_bracket: ( expression: column_reference: naked_identifier: t end_bracket: ) alias_expression: keyword: AS naked_identifier: t_date - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ROUND bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: b - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) alias_expression: keyword: AS naked_identifier: b_round - comma: ',' - select_clause_element: function: function_name: keyword: LEFT bracketed: - start_bracket: ( - expression: function: function_name: keyword: RIGHT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: s - comma: ',' - expression: numeric_literal: '5' - end_bracket: ) - comma: ',' - expression: function: function_name: function_name_identifier: LEN bracketed: start_bracket: ( expression: column_reference: naked_identifier: s binary_operator: + numeric_literal: '6' end_bracket: ) - end_bracket: ) alias_expression: keyword: AS naked_identifier: compound - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - expression: column_reference: naked_identifier: column1 - end_bracket: ) alias_expression: keyword: AS naked_identifier: column1_lastmonth - comma: ',' - select_clause_element: function: function_name: keyword: convert bracketed: - start_bracket: ( - data_type: data_type_identifier: varchar - comma: ',' - expression: column_reference: - naked_identifier: tbl_b - dot: . - naked_identifier: column1 - comma: ',' - numeric_literal: '23' - end_bracket: ) alias_expression: keyword: AS naked_identifier: column1_varchar from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl_b - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: RandDate - function_parameter_list: bracketed: start_bracket: ( parameter: '@admit' data_type: data_type_identifier: DATE end_bracket: ) - keyword: RETURNS - keyword: TABLE - keyword: AS - procedure_statement: statement: return_segment: keyword: RETURN expression: bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: parameter: '@admit' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: dbo dot: . quoted_identifier: '[RandomDate]' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: no_paramters - function_parameter_list: bracketed: start_bracket: ( end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: INT - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: numeric_literal: '2' statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: FIN from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: enc end_bracket: ) alias_expression: keyword: AS naked_identifier: a join_clause: - keyword: LEFT - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: b - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: a - dot: . - naked_identifier: FIN - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: b - dot: . - naked_identifier: FIN where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: b - dot: . - naked_identifier: FIN - keyword: IS - keyword: 'NULL' statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/functions_agg.sql000066400000000000000000000006431451700765000243250ustar00rootroot00000000000000SELECT string_agg(t.v, '; ') within group (order by v) as column_name1 ,PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY t.Rate) OVER (PARTITION BY Name) AS MedianCont ,PERCENTILE_DISC(0.5) WITHIN GROUP (ORDER BY t.Rate) OVER (PARTITION BY Name) AS MedianDisc from table1 t group by employee_id HAVING MIN([ArrivalDt]) <= MAX([DischargeDt]) DROP TABLE #Mercury; sqlfluff-2.3.5/test/fixtures/dialects/tsql/functions_agg.yml000066400000000000000000000120771451700765000243330ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cd8f3e10bf068f2f584490bf849aea4d78e236541141370781db6c530c204886 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: keyword: string_agg bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: v - comma: ',' - expression: quoted_literal: "'; '" - end_bracket: ) within_group_clause: - keyword: within - keyword: group - bracketed: start_bracket: ( orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: v end_bracket: ) alias_expression: keyword: as naked_identifier: column_name1 - comma: ',' - select_clause_element: function: function_name: keyword: PERCENTILE_CONT bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: Rate end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: Name end_bracket: ) alias_expression: keyword: AS naked_identifier: MedianCont - comma: ',' - select_clause_element: function: function_name: keyword: PERCENTILE_DISC bracketed: start_bracket: ( expression: numeric_literal: '0.5' end_bracket: ) within_group_clause: - keyword: WITHIN - keyword: GROUP - bracketed: start_bracket: ( orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: Rate end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: naked_identifier: Name end_bracket: ) alias_expression: keyword: AS naked_identifier: MedianDisc from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: t groupby_clause: - keyword: group - keyword: by - column_reference: naked_identifier: employee_id having_clause: keyword: HAVING expression: - function: function_name: function_name_identifier: MIN bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[ArrivalDt]' end_bracket: ) - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[DischargeDt]' end_bracket: ) - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#Mercury' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/go_delimiters.sql000066400000000000000000000002701451700765000243210ustar00rootroot00000000000000-- It's possible to have a file starting with GO GO -- It's possible to have multiple GO between batches. SELECT foo FROM bar GO GO SELECT foo FROM bar GO GO GO SELECT foo FROM bar GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/go_delimiters.yml000066400000000000000000000034701451700765000243300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b5da83f2dfc435ece584287628e0fcc92d30a506e7f8ca3b44ac5f8deed4f9ff file: - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO - go_statement: keyword: GO - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: foo from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: bar - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/goto_statement.sql000066400000000000000000000000541451700765000245270ustar00rootroot00000000000000GOTO Branch_Three; Branch_Three: RETURN sqlfluff-2.3.5/test/fixtures/dialects/tsql/goto_statement.yml000066400000000000000000000012541451700765000245340ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 96f38c44bd2bbf296264d6719fca15641b7a0951aa17203ad06b7e98f3db981b file: batch: - statement: goto_statement: keyword: GOTO naked_identifier: Branch_Three - statement_terminator: ; - statement: label_segment: naked_identifier: Branch_Three colon: ':' - statement: return_segment: keyword: RETURN sqlfluff-2.3.5/test/fixtures/dialects/tsql/grant_deny_revoke.sql000066400000000000000000000014561451700765000252070ustar00rootroot00000000000000GRANT SELECT ON OBJECT::Person.Address TO RosaQdM; GO USE AdventureWorks2012; GRANT EXECUTE ON OBJECT::HumanResources.uspUpdateEmployeeHireInfo TO Recruiting11; GO GRANT REFERENCES (BusinessEntityID) ON OBJECT::HumanResources.vEmployee TO Wanida WITH GRANT OPTION; GO GRANT SELECT ON Person.Address TO RosaQdM; GO GRANT SELECT ON Person.Address TO [AdventureWorks2012\RosaQdM]; GO CREATE ROLE newrole ; GRANT EXECUTE ON dbo.uspGetBillOfMaterials TO newrole ; GO GRANT SELECT ON SCHEMA :: Sales TO Vendors; GO REVOKE SELECT ON SCHEMA :: Sales TO Vendors; GO DENY SELECT ON OBJECT::Person.Address TO RosaQdM; GO DENY EXECUTE ON OBJECT::HumanResources.uspUpdateEmployeeHireInfo TO Recruiting11; GO DENY REFERENCES (BusinessEntityID) ON OBJECT::HumanResources.vEmployee TO Wanida CASCADE; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/grant_deny_revoke.yml000066400000000000000000000130271451700765000252060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: f49b9b7af9353f7d07866b8964a00f9de08c3000e479e3d4a6349a5badd15bf9 file: - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: - statement: use_statement: keyword: USE database_reference: naked_identifier: AdventureWorks2012 - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: uspUpdateEmployeeHireInfo - keyword: TO - role_reference: naked_identifier: Recruiting11 - statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: REFERENCES - bracketed: start_bracket: ( column_reference: naked_identifier: BusinessEntityID end_bracket: ) - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: vEmployee - keyword: TO - role_reference: naked_identifier: Wanida - keyword: WITH - keyword: GRANT - keyword: OPTION statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: quoted_identifier: '[AdventureWorks2012\RosaQdM]' statement_terminator: ; - go_statement: keyword: GO - batch: - statement: create_role_statement: - keyword: CREATE - keyword: ROLE - role_reference: naked_identifier: newrole - statement_terminator: ; - statement: access_statement: - keyword: GRANT - keyword: EXECUTE - keyword: 'ON' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: uspGetBillOfMaterials - keyword: TO - role_reference: naked_identifier: newrole - statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: GRANT - keyword: SELECT - keyword: 'ON' - keyword: SCHEMA - casting_operator: '::' - object_reference: naked_identifier: Sales - keyword: TO - role_reference: naked_identifier: Vendors statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: REVOKE - keyword: SELECT - keyword: 'ON' - keyword: SCHEMA - casting_operator: '::' - object_reference: naked_identifier: Sales - keyword: TO - role_reference: naked_identifier: Vendors statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: SELECT - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: Person - dot: . - naked_identifier: Address - keyword: TO - role_reference: naked_identifier: RosaQdM statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: EXECUTE - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: uspUpdateEmployeeHireInfo - keyword: TO - role_reference: naked_identifier: Recruiting11 statement_terminator: ; - go_statement: keyword: GO - batch: statement: access_statement: - keyword: DENY - keyword: REFERENCES - bracketed: start_bracket: ( column_reference: naked_identifier: BusinessEntityID end_bracket: ) - keyword: 'ON' - keyword: OBJECT - casting_operator: '::' - object_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: vEmployee - keyword: TO - role_reference: naked_identifier: Wanida - keyword: CASCADE statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/group_by.sql000066400000000000000000000002671451700765000233270ustar00rootroot00000000000000CREATE TABLE #n WITH (DISTRIBUTION = ROUND_ROBIN) AS ( Select acto.ActionDTS FROM Orders_Action acto ) SELECT t.actiondts FROM #n t GROUP BY t.ActionDTS; DROP TABLE #n; sqlfluff-2.3.5/test/fixtures/dialects/tsql/group_by.yml000066400000000000000000000047211451700765000233300ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: ad0814659a8258c79cbd4ff066216c7f4d839397e4f567760a5bf98387b5c454 file: batch: - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: hash_identifier: '#n' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: ROUND_ROBIN end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: keyword: Select select_clause_element: column_reference: - naked_identifier: acto - dot: . - naked_identifier: ActionDTS from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Orders_Action alias_expression: naked_identifier: acto end_bracket: ) - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: actiondts from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#n' alias_expression: naked_identifier: t groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: ActionDTS statement_terminator: ; - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: hash_identifier: '#n' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/hints.sql000066400000000000000000000052341451700765000226250ustar00rootroot00000000000000SELECT * FROM Sales.Customer AS c INNER JOIN Sales.CustomerAddress AS ca ON c.CustomerID = ca.CustomerID WHERE TerritoryID = 5 OPTION (MERGE JOIN); GO CREATE PROCEDURE dbo.RetrievePersonAddress @city_name NVARCHAR(30), @postal_code NVARCHAR(15) AS SELECT * FROM Person.Address WHERE City = @city_name AND PostalCode = @postal_code OPTION ( OPTIMIZE FOR (@city_name = 'Seattle', @postal_code UNKNOWN) ); GO --Creates an infinite loop WITH cte (CustomerID, PersonID, StoreID) AS ( SELECT CustomerID, PersonID, StoreID FROM Sales.Customer WHERE PersonID IS NOT NULL UNION ALL SELECT cte.CustomerID, cte.PersonID, cte.StoreID FROM cte JOIN Sales.Customer AS e ON cte.PersonID = e.CustomerID ) --Uses MAXRECURSION to limit the recursive levels to 2 SELECT CustomerID, PersonID, StoreID FROM cte OPTION (MAXRECURSION 2); GO SELECT * FROM HumanResources.Employee AS e1 UNION SELECT * FROM HumanResources.Employee AS e2 OPTION (MERGE UNION); GO SELECT ProductID, OrderQty, SUM(LineTotal) AS Total FROM Sales.SalesOrderDetail WHERE UnitPrice < 5 GROUP BY ProductID, OrderQty ORDER BY ProductID, OrderQty OPTION (HASH GROUP, FAST 10); GO SELECT ProductID, OrderQty, SUM(LineTotal) AS Total FROM Sales.SalesOrderDetail WHERE UnitPrice < 5 GROUP BY ProductID, OrderQty ORDER BY ProductID, OrderQty OPTION (MAXDOP 2); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (RECOMPILE, USE HINT ('ASSUME_MIN_SELECTIVITY_FOR_FILTER_ESTIMATES', 'DISABLE_PARAMETER_SNIFFING')); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (QUERYTRACEON 4199); GO SELECT * FROM Person.Address WHERE City = 'SEATTLE' AND PostalCode = 98104 OPTION (QUERYTRACEON 4199, QUERYTRACEON 4137); GO UPDATE Production.Product WITH (TABLOCK) SET ListPrice = ListPrice * 1.10 WHERE ProductNumber LIKE 'BK-%'; GO SELECT * FROM Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESEEK) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT h.SalesOrderID, h.TotalDue, d.OrderQty FROM Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESEEK (PK_SalesOrderDetail_SalesOrderID_SalesOrderDetailID (SalesOrderID))) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT h.SalesOrderID, h.TotalDue, d.OrderQty FROM Sales.SalesOrderHeader AS h INNER JOIN Sales.SalesOrderDetail AS d WITH (FORCESCAN) ON h.SalesOrderID = d.SalesOrderID WHERE h.TotalDue > 100 AND (d.OrderQty > 5 OR d.LineTotal < 1000.00); GO SELECT ID FROM dbo.tableX WITH(NOLOCK) GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/hints.yml000066400000000000000000000757651451700765000226470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 6fe6077b68d1043a06174c757f1566f29133888aab96b254faa753d3b667b133 file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer alias_expression: keyword: AS naked_identifier: c join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: CustomerAddress alias_expression: keyword: AS naked_identifier: ca - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: c - dot: . - naked_identifier: CustomerID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: ca - dot: . - naked_identifier: CustomerID where_clause: keyword: WHERE expression: column_reference: naked_identifier: TerritoryID comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: MERGE - keyword: JOIN end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: RetrievePersonAddress - procedure_parameter_list: - parameter: '@city_name' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '30' end_bracket: ) - comma: ',' - parameter: '@postal_code' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '15' end_bracket: ) - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . 
- naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - parameter: '@city_name' - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - parameter: '@postal_code' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: OPTIMIZE - keyword: FOR - bracketed: - start_bracket: ( - parameter: '@city_name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Seattle'" - comma: ',' - parameter: '@postal_code' - keyword: UNKNOWN - end_bracket: ) end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: with_compound_statement: keyword: WITH common_table_expression: naked_identifier: cte cte_column_list: bracketed: start_bracket: ( identifier_list: - naked_identifier: CustomerID - comma: ',' - naked_identifier: PersonID - comma: ',' - naked_identifier: StoreID end_bracket: ) keyword: AS bracketed: start_bracket: ( set_expression: - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer where_clause: keyword: WHERE expression: - column_reference: naked_identifier: PersonID - keyword: IS - keyword: NOT - keyword: 'NULL' - set_operator: - keyword: UNION - keyword: ALL - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: - naked_identifier: cte - dot: . - naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer alias_expression: keyword: AS naked_identifier: e join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: cte - dot: . - naked_identifier: PersonID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . 
- naked_identifier: CustomerID end_bracket: ) select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: CustomerID - comma: ',' - select_clause_element: column_reference: naked_identifier: PersonID - comma: ',' - select_clause_element: column_reference: naked_identifier: StoreID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: cte option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: MAXRECURSION numeric_literal: '2' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: set_expression: - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: keyword: AS naked_identifier: e1 - set_operator: keyword: UNION - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: keyword: AS naked_identifier: e2 - option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: - keyword: MERGE - keyword: UNION end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: LineTotal end_bracket: ) alias_expression: keyword: AS naked_identifier: Total from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: column_reference: naked_identifier: UnitPrice comparison_operator: raw_comparison_operator: < numeric_literal: '5' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: - keyword: HASH - keyword: GROUP - comma: ',' - query_hint_segment: keyword: FAST numeric_literal: '10' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderQty - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: LineTotal end_bracket: ) alias_expression: keyword: AS naked_identifier: Total from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail where_clause: keyword: WHERE expression: column_reference: naked_identifier: UnitPrice comparison_operator: raw_comparison_operator: < numeric_literal: '5' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: ProductID - comma: ',' - column_reference: naked_identifier: OrderQty option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: MAXDOP numeric_literal: '2' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: RECOMPILE - comma: ',' - query_hint_segment: - keyword: USE - keyword: HINT - bracketed: - start_bracket: ( - quoted_literal: "'ASSUME_MIN_SELECTIVITY_FOR_FILTER_ESTIMATES'" - comma: ',' - quoted_literal: "'DISABLE_PARAMETER_SNIFFING'" - end_bracket: ) - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . 
- naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4199' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Address where_clause: keyword: WHERE expression: - column_reference: naked_identifier: City - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'SEATTLE'" - binary_operator: AND - column_reference: naked_identifier: PostalCode - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '98104' option_clause: keyword: OPTION bracketed: - start_bracket: ( - query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4199' - comma: ',' - query_hint_segment: keyword: QUERYTRACEON numeric_literal: '4137' - end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: Production - dot: . - naked_identifier: Product post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: TABLOCK end_bracket: ) set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: ListPrice assignment_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: ListPrice binary_operator: '*' numeric_literal: '1.10' where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductNumber keyword: LIKE quoted_literal: "'BK-%'" statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESEEK end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . 
- naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue - comma: ',' - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESEEK bracketed: start_bracket: ( index_reference: naked_identifier: PK_SalesOrderDetail_SalesOrderID_SalesOrderDetailID bracketed: start_bracket: ( naked_identifier: SalesOrderID end_bracket: ) end_bracket: ) end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue - comma: ',' - select_clause_element: column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: h join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: d post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: FORCESCAN end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: h - dot: . 
- naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: d - dot: . - naked_identifier: SalesOrderID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: h - dot: . - naked_identifier: TotalDue comparison_operator: raw_comparison_operator: '>' numeric_literal: '100' binary_operator: AND bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: d - dot: . - naked_identifier: OrderQty - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '5' - binary_operator: OR - column_reference: - naked_identifier: d - dot: . - naked_identifier: LineTotal - comparison_operator: raw_comparison_operator: < - numeric_literal: '1000.00' end_bracket: ) statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: ID from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: tableX post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: NOLOCK end_bracket: ) - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/if_else.sql000066400000000000000000000007621451700765000231070ustar00rootroot00000000000000IF 1 <= (SELECT Weight from DimProduct WHERE ProductKey = 1) SELECT ProductKey, EnglishDescription, Weight, 'This product is too heavy to ship and is only available for pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 ELSE SELECT ProductKey, EnglishDescription, Weight, 'This product is available for shipping or pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 if exists (select * from #a union all select * from #b) set @var = 1; sqlfluff-2.3.5/test/fixtures/dialects/tsql/if_else.yml000066400000000000000000000135731451700765000231150ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: a434408ffdec3fdb7fdd83fea832c56515bd7d363716de0a1723a87358567d20 file: batch: - statement: if_then_statement: - if_clause: keyword: IF expression: numeric_literal: '1' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Weight from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is too heavy to ship and is only available\ \ for pickup.'" alias_expression: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: ELSE - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is available for shipping or pickup.'" alias_expression: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - statement: if_then_statement: if_clause: keyword: if expression: keyword: exists bracketed: start_bracket: ( set_expression: - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#a' - set_operator: - keyword: union - keyword: all - select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#b' end_bracket: ) statement: set_segment: keyword: set parameter: '@var' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/if_else_begin_end.sql000066400000000000000000000007441451700765000251010ustar00rootroot00000000000000IF 1 <= (SELECT Weight from DimProduct WHERE ProductKey = 1) BEGIN SELECT ProductKey, EnglishDescription, Weight, 'This product is too heavy to ship and is only available for pickup.' 
AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 END ELSE BEGIN SELECT ProductKey, EnglishDescription, Weight, 'This product is available for shipping or pickup.' AS ShippingStatus FROM DimProduct WHERE ProductKey = 1 END sqlfluff-2.3.5/test/fixtures/dialects/tsql/if_else_begin_end.yml000066400000000000000000000112611451700765000250770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 4f41c12e1919d54180dd06f40c06e2fa380cfb1a5d33d48bdc07cadb6c15f085 file: batch: statement: if_then_statement: - if_clause: keyword: IF expression: numeric_literal: '1' comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: Weight from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' end_bracket: ) - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is too heavy to ship and is only\ \ available for pickup.'" alias_expression: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: END - keyword: ELSE - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductKey - comma: ',' - select_clause_element: column_reference: naked_identifier: EnglishDescription - comma: ',' - select_clause_element: column_reference: naked_identifier: Weight - comma: ',' - select_clause_element: quoted_literal: "'This product is available for shipping or pickup.'" alias_expression: keyword: AS naked_identifier: ShippingStatus from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: DimProduct where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductKey comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_default.sql000066400000000000000000000001231451700765000245000ustar00rootroot00000000000000-- Simple statement for setting default values INSERT INTO mytable DEFAULT VALUES; sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_default.yml000066400000000000000000000011621451700765000245060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by 
# hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 28093a4e7856024d44d9aab2f51c43db14664b9b0e98cb86ec6d744581cc9c52 file: batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: mytable - keyword: DEFAULT - keyword: VALUES statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_statement.sql000066400000000000000000000025111451700765000250630ustar00rootroot00000000000000INSERT INTO INTER.ECDC_CASES ( [COUNTRY], [COUNTRY_CODE], [CONTINENT], [POPULATION], [INDICATOR], [WEEKLY_COUNT], [YEAR_WEEK], [WEEK_START], [WEEK_END], [RATE_14_DAY], [CUMULATIVE_COUNT], [SOURCE] ) SELECT [COUNTRY], [COUNTRY_CODE], [CONTINENT], CAST([POPULATION] AS BIGINT) AS [POPULATION], [INDICATOR], CAST([WEEKLY_COUNT] AS BIGINT) AS [WEEKLY_COUNT], [YEAR_WEEK], CAST([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2)) AS DATE) AS [WEEK_START], CAST([dbo].[WEEK_END]([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2))) AS DATE ) AS [WEEK_END], CAST([RATE_14_DAY] AS FLOAT) AS [RATE_14_DAY], CAST([CUMULATIVE_COUNT] AS BIGINT) AS [CUMULATIVE_COUNT], [SOURCE] FROM STAGE.ECDC_CASES GO BEGIN INSERT INTO HumanResources.NewEmployee SELECT EmpID, LastName, FirstName, Phone, Address, City, StateProvince, PostalCode, CurrentFlag FROM EmployeeTemp; END GO INSERT INTO HumanResources.NewEmployee SELECT EmpID, LastName, FirstName, Phone, Address, City, StateProvince, PostalCode, CurrentFlag FROM EmployeeTemp; GO INSERT INTO HumanResources.NewEmployee WITH(TABLOCK) OUTPUT * INTO Results EXEC FindEmployeesFunc @lastName = 'Picard' GO INSERT HumanResources.NewEmployee (LastName, FirstName) values ('Kirk', 'James') GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_statement.yml000066400000000000000000000361521451700765000250750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: db61aae8688f5327aea6bccd1e5d6bdf1e7ce7d7dd2b857143a23b7b8a43bce9 file: - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: INTER - dot: . 
- naked_identifier: ECDC_CASES - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - column_reference: quoted_identifier: '[POPULATION]' - comma: ',' - column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - column_reference: quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_START]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_END]' - comma: ',' - column_reference: quoted_identifier: '[RATE_14_DAY]' - comma: ',' - column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[SOURCE]' - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[POPULATION]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[POPULATION]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[WEEKLY_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEK_START]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[WEEK_END]' bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . 
function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEK_END]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[RATE_14_DAY]' keyword: AS data_type: data_type_identifier: FLOAT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[RATE_14_DAY]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[SOURCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: ECDC_CASES - go_statement: keyword: GO - batch: statement: begin_end_block: - keyword: BEGIN - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: naked_identifier: Phone - comma: ',' - select_clause_element: column_reference: naked_identifier: Address - comma: ',' - select_clause_element: column_reference: naked_identifier: City - comma: ',' - select_clause_element: column_reference: naked_identifier: StateProvince - comma: ',' - select_clause_element: column_reference: naked_identifier: PostalCode - comma: ',' - select_clause_element: column_reference: naked_identifier: CurrentFlag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EmployeeTemp statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: NewEmployee - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: naked_identifier: Phone - comma: ',' - select_clause_element: column_reference: naked_identifier: Address - comma: ',' - select_clause_element: column_reference: naked_identifier: City - comma: ',' - select_clause_element: column_reference: naked_identifier: StateProvince - comma: ',' - select_clause_element: column_reference: naked_identifier: PostalCode - comma: ',' - select_clause_element: column_reference: naked_identifier: CurrentFlag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: EmployeeTemp statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee - post_table_expression: keyword: WITH bracketed: start_bracket: ( query_hint_segment: keyword: TABLOCK end_bracket: ) - output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: star: '*' - keyword: INTO - table_reference: naked_identifier: Results - execute_script_statement: keyword: EXEC object_reference: naked_identifier: FindEmployeesFunc parameter: '@lastName' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Picard'" - go_statement: keyword: GO - batch: statement: insert_statement: keyword: INSERT table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: NewEmployee bracketed: - start_bracket: ( - column_reference: naked_identifier: LastName - comma: ',' - column_reference: naked_identifier: FirstName - end_bracket: ) values_clause: keyword: values bracketed: - start_bracket: ( - quoted_literal: "'Kirk'" - comma: ',' - quoted_literal: "'James'" - end_bracket: ) - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_with_identity_insert.sql000066400000000000000000000001741451700765000273320ustar00rootroot00000000000000SET IDENTITY_INSERT someTable ON; INSERT INTO someTable (ID, Value) VALUES (1, 2); SET IDENTITY_INSERT someTable OFF; sqlfluff-2.3.5/test/fixtures/dialects/tsql/insert_with_identity_insert.yml000066400000000000000000000026161451700765000273370ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 252552cfe5864de52f3e01b4db756fa4e21bdf58b0f2d0468bdfffa4c65ea343 file: batch: - statement: set_segment: - keyword: SET - keyword: IDENTITY_INSERT - table_reference: naked_identifier: someTable - keyword: 'ON' - statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: someTable - bracketed: - start_bracket: ( - column_reference: naked_identifier: ID - comma: ',' - column_reference: naked_identifier: Value - end_bracket: ) - values_clause: keyword: VALUES bracketed: - start_bracket: ( - numeric_literal: '1' - comma: ',' - numeric_literal: '2' - end_bracket: ) - statement_terminator: ; - statement: set_segment: - keyword: SET - keyword: IDENTITY_INSERT - table_reference: naked_identifier: someTable - keyword: 'OFF' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/join_hints.sql000066400000000000000000000004711451700765000236420ustar00rootroot00000000000000-- HASH JOIN SELECT table1.col FROM table1 INNER HASH JOIN table2 ON table1.col = table2.col; -- OUTER MERGE JOIN SELECT table1.col FROM table1 FULL OUTER MERGE JOIN table2 ON table1.col = table2.col; -- LEFT LOOP JOIN SELECT table1.col FROM table1 LEFT LOOP JOIN table2 ON table1.col = table2.col; sqlfluff-2.3.5/test/fixtures/dialects/tsql/join_hints.yml000066400000000000000000000100671451700765000236460ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 42b1d9e81e443eb8a7372ec44596ca40406cfc582fb11a9e1bb04dafac2a41fd file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: INNER - keyword: HASH - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: FULL - keyword: OUTER - keyword: MERGE - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: - naked_identifier: table1 - dot: . 
- naked_identifier: col from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 join_clause: - keyword: LEFT - keyword: LOOP - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table2 - dot: . - naked_identifier: col statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/merge.sql000066400000000000000000000116241451700765000225770ustar00rootroot00000000000000merge schema1.table1 dst using schema1.table1 src on src.rn = 1 and dst.e_date_to is null and dst.cc_id = src.cc_id when matched then update set dst.l_id = src.l_id, dst.e_date_to = src.e_date_from go with source_data as ( select cc_id , cc_name , cc_description from DW.sch1.tbl1 where e_date_to is null and l_id >= dd and l_id <= dd ) merge DM.sch1.tbl2 dst using source_data src on src.cc_id = dst.cc_id when matched then update set dst.cc_name = src.cc_name , dst.cc_description = src.cc_description when not matched then insert ( cc_id , cc_name , cc_description ) values ( cc_id , cc_name , cc_description ); go merge DW.tt.dd dst using LA.tt.dd src on dst.s_id = src.s_id and dst.c_id = src.c_id when matched then update set dst.c_name = src.c_name , dst.col1 = src.col1 , dst.col2 = src.col2 when not matched by target and src.c_id is not null then insert ( s_id , c_id , c_name , col1 , col2 ) values ( src.s_id , src.c_id , src.c_name , src.col1 , src.col2 ) when not matched by source and s_id =1 in ( select s_id from LA.g.tbl3) then update set dst.col1 = 'N' , dst.col2 = col2 ; go MERGE Production.UnitMeasure AS tgt USING (SELECT @UnitMeasureCode, @Name) as src (UnitMeasureCode, Name) ON (tgt.UnitMeasureCode = src.UnitMeasureCode) WHEN MATCHED THEN UPDATE SET Name = src.Name WHEN NOT MATCHED THEN INSERT (UnitMeasureCode, Name) VALUES (src.UnitMeasureCode, src.Name) OUTPUT deleted.*, $action, inserted.* INTO #MyTempTable; GO MERGE Production.ProductInventory AS tgt USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate = @OrderDate GROUP BY ProductID) as src (ProductID, OrderQty) ON (tgt.ProductID = src.ProductID) WHEN MATCHED AND tgt.Quantity - src.OrderQty <= 0 THEN DELETE WHEN MATCHED THEN UPDATE SET tgt.Quantity = tgt.Quantity - src.OrderQty, tgt.ModifiedDate = GETDATE() OUTPUT $action, Inserted.ProductID, Inserted.Quantity, Inserted.ModifiedDate, Deleted.ProductID, Deleted.Quantity, Deleted.ModifiedDate; GO MERGE Production.ProductInventory AS pi USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate BETWEEN '20030701' AND '20030731' GROUP BY ProductID) AS src (ProductID, OrderQty) ON pi.ProductID = src.ProductID WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0 THEN DELETE OUTPUT $action, Inserted.ProductID, Inserted.LocationID, Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty; GO insert into sch1.table1 ( columnC ) select upd.columnC from ( merge sch1.table1 trg using ( select gr.columnC from sch2.table2 as gr ) src on trg.columnC = src.columnC when 
matched then update set columnC = src.columnC output inserted.columnC ) as upd ; GO MERGE Production.UnitMeasure WITH (PAGLOCK) AS tgt USING (SELECT @UnitMeasureCode, @Name) as src (UnitMeasureCode, Name) ON (tgt.UnitMeasureCode = src.UnitMeasureCode) WHEN MATCHED THEN UPDATE SET Name = src.Name WHEN NOT MATCHED THEN INSERT (UnitMeasureCode, Name) VALUES (src.UnitMeasureCode, src.Name) OUTPUT deleted.*, $action, inserted.* INTO #MyTempTable; GO MERGE INTO Production.ProductInventory WITH (ROWLOCK, INDEX(myindex, myindex2)) AS pi USING (SELECT ProductID, SUM(OrderQty) FROM Sales.SalesOrderDetail AS sod JOIN Sales.SalesOrderHeader AS soh ON sod.SalesOrderID = soh.SalesOrderID AND soh.OrderDate BETWEEN '20030701' AND '20030731' GROUP BY ProductID) AS src (ProductID, OrderQty) ON pi.ProductID = src.ProductID WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0 THEN DELETE OUTPUT $action, Inserted.ProductID, Inserted.LocationID, Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/merge.yml000066400000000000000000001370361451700765000226070ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: eeed743b0f3ee6e127b892a23031b17bc824eeb3d889397a2755b167a44cea34 file: - batch: statement: merge_statement: - keyword: merge - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: dst - keyword: using - table_reference: - naked_identifier: schema1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: rn - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: e_date_to - keyword: is - keyword: 'null' - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: l_id assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: l_id - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: e_date_to assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . 
- naked_identifier: e_date_from - go_statement: keyword: go - batch: statement: with_compound_statement: keyword: with common_table_expression: naked_identifier: source_data keyword: as bracketed: start_bracket: ( select_statement: select_clause: - keyword: select - select_clause_element: column_reference: naked_identifier: cc_id - comma: ',' - select_clause_element: column_reference: naked_identifier: cc_name - comma: ',' - select_clause_element: column_reference: naked_identifier: cc_description from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: DW - dot: . - naked_identifier: sch1 - dot: . - naked_identifier: tbl1 where_clause: keyword: where expression: - column_reference: naked_identifier: e_date_to - keyword: is - keyword: 'null' - binary_operator: and - column_reference: naked_identifier: l_id - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - column_reference: naked_identifier: dd - binary_operator: and - column_reference: naked_identifier: l_id - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - column_reference: naked_identifier: dd end_bracket: ) merge_statement: - keyword: merge - table_reference: - naked_identifier: DM - dot: . - naked_identifier: sch1 - dot: . - naked_identifier: tbl2 - alias_expression: naked_identifier: dst - keyword: using - table_reference: naked_identifier: source_data - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_id - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_name - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: cc_description assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: cc_description merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: then - merge_insert_clause: - keyword: insert - bracketed: - start_bracket: ( - column_reference: naked_identifier: cc_id - comma: ',' - column_reference: naked_identifier: cc_name - comma: ',' - column_reference: naked_identifier: cc_description - end_bracket: ) - keyword: values - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: cc_id - comma: ',' - expression: column_reference: naked_identifier: cc_name - comma: ',' - expression: column_reference: naked_identifier: cc_description - end_bracket: ) statement_terminator: ; - go_statement: keyword: go - batch: statement: merge_statement: - keyword: merge - table_reference: - naked_identifier: DW - dot: . - naked_identifier: tt - dot: . - naked_identifier: dd - alias_expression: naked_identifier: dst - keyword: using - table_reference: - naked_identifier: LA - dot: . - naked_identifier: tt - dot: . 
- naked_identifier: dd - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: dst - dot: . - naked_identifier: s_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: s_id - binary_operator: and - column_reference: - naked_identifier: dst - dot: . - naked_identifier: c_id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - merge_match: - merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: c_name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_name - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col1 assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col1 - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col2 assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col2 - merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: by - keyword: target - keyword: and - expression: - column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - keyword: is - keyword: not - keyword: 'null' - keyword: then - merge_insert_clause: - keyword: insert - bracketed: - start_bracket: ( - column_reference: naked_identifier: s_id - comma: ',' - column_reference: naked_identifier: c_id - comma: ',' - column_reference: naked_identifier: c_name - comma: ',' - column_reference: naked_identifier: col1 - comma: ',' - column_reference: naked_identifier: col2 - end_bracket: ) - keyword: values - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: s_id - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_id - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: c_name - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col1 - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: col2 - end_bracket: ) - merge_when_not_matched_clause: - keyword: when - keyword: not - keyword: matched - keyword: by - keyword: source - keyword: and - expression: column_reference: naked_identifier: s_id comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' keyword: in bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: s_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: LA - dot: . - naked_identifier: g - dot: . - naked_identifier: tbl3 end_bracket: ) - keyword: then - merge_update_clause: keyword: update set_clause_list: - keyword: set - set_clause: column_reference: - naked_identifier: dst - dot: . 
- naked_identifier: col1 assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "'N'" - comma: ',' - set_clause: column_reference: - naked_identifier: dst - dot: . - naked_identifier: col2 assignment_operator: raw_comparison_operator: '=' expression: column_reference: naked_identifier: col2 statement_terminator: ; - go_statement: keyword: go - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: UnitMeasure - alias_expression: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@UnitMeasureCode' - comma: ',' - select_clause_element: parameter: '@Name' end_bracket: ) - alias_expression: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: UnitMeasureCode - comma: ',' - naked_identifier: Name end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: UnitMeasureCode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: Name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - bracketed: - start_bracket: ( - column_reference: naked_identifier: UnitMeasureCode - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: VALUES - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name - end_bracket: ) output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: naked_identifier: deleted dot: . star: '*' - comma: ',' - column_reference: variable_identifier: $action - comma: ',' - wildcard_expression: wildcard_identifier: naked_identifier: inserted dot: . star: '*' - keyword: INTO - table_reference: hash_identifier: '#MyTempTable' statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductInventory - alias_expression: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . - naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . - naked_identifier: OrderDate - comparison_operator: raw_comparison_operator: '=' - parameter: '@OrderDate' groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID end_bracket: ) - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: - keyword: SET - set_clause: column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comma: ',' - set_clause: column_reference: - naked_identifier: tgt - dot: . - naked_identifier: ModifiedDate assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: Quantity - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ModifiedDate - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: ModifiedDate statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductInventory - alias_expression: keyword: AS naked_identifier: pi - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . - naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . - naked_identifier: OrderDate - keyword: BETWEEN - quoted_literal: "'20030701'" - keyword: AND - quoted_literal: "'20030731'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: keyword: AS naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: LocationID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . 
- naked_identifier: Quantity - alias_expression: keyword: AS naked_identifier: NewQty - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - alias_expression: keyword: AS naked_identifier: PreviousQty statement_terminator: ; - go_statement: keyword: GO - batch: statement: insert_statement: - keyword: insert - keyword: into - table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: table1 - bracketed: start_bracket: ( column_reference: naked_identifier: columnC end_bracket: ) - select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: upd - dot: . - naked_identifier: columnC from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( merge_statement: - keyword: merge - table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: table1 - alias_expression: naked_identifier: trg - keyword: using - bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: column_reference: - naked_identifier: gr - dot: . - naked_identifier: columnC from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch2 - dot: . - naked_identifier: table2 alias_expression: keyword: as naked_identifier: gr end_bracket: ) - alias_expression: naked_identifier: src - join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: trg - dot: . - naked_identifier: columnC - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: columnC - merge_match: merge_when_matched_clause: - keyword: when - keyword: matched - keyword: then - merge_update_clause: keyword: update set_clause_list: keyword: set set_clause: column_reference: naked_identifier: columnC assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: columnC output_clause: keyword: output column_reference: - naked_identifier: inserted - dot: . - naked_identifier: columnC end_bracket: ) alias_expression: keyword: as naked_identifier: upd statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - table_reference: - naked_identifier: Production - dot: . - naked_identifier: UnitMeasure - keyword: WITH - bracketed: start_bracket: ( query_hint_segment: keyword: PAGLOCK end_bracket: ) - alias_expression: keyword: AS naked_identifier: tgt - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: parameter: '@UnitMeasureCode' - comma: ',' - select_clause_element: parameter: '@Name' end_bracket: ) - alias_expression: keyword: as naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: UnitMeasureCode - comma: ',' - naked_identifier: Name end_bracket: ) - join_on_condition: keyword: 'ON' bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: tgt - dot: . - naked_identifier: UnitMeasureCode - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . 
- naked_identifier: UnitMeasureCode end_bracket: ) - merge_match: merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: Name assignment_operator: raw_comparison_operator: '=' expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name merge_when_not_matched_clause: - keyword: WHEN - keyword: NOT - keyword: MATCHED - keyword: THEN - merge_insert_clause: - keyword: INSERT - bracketed: - start_bracket: ( - column_reference: naked_identifier: UnitMeasureCode - comma: ',' - column_reference: naked_identifier: Name - end_bracket: ) - keyword: VALUES - bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: UnitMeasureCode - comma: ',' - expression: column_reference: - naked_identifier: src - dot: . - naked_identifier: Name - end_bracket: ) output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: naked_identifier: deleted dot: . star: '*' - comma: ',' - column_reference: variable_identifier: $action - comma: ',' - wildcard_expression: wildcard_identifier: naked_identifier: inserted dot: . star: '*' - keyword: INTO - table_reference: hash_identifier: '#MyTempTable' statement_terminator: ; - go_statement: keyword: GO - batch: statement: merge_statement: - keyword: MERGE - keyword: INTO - table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductInventory - keyword: WITH - bracketed: - start_bracket: ( - query_hint_segment: keyword: ROWLOCK - comma: ',' - query_hint_segment: keyword: INDEX bracketed: - start_bracket: ( - index_reference: naked_identifier: myindex - comma: ',' - index_reference: naked_identifier: myindex2 - end_bracket: ) - end_bracket: ) - alias_expression: keyword: AS naked_identifier: pi - keyword: USING - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductID - comma: ',' - select_clause_element: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: naked_identifier: OrderQty end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderDetail alias_expression: keyword: AS naked_identifier: sod join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader alias_expression: keyword: AS naked_identifier: soh join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: sod - dot: . - naked_identifier: SalesOrderID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: soh - dot: . - naked_identifier: SalesOrderID - binary_operator: AND - column_reference: - naked_identifier: soh - dot: . 
- naked_identifier: OrderDate - keyword: BETWEEN - quoted_literal: "'20030701'" - keyword: AND - quoted_literal: "'20030731'" groupby_clause: - keyword: GROUP - keyword: BY - column_reference: naked_identifier: ProductID end_bracket: ) - alias_expression: keyword: AS naked_identifier: src bracketed: start_bracket: ( identifier_list: - naked_identifier: ProductID - comma: ',' - naked_identifier: OrderQty end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: ProductID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: ProductID - merge_match: - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_update_clause: keyword: UPDATE set_clause_list: keyword: SET set_clause: column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity assignment_operator: raw_comparison_operator: '=' expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - merge_when_matched_clause: - keyword: WHEN - keyword: MATCHED - keyword: AND - expression: - column_reference: - naked_identifier: pi - dot: . - naked_identifier: Quantity - binary_operator: '-' - column_reference: - naked_identifier: src - dot: . - naked_identifier: OrderQty - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '0' - keyword: THEN - merge_delete_clause: keyword: DELETE - output_clause: - keyword: OUTPUT - column_reference: variable_identifier: $action - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: ProductID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: LocationID - comma: ',' - column_reference: - naked_identifier: Inserted - dot: . - naked_identifier: Quantity - alias_expression: keyword: AS naked_identifier: NewQty - comma: ',' - column_reference: - naked_identifier: Deleted - dot: . - naked_identifier: Quantity - alias_expression: keyword: AS naked_identifier: PreviousQty statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/minimal_function.sql000066400000000000000000000001631451700765000250270ustar00rootroot00000000000000CREATE OR ALTER FUNCTION [dbo].[add] (@add_1 int, @add_2 int) RETURNS integer AS BEGIN RETURN @add_1 + @add_2 END sqlfluff-2.3.5/test/fixtures/dialects/tsql/minimal_function.yml000066400000000000000000000026671451700765000250440ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: c1ee4c4a5132279380bf93678e9edc3063d5bd6e6c5b1e84652c8fd36d2301c9 file: batch: statement: create_function_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[add]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@add_1' - data_type: data_type_identifier: int - comma: ',' - parameter: '@add_2' - data_type: data_type_identifier: int - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: - parameter: '@add_1' - binary_operator: + - parameter: '@add_2' - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/minimal_function_no_alter.sql000066400000000000000000000004511451700765000267120ustar00rootroot00000000000000-- including just in case; Azure Synapse Analytics does not support OR ALTER -- https://docs.microsoft.com/en-us/sql/t-sql/statements/create-function-sql-data-warehouse?view=aps-pdw-2016-au7 CREATE FUNCTION [dbo].[add] (@add_1 int, @add_2 int) RETURNS integer AS BEGIN RETURN @add_1 + @add_2 END sqlfluff-2.3.5/test/fixtures/dialects/tsql/minimal_function_no_alter.yml000066400000000000000000000026141451700765000267170ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7ef5fdb06c6bf32c08a8b240339f63760eab274afdfd3921f0881cb69c18ff13 file: batch: statement: create_function_statement: - keyword: CREATE - keyword: FUNCTION - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[add]' - function_parameter_list: bracketed: - start_bracket: ( - parameter: '@add_1' - data_type: data_type_identifier: int - comma: ',' - parameter: '@add_2' - data_type: data_type_identifier: int - end_bracket: ) - keyword: RETURNS - data_type: data_type_identifier: integer - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: return_segment: keyword: RETURN expression: - parameter: '@add_1' - binary_operator: + - parameter: '@add_2' - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/multi_statement_without_semicolon.sql000066400000000000000000000000541451700765000305440ustar00rootroot00000000000000select a from tbl1 GO select b from tbl2 GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/multi_statement_without_semicolon.yml000066400000000000000000000023741451700765000305550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 00434c5df2aba3c7d90dd38aa4ffc873f52105e27a1d2df9496025becb226785 file: - batch: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/nested_joins.sql000066400000000000000000000011421451700765000241560ustar00rootroot00000000000000SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN I LEFT OUTER JOIN P ON I.Pcd = P.Iid ON BA.Iid = I.Bcd; GO SELECT 1 FROM BA RIGHT OUTER JOIN I LEFT OUTER JOIN P AS P_1 LEFT OUTER JOIN IP AS IP_1 ON P_1.NID = IP_1.NID ON I.PID = CAST(P_1.IDEID AS varchar) LEFT OUTER JOIN P AS P_2 LEFT OUTER JOIN IP AS IP_2 ON P_2.NID = IP_2.NID ON I.SecondaryPID = CAST(P_2.IDEID AS varchar) ON CAST(BA.IDEID AS varchar) = I.BAID SELECT 1 AS RegionCode FROM BA LEFT OUTER JOIN ( I JOIN P ON I.Pcd = P.Iid ) ON BA.Iid = I.Bcd; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/nested_joins.yml000066400000000000000000000245571451700765000241770ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 7cbf5938384c1ea604a0b07ea850f8a356d98fcbfdd4e80c991f4a0ef5e9814d file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: RegionCode from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: I - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: I - dot: . - naked_identifier: Pcd - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: P - dot: . - naked_identifier: Iid - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: BA - dot: . - naked_identifier: Iid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: I - dot: . 
- naked_identifier: Bcd statement_terminator: ; - go_statement: keyword: GO - batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: RIGHT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: I - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P alias_expression: keyword: AS naked_identifier: P_1 - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: IP alias_expression: keyword: AS naked_identifier: IP_1 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: P_1 - dot: . - naked_identifier: NID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: IP_1 - dot: . - naked_identifier: NID - join_on_condition: keyword: 'ON' expression: column_reference: - naked_identifier: I - dot: . - naked_identifier: PID comparison_operator: raw_comparison_operator: '=' function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: - naked_identifier: P_1 - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: P alias_expression: keyword: AS naked_identifier: P_2 - join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: IP alias_expression: keyword: AS naked_identifier: IP_2 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: P_2 - dot: . - naked_identifier: NID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: IP_2 - dot: . - naked_identifier: NID - join_on_condition: keyword: 'ON' expression: column_reference: - naked_identifier: I - dot: . - naked_identifier: SecondaryPID comparison_operator: raw_comparison_operator: '=' function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: - naked_identifier: P_2 - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) - join_on_condition: keyword: 'ON' expression: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: - naked_identifier: BA - dot: . - naked_identifier: IDEID keyword: AS data_type: data_type_identifier: varchar end_bracket: ) comparison_operator: raw_comparison_operator: '=' column_reference: - naked_identifier: I - dot: . 
- naked_identifier: BAID - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: RegionCode from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: BA join_clause: - keyword: LEFT - keyword: OUTER - keyword: JOIN - from_expression_element: table_expression: bracketed: start_bracket: ( table_expression: table_reference: naked_identifier: I join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: P join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: I - dot: . - naked_identifier: Pcd - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: P - dot: . - naked_identifier: Iid end_bracket: ) - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: BA - dot: . - naked_identifier: Iid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: I - dot: . - naked_identifier: Bcd statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/openjson.sql000066400000000000000000000027651451700765000233410ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16#examples */ SELECT * FROM products INNER JOIN OPENJSON(N'[1,2,3,4]') AS productTypes ON product.productTypeID = productTypes.value ; SELECT * FROM OPENJSON(@json) WITH ( month VARCHAR(3), temp int, month_id tinyint '$.sql:identity()') as months ; SELECT * FROM OPENJSON ( @json ) WITH ( Number VARCHAR(200) '$.Order.Number', Date DATETIME '$.Order.Date', Customer VARCHAR(200) '$.AccountNumber', Quantity INT '$.Item.Quantity', [Order] NVARCHAR(MAX) AS JSON ); SELECT SalesOrderID, OrderDate, value AS Reason FROM Sales.SalesOrderHeader CROSS APPLY OPENJSON (SalesReasons) WITH (value NVARCHAR(100) '$') ; SELECT store.title, location.street, location.lat, location.long FROM store CROSS APPLY OPENJSON(store.jsonCol, 'lax $.location') WITH (street VARCHAR(500) , postcode VARCHAR(500) '$.postcode' , lon int '$.geo.longitude', lat int '$.geo.latitude') AS location ; INSERT INTO Person SELECT * FROM OPENJSON(@json) WITH (id INT, firstName NVARCHAR(50), lastName NVARCHAR(50), isAlive BIT, age INT, dateOfBirth DATETIME, spouse NVARCHAR(50)) ; SELECT root.[key] AS [Order],TheValues.[key], TheValues.[value] FROM OPENJSON ( @JSON ) AS root CROSS APPLY OPENJSON ( root.value) AS TheValues ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/openjson.yml000066400000000000000000000420571451700765000233410ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b0aed6157fb7463eb1ea734057995c2536cb98aabdd752bf4b40ec583132683d file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: products join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON bracketed: start_bracket: ( expression: quoted_literal: "N'[1,2,3,4]'" end_bracket: ) alias_expression: keyword: AS naked_identifier: productTypes - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: product - dot: . - naked_identifier: productTypeID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: productTypes - dot: . - naked_identifier: value statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: month - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) - comma: ',' - column_reference: naked_identifier: temp - data_type: data_type_identifier: int - comma: ',' - column_reference: naked_identifier: month_id - data_type: data_type_identifier: tinyint - quoted_literal: "'$.sql:identity()'" - end_bracket: ) alias_expression: keyword: as naked_identifier: months statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: Number - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '200' end_bracket: ) - quoted_literal: "'$.Order.Number'" - comma: ',' - column_reference: naked_identifier: Date - data_type: data_type_identifier: DATETIME - quoted_literal: "'$.Order.Date'" - comma: ',' - column_reference: naked_identifier: Customer - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '200' end_bracket: ) - quoted_literal: "'$.AccountNumber'" - comma: ',' - column_reference: naked_identifier: Quantity - data_type: data_type_identifier: INT - quoted_literal: "'$.Item.Quantity'" - comma: ',' - column_reference: quoted_identifier: '[Order]' - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( keyword: MAX end_bracket: ) - keyword: AS - keyword: JSON - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: SalesOrderID - comma: ',' - select_clause_element: column_reference: naked_identifier: OrderDate - comma: ',' - select_clause_element: column_reference: 
naked_identifier: value alias_expression: keyword: AS naked_identifier: Reason from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: SalesOrderHeader join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: naked_identifier: SalesReasons end_bracket: ) openjson_with_clause: keyword: WITH bracketed: start_bracket: ( column_reference: naked_identifier: value data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) quoted_literal: "'$'" end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: store - dot: . - naked_identifier: title - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: street - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: lat - comma: ',' - select_clause_element: column_reference: - naked_identifier: location - dot: . - naked_identifier: long from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: store join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: - naked_identifier: store - dot: . - naked_identifier: jsonCol comma: ',' quoted_literal: "'lax $.location'" end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: street - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '500' end_bracket: ) - comma: ',' - column_reference: naked_identifier: postcode - data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '500' end_bracket: ) - quoted_literal: "'$.postcode'" - comma: ',' - column_reference: naked_identifier: lon - data_type: data_type_identifier: int - quoted_literal: "'$.geo.longitude'" - comma: ',' - column_reference: naked_identifier: lat - data_type: data_type_identifier: int - quoted_literal: "'$.geo.latitude'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: location statement_terminator: ; - statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: naked_identifier: Person - select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openjson_segment: keyword: OPENJSON bracketed: start_bracket: ( column_reference: parameter: '@json' end_bracket: ) openjson_with_clause: keyword: WITH bracketed: - start_bracket: ( - column_reference: naked_identifier: id - data_type: data_type_identifier: INT - comma: ',' - column_reference: naked_identifier: firstName - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: lastName - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: 
'50' end_bracket: ) - comma: ',' - column_reference: naked_identifier: isAlive - data_type: data_type_identifier: BIT - comma: ',' - column_reference: naked_identifier: age - data_type: data_type_identifier: INT - comma: ',' - column_reference: naked_identifier: dateOfBirth - data_type: data_type_identifier: DATETIME - comma: ',' - column_reference: naked_identifier: spouse - data_type: data_type_identifier: NVARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) - end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: root dot: . quoted_identifier: '[key]' alias_expression: keyword: AS quoted_identifier: '[Order]' - comma: ',' - select_clause_element: column_reference: naked_identifier: TheValues dot: . quoted_identifier: '[key]' - comma: ',' - select_clause_element: column_reference: naked_identifier: TheValues dot: . quoted_identifier: '[value]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON bracketed: start_bracket: ( expression: parameter: '@JSON' end_bracket: ) alias_expression: keyword: AS naked_identifier: root join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: function: function_name: function_name_identifier: OPENJSON bracketed: start_bracket: ( expression: column_reference: - naked_identifier: root - dot: . - naked_identifier: value end_bracket: ) alias_expression: keyword: AS naked_identifier: TheValues statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/openrowset.sql000066400000000000000000000017431451700765000237060ustar00rootroot00000000000000SELECT a.* FROM OPENROWSET('Microsoft.Jet.OLEDB.4.0', 'C:\SAMPLES\Northwind.mdb'; 'admin'; 'password', Customers) AS a; GO SELECT d.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', Department) AS d; GO SELECT d.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', AdventureWorks2012.HumanResources.Department) AS d; GO SELECT a.* FROM OPENROWSET('SQLNCLI', 'Server=Seattle1;Trusted_Connection=yes;', 'SELECT TOP 10 GroupName, Name FROM AdventureWorks2012.HumanResources.Department') AS a; GO SELECT * FROM OPENROWSET( BULK 'C:\DATA\inv-2017-01-19.csv', SINGLE_CLOB) AS DATA; GO SELECT * FROM OPENROWSET(BULK N'C:\Text1.txt', SINGLE_NCLOB) AS Document; GO SELECT * FROM OPENROWSET(BULK N'D:\XChange\test-csv.csv', FORMATFILE = N'D:\XChange\test-csv.fmt', FIRSTROW=2, FORMAT='CSV') AS cars; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/openrowset.yml000066400000000000000000000177111451700765000237120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3711edeb255f7411613a876ef888e2da9cab67a2a5f15523df1a0e9fc71ec47c file: - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . 
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'Microsoft.Jet.OLEDB.4.0'" - comma: ',' - quoted_literal: "'C:\\SAMPLES\\Northwind.mdb'" - statement_terminator: ; - quoted_literal: "'admin'" - statement_terminator: ; - quoted_literal: "'password'" - comma: ',' - table_reference: naked_identifier: Customers - end_bracket: ) alias_expression: keyword: AS naked_identifier: a statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: naked_identifier: Department - end_bracket: ) alias_expression: keyword: AS naked_identifier: d statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: d dot: . star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - table_reference: - naked_identifier: AdventureWorks2012 - dot: . - naked_identifier: HumanResources - dot: . - naked_identifier: Department - end_bracket: ) alias_expression: keyword: AS naked_identifier: d statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: a dot: . 
star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - quoted_literal: "'SQLNCLI'" - comma: ',' - quoted_literal: "'Server=Seattle1;Trusted_Connection=yes;'" - comma: ',' - quoted_literal: "'SELECT TOP 10 GroupName, Name\n FROM AdventureWorks2012.HumanResources.Department'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: a statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "'C:\\DATA\\inv-2017-01-19.csv'" - comma: ',' - keyword: SINGLE_CLOB - end_bracket: ) alias_expression: keyword: AS naked_identifier: DATA statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "N'C:\\Text1.txt'" - comma: ',' - keyword: SINGLE_NCLOB - end_bracket: ) alias_expression: keyword: AS naked_identifier: Document statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: openrowset_segment: keyword: OPENROWSET bracketed: - start_bracket: ( - keyword: BULK - quoted_literal: "N'D:\\XChange\\test-csv.csv'" - comma: ',' - keyword: FORMATFILE - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "N'D:\\XChange\\test-csv.fmt'" - comma: ',' - keyword: FIRSTROW - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '2' - comma: ',' - keyword: FORMAT - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'CSV'" - end_bracket: ) alias_expression: keyword: AS naked_identifier: cars statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/outer_apply.sql000066400000000000000000000002271451700765000240400ustar00rootroot00000000000000-- JOIN should not be parsed as nested in OUTER APPLY SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col; sqlfluff-2.3.5/test/fixtures/dialects/tsql/outer_apply.yml000066400000000000000000000035331451700765000240450ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2c00e0d4150da03673fd39303dddebf447f51e071d6c0843ed313d8e708b8a37 file: batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: naked_identifier: table1 dot: . 
star: '*' from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: naked_identifier: table1 - join_clause: - keyword: OUTER - keyword: APPLY - from_expression_element: table_expression: table_reference: naked_identifier: table2 - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table3 - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: table1 - dot: . - naked_identifier: col - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: table3 - dot: . - naked_identifier: col statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/print.sql000066400000000000000000000001641451700765000226310ustar00rootroot00000000000000DECLARE @TestVal VARCHAR(20) = 'Test Print' PRINT '#Dates' PRINT CAST(GETDATE() AS VARCHAR(50)); PRINT @TestVal sqlfluff-2.3.5/test/fixtures/dialects/tsql/print.yml000066400000000000000000000035731451700765000226420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 683357bdaa8ac0f3bc686790545ee9e82d84517b8cb80ce8b441081659f15244 file: batch: - statement: declare_segment: keyword: DECLARE parameter: '@TestVal' data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '20' end_bracket: ) comparison_operator: raw_comparison_operator: '=' expression: quoted_literal: "'Test Print'" - statement: print_statement: keyword: PRINT expression: quoted_literal: "'#Dates'" - statement: print_statement: keyword: PRINT expression: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) keyword: AS data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) end_bracket: ) statement_terminator: ; - statement: print_statement: keyword: PRINT expression: parameter: '@TestVal' sqlfluff-2.3.5/test/fixtures/dialects/tsql/raiserror.sql000066400000000000000000000014341451700765000235060ustar00rootroot00000000000000RAISERROR(15600, -1, -1, 'mysp_CreateCustomer'); RAISERROR('This is message %s %d.', 10, 1, 'number'); RAISERROR('Error raised in TRY block.', 16, 1); RAISERROR (N'Unicode error', 16, 1); RAISERROR ('WITH option', 16, 1) WITH LOG; RAISERROR ('Error with lots of arguments %a %b %c %d %e %f %g %h %i %j %k %l %m %n %o %p %q %r %s %t', 16, 1, 'a', N'b', @c, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20); RAISERROR (@ErrorMessage, -- Message text. @ErrorSeverity, -- Severity. @ErrorState -- State. ); RAISERROR ( 'The specified table does not exist. Please enter @tableName in the following format: Schemaname.Tablename OR [Schemaname].[Tablename]' ,11 ,- 1 ); sqlfluff-2.3.5/test/fixtures/dialects/tsql/raiserror.yml000066400000000000000000000107461451700765000235160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. 
Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dae7edf9b4f2ced19f8ec4581f665819a6a0fe87cea7e5c233c5d26a09120828 file: batch: - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - numeric_literal: '15600' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - comma: ',' - quoted_literal: "'mysp_CreateCustomer'" - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'This is message %s %d.'" - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'number'" - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Error raised in TRY block.'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "N'Unicode error'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: - keyword: RAISERROR - bracketed: - start_bracket: ( - quoted_literal: "'WITH option'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - keyword: WITH - keyword: LOG - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Error with lots of arguments %a %b %c %d %e %f %g %h %i\ \ %j %k %l %m %n %o %p %q %r %s %t'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - comma: ',' - quoted_literal: "'a'" - comma: ',' - quoted_literal: "N'b'" - comma: ',' - parameter: '@c' - comma: ',' - numeric_literal: '4' - comma: ',' - numeric_literal: '5' - comma: ',' - numeric_literal: '6' - comma: ',' - numeric_literal: '7' - comma: ',' - numeric_literal: '8' - comma: ',' - numeric_literal: '9' - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '11' - comma: ',' - numeric_literal: '12' - comma: ',' - numeric_literal: '13' - comma: ',' - numeric_literal: '14' - comma: ',' - numeric_literal: '15' - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '17' - comma: ',' - numeric_literal: '18' - comma: ',' - numeric_literal: '19' - comma: ',' - numeric_literal: '20' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - parameter: '@ErrorMessage' - comma: ',' - parameter: '@ErrorSeverity' - comma: ',' - parameter: '@ErrorState' - end_bracket: ) - statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'The specified table does not exist. 
Please enter @tableName\ \ in the following format: Schemaname.Tablename OR [Schemaname].[Tablename]'" - comma: ',' - numeric_literal: '11' - comma: ',' - numeric_literal: sign_indicator: '-' numeric_literal: '1' - end_bracket: ) - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/rename_table.sql000066400000000000000000000001331451700765000241070ustar00rootroot00000000000000--Azure Synapse Analytics specific RENAME OBJECT [Reporting].[TABLE_NEW] to [TABLE_BASE]; sqlfluff-2.3.5/test/fixtures/dialects/tsql/rename_table.yml000066400000000000000000000013111451700765000241100ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 96147ebb74fd822dd58899940d78bf266f6eb6c299a286b677b88408937c0e06 file: batch: statement: rename_statement: - keyword: RENAME - keyword: OBJECT - object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[TABLE_NEW]' - keyword: to - quoted_identifier: '[TABLE_BASE]' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select.sql000066400000000000000000000055031451700765000227560ustar00rootroot00000000000000--For testing valid select clause elements SELECT CASE WHEN 1 = 1 THEN 'True' WHEN 1 > 1 THEN 'False' WHEN 1 < 1 THEN 'False' WHEN 1 >= 1 THEN 'True' WHEN 1 > = 1 THEN 'True' WHEN 1 <= 1 THEN 'True' WHEN 1 < = 1 THEN 'True' WHEN 1 <> 1 THEN 'False' WHEN 1 < > 1 THEN 'False' WHEN 1 !< 1 THEN 'Why is this a thing?' WHEN 1 ! < 1 THEN 'Or this sort of thing?' WHEN 1 != 1 THEN 'False' WHEN 1 ! = 1 THEN 'False' WHEN 1 !> 1 THEN 'NULL Handling, Probably' WHEN 1 ! > 1 THEN 'NULL Handling, Probably' ELSE 'Silly Tests' END, all_pop. 
[Arrival Date], all_pop.Row#, all_pop.b@nanas, [# POAs], 'TSQLs escaping quotes test', 'TSQL''s escaping quotes test', 'TSQL' 's escaping quotes test', 'TSQL' AS 's escaping quotes test', '', '''', --unreserved words all_pop.Language, ANSI_DEFAULTS , ANSI_NULL_DFLT_OFF , ANSI_NULL_DFLT_ON , ANSI_NULLS , ANSI_PADDING , ANSI_WARNINGS , ARITHABORT , ARITHIGNORE , CONCAT_NULL_YIELDS_NULL , CURSOR_CLOSE_ON_COMMIT , DATEFIRST , DATEFORMAT , DEADLOCK_PRIORITY , DISK , DUMP , FIPS_FLAGGER , FMTONLY , FORCEPLAN , IMPLICIT_TRANSACTIONS , LOAD , LOCK_TIMEOUT , NOCOUNT , NOEXEC , NUMERIC_ROUNDABORT , PARSEONLY , PRECISION , QUERY_GOVERNOR_COST_LIMIT , QUOTED_IDENTIFIER , REMOTE_PROC_TRANSACTIONS , SECURITYAUDIT , SHOWPLAN_ALL , SHOWPLAN_TEXT , SHOWPLAN_XML , XACT_ABORT, --TSQL non-keywords Rows, NaN, Rlike, Ilike, Separator, Auto_Increment, Unsigned, Describe, Comment, Ml, Modify, Minus, ROW_NUMBER()OVER(PARTITION BY [EventNM], [PersonID] ORDER BY [DateofEvent] desc) AS [RN], RANK()OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [R], DENSE_RANK()OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [DR], NTILE(5)OVER(PARTITION BY [EventNM] ORDER BY [DateofEvent] desc) AS [NT], sum(t.col1) over (partition by t.col2, t.col3), ROW_NUMBER() OVER (PARTITION BY (SELECT mediaty FROM dbo.MediaTypes ms WHERE ms.MediaTypeID = f.mediatypeid) ORDER BY AdjustedPriorityScore DESC) AS Subselect_Partition, ROW_NUMBER() OVER (PARTITION BY COALESCE(NPI1, NPI2) ORDER BY COALESCE(SystemEffectiveDTS1, SystemEffectiveDTS2) DESC) AS Coalesce_Partition, ROW_NUMBER() OVER (PARTITION BY (DayInMonth), (DaySuffix) ORDER BY Month ASC), COUNT(*) OVER (PARTITION BY NULL), [preceding] = count(*) over(order by object_id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW ), [central] = count(*) over(order by object_id ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING ), [following] = count(*) over(order by object_id ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING), EqualsAlias = ColumnName, OtherColumnName AS AsAlias, cast(1 as character varying(1)), cast([central] as int), --unbracketed functions CURRENT_TIMESTAMP, CURRENT_USER, SESSION_USER, SYSTEM_USER FROM dbo . all_pop sqlfluff-2.3.5/test/fixtures/dialects/tsql/select.yml000066400000000000000000000735331451700765000227700ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 5ce6e3c7c4dc3263e4f470a51c0b7c52a2173bba1d16f2852067b0d232b40bc6 file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '>' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'True'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'Why is this a thing?'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: < - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'Or this sort of thing?'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '=' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'False'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' - raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'NULL Handling, Probably'" - when_clause: - keyword: WHEN - expression: - numeric_literal: '1' - comparison_operator: - raw_comparison_operator: '!' 
- raw_comparison_operator: '>' - numeric_literal: '1' - keyword: THEN - expression: quoted_literal: "'NULL Handling, Probably'" - else_clause: keyword: ELSE expression: quoted_literal: "'Silly Tests'" - keyword: END - comma: ',' - select_clause_element: column_reference: naked_identifier: all_pop dot: . quoted_identifier: '[Arrival Date]' - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: Row# - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: b@nanas - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[# POAs]' - comma: ',' - select_clause_element: quoted_literal: "'TSQLs escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL''s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL'" alias_expression: quoted_identifier: "'s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "'TSQL'" alias_expression: keyword: AS quoted_identifier: "'s escaping quotes test'" - comma: ',' - select_clause_element: quoted_literal: "''" - comma: ',' - select_clause_element: quoted_literal: "''''" - comma: ',' - select_clause_element: column_reference: - naked_identifier: all_pop - dot: . - naked_identifier: Language - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_DEFAULTS - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULL_DFLT_OFF - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULL_DFLT_ON - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_NULLS - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_PADDING - comma: ',' - select_clause_element: column_reference: naked_identifier: ANSI_WARNINGS - comma: ',' - select_clause_element: column_reference: naked_identifier: ARITHABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: ARITHIGNORE - comma: ',' - select_clause_element: column_reference: naked_identifier: CONCAT_NULL_YIELDS_NULL - comma: ',' - select_clause_element: column_reference: naked_identifier: CURSOR_CLOSE_ON_COMMIT - comma: ',' - select_clause_element: column_reference: naked_identifier: DATEFIRST - comma: ',' - select_clause_element: column_reference: naked_identifier: DATEFORMAT - comma: ',' - select_clause_element: column_reference: naked_identifier: DEADLOCK_PRIORITY - comma: ',' - select_clause_element: column_reference: naked_identifier: DISK - comma: ',' - select_clause_element: column_reference: naked_identifier: DUMP - comma: ',' - select_clause_element: column_reference: naked_identifier: FIPS_FLAGGER - comma: ',' - select_clause_element: column_reference: naked_identifier: FMTONLY - comma: ',' - select_clause_element: column_reference: naked_identifier: FORCEPLAN - comma: ',' - select_clause_element: column_reference: naked_identifier: IMPLICIT_TRANSACTIONS - comma: ',' - select_clause_element: column_reference: naked_identifier: LOAD - comma: ',' - select_clause_element: column_reference: naked_identifier: LOCK_TIMEOUT - comma: ',' - select_clause_element: column_reference: naked_identifier: NOCOUNT - comma: ',' - select_clause_element: column_reference: naked_identifier: NOEXEC - comma: ',' - select_clause_element: column_reference: naked_identifier: NUMERIC_ROUNDABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: PARSEONLY - comma: ',' - 
select_clause_element: column_reference: naked_identifier: PRECISION - comma: ',' - select_clause_element: column_reference: naked_identifier: QUERY_GOVERNOR_COST_LIMIT - comma: ',' - select_clause_element: column_reference: naked_identifier: QUOTED_IDENTIFIER - comma: ',' - select_clause_element: column_reference: naked_identifier: REMOTE_PROC_TRANSACTIONS - comma: ',' - select_clause_element: column_reference: naked_identifier: SECURITYAUDIT - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_ALL - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_TEXT - comma: ',' - select_clause_element: column_reference: naked_identifier: SHOWPLAN_XML - comma: ',' - select_clause_element: column_reference: naked_identifier: XACT_ABORT - comma: ',' - select_clause_element: column_reference: naked_identifier: Rows - comma: ',' - select_clause_element: column_reference: naked_identifier: NaN - comma: ',' - select_clause_element: column_reference: naked_identifier: Rlike - comma: ',' - select_clause_element: column_reference: naked_identifier: Ilike - comma: ',' - select_clause_element: column_reference: naked_identifier: Separator - comma: ',' - select_clause_element: column_reference: naked_identifier: Auto_Increment - comma: ',' - select_clause_element: column_reference: naked_identifier: Unsigned - comma: ',' - select_clause_element: column_reference: naked_identifier: Describe - comma: ',' - select_clause_element: column_reference: naked_identifier: Comment - comma: ',' - select_clause_element: column_reference: naked_identifier: Ml - comma: ',' - select_clause_element: column_reference: naked_identifier: Modify - comma: ',' - select_clause_element: column_reference: naked_identifier: Minus - comma: ',' - select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' - comma: ',' - column_reference: quoted_identifier: '[PersonID]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[RN]' - comma: ',' - select_clause_element: function: - function_name: keyword: RANK - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[R]' - comma: ',' - select_clause_element: function: - function_name: keyword: DENSE_RANK - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[DR]' - comma: ',' - select_clause_element: function: - function_name: keyword: NTILE - bracketed: start_bracket: ( numeric_literal: '5' end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: quoted_identifier: '[EventNM]' 
orderby_clause: - keyword: ORDER - keyword: BY - column_reference: quoted_identifier: '[DateofEvent]' - keyword: desc end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[NT]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: sum bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: partition - keyword: by - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 - comma: ',' - column_reference: - naked_identifier: t - dot: . - naked_identifier: col3 end_bracket: ) - comma: ',' - select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: mediaty from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: MediaTypes alias_expression: naked_identifier: ms where_clause: keyword: WHERE expression: - column_reference: - naked_identifier: ms - dot: . - naked_identifier: MediaTypeID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: f - dot: . - naked_identifier: mediatypeid end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: AdjustedPriorityScore - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: Subselect_Partition - comma: ',' - select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - expression: function: function_name: keyword: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: NPI1 - comma: ',' - expression: column_reference: naked_identifier: NPI2 - end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - expression: function: function_name: keyword: COALESCE bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: SystemEffectiveDTS1 - comma: ',' - expression: column_reference: naked_identifier: SystemEffectiveDTS2 - end_bracket: ) - keyword: DESC end_bracket: ) alias_expression: keyword: AS naked_identifier: Coalesce_Partition - comma: ',' - select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( column_reference: naked_identifier: DayInMonth end_bracket: ) - comma: ',' - bracketed: start_bracket: ( column_reference: naked_identifier: DaySuffix end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Month - keyword: ASC end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: OVER bracketed: start_bracket: ( window_specification: partitionby_clause: - keyword: PARTITION - keyword: BY - expression: null_literal: 'NULL' end_bracket: ) - comma: ',' - 
select_clause_element: alias_expression: quoted_identifier: '[preceding]' raw_comparison_operator: '=' function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: UNBOUNDED - keyword: PRECEDING - keyword: AND - keyword: CURRENT - keyword: ROW end_bracket: ) - comma: ',' - select_clause_element: alias_expression: quoted_identifier: '[central]' raw_comparison_operator: '=' function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - numeric_literal: '2' - keyword: PRECEDING - keyword: AND - numeric_literal: '2' - keyword: FOLLOWING end_bracket: ) - comma: ',' - select_clause_element: alias_expression: quoted_identifier: '[following]' raw_comparison_operator: '=' function: function_name: function_name_identifier: count bracketed: start_bracket: ( star: '*' end_bracket: ) over_clause: keyword: over bracketed: start_bracket: ( window_specification: orderby_clause: - keyword: order - keyword: by - column_reference: naked_identifier: object_id frame_clause: - keyword: ROWS - keyword: BETWEEN - keyword: CURRENT - keyword: ROW - keyword: AND - keyword: UNBOUNDED - keyword: FOLLOWING end_bracket: ) - comma: ',' - select_clause_element: alias_expression: naked_identifier: EqualsAlias raw_comparison_operator: '=' column_reference: naked_identifier: ColumnName - comma: ',' - select_clause_element: column_reference: naked_identifier: OtherColumnName alias_expression: keyword: AS naked_identifier: AsAlias - comma: ',' - select_clause_element: function: function_name: keyword: cast bracketed: start_bracket: ( expression: numeric_literal: '1' keyword: as data_type: data_type_identifier: character keyword: varying bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '1' end_bracket: ) end_bracket: ) - comma: ',' - select_clause_element: function: function_name: keyword: cast bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[central]' keyword: as data_type: data_type_identifier: int end_bracket: ) - comma: ',' - select_clause_element: bare_function: CURRENT_TIMESTAMP - comma: ',' - select_clause_element: bare_function: CURRENT_USER - comma: ',' - select_clause_element: bare_function: SESSION_USER - comma: ',' - select_clause_element: bare_function: SYSTEM_USER from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: all_pop sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_cross_apply.sql000066400000000000000000000005251451700765000253730ustar00rootroot00000000000000SELECT DeptID, DeptName, DeptMgrID, EmpID, EmpLastName, EmpSalary FROM Departments d CROSS APPLY dbo.GetReports(d.DeptMgrID) ; SELECT * FROM Department D OUTER APPLY dbo.fn_GetAllEmployeeOfADepartment(D.DepartmentID); select s.column_id , sp.value from table1 as s cross apply string_split(replace(s.some_path, '->', '{'), '{') as sp; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_cross_apply.yml000066400000000000000000000127551451700765000254050ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 978778c9521da1e4ee1f39a434056c89bc91157e37ca808ef7949771b01af7a9 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: DeptID - comma: ',' - select_clause_element: column_reference: naked_identifier: DeptName - comma: ',' - select_clause_element: column_reference: naked_identifier: DeptMgrID - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpID - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpLastName - comma: ',' - select_clause_element: column_reference: naked_identifier: EmpSalary from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Departments alias_expression: naked_identifier: d join_clause: - keyword: CROSS - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: GetReports bracketed: start_bracket: ( expression: column_reference: - naked_identifier: d - dot: . - naked_identifier: DeptMgrID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Department alias_expression: naked_identifier: D join_clause: - keyword: OUTER - keyword: APPLY - from_expression_element: table_expression: function: function_name: naked_identifier: dbo dot: . function_name_identifier: fn_GetAllEmployeeOfADepartment bracketed: start_bracket: ( expression: column_reference: - naked_identifier: D - dot: . - naked_identifier: DepartmentID end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: column_id - comma: ',' - select_clause_element: column_reference: - naked_identifier: sp - dot: . 
- naked_identifier: value from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: keyword: as naked_identifier: s join_clause: - keyword: cross - keyword: apply - from_expression_element: table_expression: function: function_name: function_name_identifier: string_split bracketed: - start_bracket: ( - expression: function: function_name: function_name_identifier: replace bracketed: - start_bracket: ( - expression: column_reference: - naked_identifier: s - dot: . - naked_identifier: some_path - comma: ',' - expression: quoted_literal: "'->'" - comma: ',' - expression: quoted_literal: "'{'" - end_bracket: ) - comma: ',' - expression: quoted_literal: "'{'" - end_bracket: ) alias_expression: keyword: as naked_identifier: sp statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_date_functions.sql000066400000000000000000000027501451700765000260440ustar00rootroot00000000000000SELECT [hello], DATEDIFF(day, [mydate], GETDATE()) AS [test], DATEPART(day, [mydate], GETDATE()) AS [test2], DATEDIFF(year, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(quarter, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(month, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(dayofyear, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(day, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(week, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(hour, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(minute, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(second, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(millisecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF(microsecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEDIFF_BIG(microsecond, '2005-12-31 23:59:59.9999999', '2006-01-01 00:00:00.0000000'), DATEADD(year,2147483647, '20060731'), DATEADD(year,-2147483647, '20060731'), DATENAME(year, '12:10:30.123'), DATENAME(month, '12:10:30.123'), DATENAME(day, '12:10:30.123'), DATENAME(dayofyear, '12:10:30.123'), DATENAME(weekday, '12:10:30.123'), DAY(GetDate()) as today FROM mytable; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_date_functions.yml000066400000000000000000000277411451700765000260550ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: b4517367d9445caae672c983ab18ef4d4ec97810f5222aaba0da046aa7647acc file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[hello]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: column_reference: quoted_identifier: '[mydate]' - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[test]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEPART bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: column_reference: quoted_identifier: '[mydate]' - comma: ',' - expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[test2]' - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: quarter - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: month - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: dayofyear - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: day - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: week - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: hour - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: minute - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 
00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: second - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: millisecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEDIFF_BIG bracketed: - start_bracket: ( - date_part: microsecond - comma: ',' - expression: quoted_literal: "'2005-12-31 23:59:59.9999999'" - comma: ',' - expression: quoted_literal: "'2006-01-01 00:00:00.0000000'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: numeric_literal: '2147483647' - comma: ',' - expression: quoted_literal: "'20060731'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATEADD bracketed: - start_bracket: ( - date_part: year - comma: ',' - expression: numeric_literal: sign_indicator: '-' numeric_literal: '2147483647' - comma: ',' - expression: quoted_literal: "'20060731'" - end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME bracketed: start_bracket: ( date_part: year comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME bracketed: start_bracket: ( date_part: month comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME bracketed: start_bracket: ( date_part: day comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME bracketed: start_bracket: ( date_part: dayofyear comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DATENAME bracketed: start_bracket: ( date_part: weekday comma: ',' expression: quoted_literal: "'12:10:30.123'" end_bracket: ) - comma: ',' - select_clause_element: function: function_name: function_name_identifier: DAY bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: GetDate bracketed: start_bracket: ( end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: today from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable statement_terminator: ; 
sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_for.sql000066400000000000000000000016161451700765000236250ustar00rootroot00000000000000-- FOR JSON SELECT name, surname FROM emp FOR JSON AUTO; GO SELECT 1 AS a FOR JSON PATH; GO SELECT 1 AS a FOR JSON PATH, WITHOUT_ARRAY_WRAPPER GO SELECT c.ClassName, s.StudentName FROM #tabClass AS c RIGHT JOIN #tabStudent AS s ON s.ClassGuid = c.ClassGuid ORDER BY c.ClassName, s.StudentName FOR JSON AUTO; GO SELECT 1 AS a FOR JSON PATH, ROOT ('RootName'), WITHOUT_ARRAY_WRAPPER, INCLUDE_NULL_VALUES; GO -- FOR XML SELECT ProductModelID, Name FROM Production.ProductModel WHERE ProductModelID=122 or ProductModelID=119 FOR XML RAW; SELECT ProductPhotoID, ThumbNailPhoto FROM Production.ProductPhoto WHERE ProductPhotoID=70 FOR XML AUTO; SELECT 1 as Tag FROM HumanResources.Employee AS E FOR XML EXPLICIT; SELECT ProductModelID, Name FROM Production.ProductModel WHERE ProductModelID=122 OR ProductModelID=119 FOR XML PATH ('root'); -- FOR BROWSE SELECT 1 AS a FOR BROWSE GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_for.yml000066400000000000000000000221451451700765000236270ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 9475bf138df2b698b5fcdea7d4d615c8ea76ce29b7e3f08dc3b1b10335f5b6a8 file: - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: name - comma: ',' - select_clause_element: column_reference: naked_identifier: surname from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: emp for_clause: - keyword: FOR - keyword: JSON - keyword: AUTO statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH - comma: ',' - keyword: WITHOUT_ARRAY_WRAPPER - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: c - dot: . - naked_identifier: ClassName - comma: ',' - select_clause_element: column_reference: - naked_identifier: s - dot: . - naked_identifier: StudentName from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#tabClass' alias_expression: keyword: AS naked_identifier: c join_clause: - keyword: RIGHT - keyword: JOIN - from_expression_element: table_expression: table_reference: hash_identifier: '#tabStudent' alias_expression: keyword: AS naked_identifier: s - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: ClassGuid - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: c - dot: . 
- naked_identifier: ClassGuid orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: c - dot: . - naked_identifier: ClassName - comma: ',' - column_reference: - naked_identifier: s - dot: . - naked_identifier: StudentName for_clause: - keyword: FOR - keyword: JSON - keyword: AUTO statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: JSON - keyword: PATH - comma: ',' - keyword: ROOT - bracketed: start_bracket: ( quoted_literal: "'RootName'" end_bracket: ) - comma: ',' - keyword: WITHOUT_ARRAY_WRAPPER - comma: ',' - keyword: INCLUDE_NULL_VALUES statement_terminator: ; - go_statement: keyword: GO - batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductModelID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductModel where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '122' - binary_operator: or - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '119' for_clause: - keyword: FOR - keyword: XML - keyword: RAW - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductPhotoID - comma: ',' - select_clause_element: column_reference: naked_identifier: ThumbNailPhoto from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . - naked_identifier: ProductPhoto where_clause: keyword: WHERE expression: column_reference: naked_identifier: ProductPhotoID comparison_operator: raw_comparison_operator: '=' numeric_literal: '70' for_clause: - keyword: FOR - keyword: XML - keyword: AUTO - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: as naked_identifier: Tag from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: keyword: AS naked_identifier: E for_clause: - keyword: FOR - keyword: XML - keyword: EXPLICIT - statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: ProductModelID - comma: ',' - select_clause_element: column_reference: naked_identifier: Name from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Production - dot: . 
- naked_identifier: ProductModel where_clause: keyword: WHERE expression: - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '122' - binary_operator: OR - column_reference: naked_identifier: ProductModelID - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '119' for_clause: - keyword: FOR - keyword: XML - keyword: PATH - bracketed: start_bracket: ( quoted_literal: "'root'" end_bracket: ) - statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' alias_expression: keyword: AS naked_identifier: a for_clause: - keyword: FOR - keyword: BROWSE - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_into.sql000066400000000000000000000002001451700765000237740ustar00rootroot00000000000000SELECT [ID] ,[FIN] ,[Unit] ,[EventNM] ,[Date] ,[CHGFlag] INTO #CHG FROM Final GROUP BY [FIN] ,[EventNM] ,[Unit] ,[Date] sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_into.yml000066400000000000000000000036601451700765000240130ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 571e816a81663534057907cc5669e5ee844180a98182353e94f101aca0e2c5c4 file: batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[ID]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[FIN]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[Unit]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[EventNM]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[Date]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CHGFlag]' into_table_clause: keyword: INTO object_reference: hash_identifier: '#CHG' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Final groupby_clause: - keyword: GROUP - keyword: BY - column_reference: quoted_identifier: '[FIN]' - comma: ',' - column_reference: quoted_identifier: '[EventNM]' - comma: ',' - column_reference: quoted_identifier: '[Unit]' - comma: ',' - column_reference: quoted_identifier: '[Date]' sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_natural_join.sql000066400000000000000000000003451451700765000255220ustar00rootroot00000000000000SELECT * FROM table1 natural -- this should parse as an alias as TSQL does not have NATURAL joins JOIN table2; SELECT * FROM table1 natural -- this should parse as an alias as TSQL does not have NATURAL joins INNER JOIN table2; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_natural_join.yml000066400000000000000000000036041451700765000255250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1fe1d14060793fedd68e83b08daf90de7e961caccf510607300b656d76e0ee75 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: naked_identifier: table2 statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: naked_identifier: natural join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: table2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_pivot.sql000066400000000000000000000006711451700765000242000ustar00rootroot00000000000000select [1], [2], [3] from table1 as t1 pivot (max(value) for rn in ([1], [2], [3]) ) as pvt; select [1], [2], [3] from table1 as t1 pivot (max(value) for rn in ([1], [2], [3]) ) pvt; GO SELECT unpvt.Program , dd.[Month Number] AS Month FROM p UNPIVOT ( MonthValue FOR MonthColumn IN (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec) ) AS unpvt INNER JOIN d ON [Month Name] = unpvt.MonthColumn; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_pivot.yml000066400000000000000000000166501451700765000242060ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 106d6f8d42771389c6da924eb555c61ceb570fdf54406d4c24823fe5ec9746c3 file: - batch: - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: keyword: as naked_identifier: t1 from_pivot_expression: - keyword: pivot - bracketed: - start_bracket: ( - function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) - keyword: for - column_reference: naked_identifier: rn - keyword: in - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - end_bracket: ) - end_bracket: ) - keyword: as - table_reference: naked_identifier: pvt statement_terminator: ; - statement: select_statement: select_clause: - keyword: select - select_clause_element: column_reference: quoted_identifier: '[1]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[2]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[3]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: keyword: as naked_identifier: t1 from_pivot_expression: keyword: pivot bracketed: - start_bracket: ( - function: function_name: function_name_identifier: max bracketed: start_bracket: ( expression: column_reference: naked_identifier: value end_bracket: ) - keyword: for - column_reference: naked_identifier: rn - keyword: in - bracketed: - start_bracket: ( - pivot_column_reference: quoted_identifier: '[1]' - comma: ',' - pivot_column_reference: quoted_identifier: '[2]' - comma: ',' - pivot_column_reference: quoted_identifier: '[3]' - end_bracket: ) - end_bracket: ) table_reference: naked_identifier: pvt statement_terminator: ; - go_statement: keyword: GO - batch: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: - naked_identifier: unpvt - dot: . - naked_identifier: Program - comma: ',' - select_clause_element: column_reference: naked_identifier: dd dot: . 
quoted_identifier: '[Month Number]' alias_expression: keyword: AS naked_identifier: Month from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: p from_pivot_expression: - keyword: UNPIVOT - bracketed: - start_bracket: ( - column_reference: naked_identifier: MonthValue - keyword: FOR - column_reference: naked_identifier: MonthColumn - keyword: IN - bracketed: - start_bracket: ( - pivot_column_reference: naked_identifier: Jan - comma: ',' - pivot_column_reference: naked_identifier: Feb - comma: ',' - pivot_column_reference: naked_identifier: Mar - comma: ',' - pivot_column_reference: naked_identifier: Apr - comma: ',' - pivot_column_reference: naked_identifier: May - comma: ',' - pivot_column_reference: naked_identifier: Jun - comma: ',' - pivot_column_reference: naked_identifier: Jul - comma: ',' - pivot_column_reference: naked_identifier: Aug - comma: ',' - pivot_column_reference: naked_identifier: Sep - comma: ',' - pivot_column_reference: naked_identifier: Oct - comma: ',' - pivot_column_reference: naked_identifier: Nov - comma: ',' - pivot_column_reference: naked_identifier: Dec - end_bracket: ) - end_bracket: ) - keyword: AS - table_reference: naked_identifier: unpvt join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: d - join_on_condition: keyword: 'ON' expression: - column_reference: quoted_identifier: '[Month Name]' - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: unpvt - dot: . - naked_identifier: MonthColumn statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_top.sql000066400000000000000000000012511451700765000236340ustar00rootroot00000000000000select top 1 t.date_column1 as last_date_column1 from t1.t2.table_name t order by t.column1 desc; SELECT TOP(10)JobTitle, HireDate FROM HumanResources.Employee; SELECT TOP(10)JobTitle, HireDate FROM HumanResources.Employee ORDER BY HireDate DESC; SELECT TOP(5)PERCENT JobTitle, HireDate FROM HumanResources.Employee ORDER BY HireDate DESC; SELECT TOP(10) PERCENT WITH TIES pp.FirstName, pp.LastName, e.JobTitle, e.Gender, r.Rate FROM Person.Person AS pp INNER JOIN HumanResources.Employee AS e ON pp.BusinessEntityID = e.BusinessEntityID INNER JOIN HumanResources.EmployeePayHistory AS r ON r.BusinessEntityID = e.BusinessEntityID ORDER BY Rate DESC; sqlfluff-2.3.5/test/fixtures/dialects/tsql/select_top.yml000066400000000000000000000176511451700765000236510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: dd5cab8477da6c6e8b56cb9f9d93ea42dff24632776a6b869e42485f2235e432 file: batch: - statement: select_statement: select_clause: keyword: select select_clause_modifier: keyword: top expression: numeric_literal: '1' select_clause_element: column_reference: - naked_identifier: t - dot: . - naked_identifier: date_column1 alias_expression: keyword: as naked_identifier: last_date_column1 from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: t1 - dot: . - naked_identifier: t2 - dot: . 
- naked_identifier: table_name alias_expression: naked_identifier: t orderby_clause: - keyword: order - keyword: by - column_reference: - naked_identifier: t - dot: . - naked_identifier: column1 - keyword: desc statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: keyword: TOP bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: HireDate - keyword: DESC statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '5' end_bracket: ) - keyword: PERCENT - select_clause_element: column_reference: naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: naked_identifier: HireDate from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: HireDate - keyword: DESC statement_terminator: ; - statement: select_statement: select_clause: - keyword: SELECT - select_clause_modifier: - keyword: TOP - bracketed: start_bracket: ( expression: numeric_literal: '10' end_bracket: ) - keyword: PERCENT - keyword: WITH - keyword: TIES - select_clause_element: column_reference: - naked_identifier: pp - dot: . - naked_identifier: FirstName - comma: ',' - select_clause_element: column_reference: - naked_identifier: pp - dot: . - naked_identifier: LastName - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: JobTitle - comma: ',' - select_clause_element: column_reference: - naked_identifier: e - dot: . - naked_identifier: Gender - comma: ',' - select_clause_element: column_reference: - naked_identifier: r - dot: . - naked_identifier: Rate from_clause: keyword: FROM from_expression: - from_expression_element: table_expression: table_reference: - naked_identifier: Person - dot: . - naked_identifier: Person alias_expression: keyword: AS naked_identifier: pp - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee alias_expression: keyword: AS naked_identifier: e - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: pp - dot: . 
- naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID - join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: EmployeePayHistory alias_expression: keyword: AS naked_identifier: r - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: r - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: e - dot: . - naked_identifier: BusinessEntityID orderby_clause: - keyword: ORDER - keyword: BY - column_reference: naked_identifier: Rate - keyword: DESC statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/sequence.sql000066400000000000000000000003141451700765000233020ustar00rootroot00000000000000CREATE SEQUENCE SEQ_MELDER START WITH 1 INCREMENT BY 1 GO CREATE SEQUENCE Test.DecSeq AS decimal(3,0) START WITH 125 INCREMENT BY 25 MINVALUE 100 MAXVALUE 200 CYCLE CACHE 3 ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/sequence.yml000066400000000000000000000041031451700765000233040ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: e0e8fd69019db0ed39c2baf5e6e04d8015b32483129f0e256d19de7838d1a0f7 file: - batch: statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: naked_identifier: SEQ_MELDER - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '1' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '1' - go_statement: keyword: GO - batch: statement: create_sequence_statement: - keyword: CREATE - keyword: SEQUENCE - sequence_reference: - naked_identifier: Test - dot: . 
- naked_identifier: DecSeq - create_sequence_options_segment: keyword: AS data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '3' - comma: ',' - expression: numeric_literal: '0' - end_bracket: ) - create_sequence_options_segment: - keyword: START - keyword: WITH - numeric_literal: '125' - create_sequence_options_segment: - keyword: INCREMENT - keyword: BY - numeric_literal: '25' - create_sequence_options_segment: keyword: MINVALUE numeric_literal: '100' - create_sequence_options_segment: keyword: MAXVALUE numeric_literal: '200' - create_sequence_options_segment: keyword: CYCLE - create_sequence_options_segment: keyword: CACHE numeric_literal: '3' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/set_statements.sql000066400000000000000000000007451451700765000245440ustar00rootroot00000000000000SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -- Single params SET @param1 = 1 ; -- Multiple params SET @param1 = 1, @param2 = 2 ; -- Comma separated params with a quoted value containing a comma SET @param1 = "test, test", @param2 = 2 ; -- Params with expression SET @param1 = ("test", "test"), @param2 = 2 ; -- Assignment operators SET @param1 += 1, @param2 -= 2, @param3 *= 3, @param4 /= 4, @param5 %= 5, @param5 ^= 6, @param5 &= 7, @param5 |= 8 ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/set_statements.yml000066400000000000000000000076441451700765000245510ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files.
_hash: 8ff80c388ac8b168e59fdfdbfacafea0a6e3483c6c36156a99ea33dcae05c1c0 file: batch: - statement: set_segment: - keyword: SET - keyword: TRANSACTION - keyword: ISOLATION - keyword: LEVEL - keyword: READ - keyword: UNCOMMITTED - statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@param1' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: column_reference: quoted_identifier: '"test, test"' - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: raw_comparison_operator: '=' - expression: bracketed: - start_bracket: ( - column_reference: quoted_identifier: '"test"' - comma: ',' - column_reference: quoted_identifier: '"test"' - end_bracket: ) - comma: ',' - parameter: '@param2' - assignment_operator: raw_comparison_operator: '=' - expression: numeric_literal: '2' - statement_terminator: ; - statement: set_segment: - keyword: SET - parameter: '@param1' - assignment_operator: binary_operator: + raw_comparison_operator: '=' - expression: numeric_literal: '1' - comma: ',' - parameter: '@param2' - assignment_operator: binary_operator: '-' raw_comparison_operator: '=' - expression: numeric_literal: '2' - comma: ',' - parameter: '@param3' - assignment_operator: binary_operator: '*' raw_comparison_operator: '=' - expression: numeric_literal: '3' - comma: ',' - parameter: '@param4' - assignment_operator: binary_operator: / raw_comparison_operator: '=' - expression: numeric_literal: '4' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: '%' raw_comparison_operator: '=' - expression: numeric_literal: '5' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: ^ raw_comparison_operator: '=' - expression: numeric_literal: '6' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: ampersand: '&' raw_comparison_operator: '=' - expression: numeric_literal: '7' - comma: ',' - parameter: '@param5' - assignment_operator: binary_operator: pipe: '|' raw_comparison_operator: '=' - expression: numeric_literal: '8' - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/sqlcmd_command.sql000066400000000000000000000005711451700765000244600ustar00rootroot00000000000000/* https://learn.microsoft.com/en-us/sql/tools/sqlcmd/sqlcmd-utility?view=sql-server-ver16#sqlcmd-commands */ -- reference / execute other SQL files :r script.sql :r script#01_a-b.sql :r ...\folder\script.SQL :r .\folder_1\folder_2\folder_3\folder_4\script.sql -- define *sqlcmd* scripting variable :setvar variable_name variable_value :setvar variable_name "variable_value" sqlfluff-2.3.5/test/fixtures/dialects/tsql/sqlcmd_command.yml000066400000000000000000000026271451700765000244660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. 
To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 140f2fcb33d59f14b4ca6e0615627f232ebce91d797029e84fe854515cb4213f file: batch: - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: script.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: script#01_a-b.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: '...\folder\script.SQL' - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: r unquoted_relative_sql_file_path: .\folder_1\folder_2\folder_3\folder_4\script.sql - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: setvar object_reference: naked_identifier: variable_name word: variable_value - statement: sqlcmd_command_segment: colon: ':' sqlcmd_operator: setvar object_reference: naked_identifier: variable_name double_quote: '"variable_value"' sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_begin_end.sql000066400000000000000000000021641451700765000270410ustar00rootroot00000000000000CREATE PROCEDURE dbo.Test_Begin_End AS BEGIN SELECT 'Weekend'; select a from tbl1; select b from tbl2; END; GO CREATE PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUTPUT , @in_debug INT = 1 AS --******************************************************************************************* SET NOCOUNT ON; BEGIN SELECT '8' END; GO CREATE OR ALTER PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUT , @in_debug INT = 1 READONLY AS --******************************************************************************************* SET NOCOUNT ON; BEGIN SELECT '8' END; GO ALTER PROCEDURE [dbo].[usp_process_tran_log] @out_vchCode uddt_output_code OUTPUT , @out_vchMsg uddt_output_msg OUTPUT , @in_debug INT = 1 AS SET NOCOUNT ON; BEGIN BEGIN TRY SELECT '8'; END TRY BEGIN CATCH SET @v_nSysErrorNum = ERROR_NUMBER(); SET @v_vchCode = ERROR_LINE(); SET @v_vchMsg = N'Missing control type.'; SET @v_vchMsg = @v_vchMsg + N' SQL Error = ' + ERROR_MESSAGE(); GOTO ERROR_HANDLER; END CATCH END; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_begin_end.yml000066400000000000000000000213151451700765000270420ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 2994b0898ee3fa4c6a39ccc0a26d004d90582cc796255ec0b85138cbb33401b7 file: - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: Test_Begin_End - keyword: AS - procedure_statement: statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'Weekend'" statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: a from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl1 statement_terminator: ; - statement: select_statement: select_clause: keyword: select select_clause_element: column_reference: naked_identifier: b from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl2 statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUTPUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: READONLY - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: create_procedure_statement: - keyword: ALTER - keyword: PROCEDURE - object_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[usp_process_tran_log]' - procedure_parameter_list: - parameter: '@out_vchCode' - data_type: data_type_identifier: uddt_output_code - keyword: OUTPUT - comma: ',' - parameter: '@out_vchMsg' - data_type: data_type_identifier: uddt_output_msg - keyword: OUTPUT - comma: ',' - parameter: '@in_debug' - data_type: data_type_identifier: INT - comparison_operator: raw_comparison_operator: '=' - expression: numeric_literal: '1' - keyword: AS - procedure_statement: - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: try_catch: - keyword: BEGIN - keyword: TRY - statement: select_statement: select_clause: keyword: SELECT select_clause_element: quoted_literal: "'8'" statement_terminator: ; - keyword: END - keyword: TRY - keyword: BEGIN - keyword: CATCH - statement: set_segment: keyword: SET parameter: '@v_nSysErrorNum' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ERROR_NUMBER bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchCode' assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: ERROR_LINE bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchMsg' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'Missing control type.'" statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@v_vchMsg' assignment_operator: raw_comparison_operator: '=' expression: - parameter: '@v_vchMsg' - binary_operator: + - quoted_literal: "N' SQL Error = '" - binary_operator: + - function: function_name: function_name_identifier: ERROR_MESSAGE bracketed: start_bracket: ( end_bracket: ) statement_terminator: ; - statement: goto_statement: keyword: GOTO naked_identifier: ERROR_HANDLER - statement_terminator: ; - keyword: END - keyword: CATCH - keyword: END - statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_simple.sql000066400000000000000000000000561451700765000264160ustar00rootroot00000000000000CREATE PROC ProcedureName AS SELECT DB_NAME() sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_simple.yml000066400000000000000000000016551451700765000264260ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 80a8562e881cc6523b9e57ecffa24e2f8c3f28ccceae740cb7ae1e1b02a46ec7 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: naked_identifier: ProcedureName - keyword: AS - procedure_statement: statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: DB_NAME bracketed: start_bracket: ( end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_single_statement.sql000066400000000000000000000020231451700765000304660ustar00rootroot00000000000000CREATE OR ALTER PROCEDURE DBO.SP_ECDC_CASES_INTER (@Apple [int], @Orange varchar(100)) AS INSERT INTO INTER.ECDC_CASES ( [COUNTRY], [COUNTRY_CODE], [CONTINENT], [POPULATION], [INDICATOR], [WEEKLY_COUNT], [YEAR_WEEK], [WEEK_START], [WEEK_END], [RATE_14_DAY], [CUMULATIVE_COUNT], [SOURCE] ) SELECT [COUNTRY], [COUNTRY_CODE], [CONTINENT], CAST([POPULATION] AS BIGINT) AS [POPULATION], [INDICATOR], CAST([WEEKLY_COUNT] AS BIGINT) AS [WEEKLY_COUNT], [YEAR_WEEK], CAST([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2)) AS DATE) AS [WEEK_START], CAST([dbo].[WEEK_END]([dbo].[CONVERT_ISO_WEEK_TO_DATETIME](LEFT(YEAR_WEEK,4),RIGHT(YEAR_WEEK,2))) AS DATE ) AS [WEEK_END], CAST([RATE_14_DAY] AS FLOAT) AS [RATE_14_DAY], CAST([CUMULATIVE_COUNT] AS BIGINT) AS [CUMULATIVE_COUNT], [SOURCE] FROM STAGE.ECDC_CASES sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedure_single_statement.yml000066400000000000000000000267661451700765000305140ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 3e7b1bb873be22b78273c31cc7f23750f41e2ccc8bcac842a2ffcccdb1027d2b file: batch: create_procedure_statement: - keyword: CREATE - keyword: OR - keyword: ALTER - keyword: PROCEDURE - object_reference: - naked_identifier: DBO - dot: . - naked_identifier: SP_ECDC_CASES_INTER - procedure_parameter_list: bracketed: - start_bracket: ( - parameter: '@Apple' - data_type: data_type_identifier: '[int]' - comma: ',' - parameter: '@Orange' - data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - end_bracket: ) - keyword: AS - procedure_statement: statement: insert_statement: - keyword: INSERT - keyword: INTO - table_reference: - naked_identifier: INTER - dot: . 
- naked_identifier: ECDC_CASES - bracketed: - start_bracket: ( - column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - column_reference: quoted_identifier: '[POPULATION]' - comma: ',' - column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - column_reference: quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_START]' - comma: ',' - column_reference: quoted_identifier: '[WEEK_END]' - comma: ',' - column_reference: quoted_identifier: '[RATE_14_DAY]' - comma: ',' - column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - column_reference: quoted_identifier: '[SOURCE]' - end_bracket: ) - select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: quoted_identifier: '[COUNTRY]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[COUNTRY_CODE]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[CONTINENT]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[POPULATION]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[POPULATION]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[INDICATOR]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[WEEKLY_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEKLY_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[YEAR_WEEK]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEK_START]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . function_name_identifier: '[WEEK_END]' bracketed: start_bracket: ( expression: function: function_name: quoted_identifier: '[dbo]' dot: . 
function_name_identifier: '[CONVERT_ISO_WEEK_TO_DATETIME]' bracketed: - start_bracket: ( - expression: function: function_name: keyword: LEFT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '4' - end_bracket: ) - comma: ',' - expression: function: function_name: keyword: RIGHT bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: YEAR_WEEK - comma: ',' - expression: numeric_literal: '2' - end_bracket: ) - end_bracket: ) end_bracket: ) keyword: AS data_type: data_type_identifier: DATE end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[WEEK_END]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[RATE_14_DAY]' keyword: AS data_type: data_type_identifier: FLOAT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[RATE_14_DAY]' - comma: ',' - select_clause_element: function: function_name: keyword: CAST bracketed: start_bracket: ( expression: column_reference: quoted_identifier: '[CUMULATIVE_COUNT]' keyword: AS data_type: data_type_identifier: BIGINT end_bracket: ) alias_expression: keyword: AS quoted_identifier: '[CUMULATIVE_COUNT]' - comma: ',' - select_clause_element: column_reference: quoted_identifier: '[SOURCE]' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: STAGE - dot: . - naked_identifier: ECDC_CASES sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedured_mixed_statements.sql000066400000000000000000000010001451700765000306340ustar00rootroot00000000000000CREATE PROC [Reporting].[PowerPlan] AS DECLARE @DATEFUNCTION DATE = GETDATE() DROP TABLE [Reporting].[PowerPlan_BASE]; DECLARE @deadlock_var NCHAR(3); SET @deadlock_var = N'LOW'; BEGIN SET NOCOUNT ON SET DEADLOCK_PRIORITY LOW SET DEADLOCK_PRIORITY NORMAL SET DEADLOCK_PRIORITY HIGH SET DEADLOCK_PRIORITY @deadlock_var SET DEADLOCK_PRIORITY 10 SET DEADLOCK_PRIORITY -5 SELECT 1 CREATE TABLE #TempTest WITH (DISTRIBUTION = ROUND_ROBIN, HEAP) AS (SELECT 2 AS Two, 3 AS Three, 4 AS Four ) END sqlfluff-2.3.5/test/fixtures/dialects/tsql/stored_procedured_mixed_statements.yml000066400000000000000000000122051451700765000306470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 47d4898afe32a0c40fb4ec93ac0dc776d568a5b3ff6b43a91ae50a04a1314209 file: batch: create_procedure_statement: - keyword: CREATE - keyword: PROC - object_reference: - quoted_identifier: '[Reporting]' - dot: . - quoted_identifier: '[PowerPlan]' - keyword: AS - procedure_statement: - statement: declare_segment: keyword: DECLARE parameter: '@DATEFUNCTION' data_type: data_type_identifier: DATE comparison_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: GETDATE bracketed: start_bracket: ( end_bracket: ) - statement: drop_table_statement: - keyword: DROP - keyword: TABLE - table_reference: - quoted_identifier: '[Reporting]' - dot: . 
- quoted_identifier: '[PowerPlan_BASE]' - statement_terminator: ; - statement: declare_segment: keyword: DECLARE parameter: '@deadlock_var' data_type: data_type_identifier: NCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '3' end_bracket: ) statement_terminator: ; - statement: set_segment: keyword: SET parameter: '@deadlock_var' assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'LOW'" statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: set_segment: - keyword: SET - keyword: NOCOUNT - keyword: 'ON' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: LOW - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: NORMAL - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - keyword: HIGH - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - parameter: '@deadlock_var' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - numeric_literal: '10' - statement: set_segment: - keyword: SET - keyword: DEADLOCK_PRIORITY - numeric_literal: sign_indicator: '-' numeric_literal: '5' - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - statement: create_table_as_select_statement: - keyword: CREATE - keyword: TABLE - table_reference: hash_identifier: '#TempTest' - table_distribution_index_clause: keyword: WITH bracketed: start_bracket: ( table_distribution_clause: - keyword: DISTRIBUTION - comparison_operator: raw_comparison_operator: '=' - keyword: ROUND_ROBIN comma: ',' table_index_clause: keyword: HEAP end_bracket: ) - keyword: AS - bracketed: start_bracket: ( select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: '2' alias_expression: keyword: AS naked_identifier: Two - comma: ',' - select_clause_element: numeric_literal: '3' alias_expression: keyword: AS naked_identifier: Three - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: keyword: AS naked_identifier: Four end_bracket: ) - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/synonym.sql000066400000000000000000000005331451700765000232110ustar00rootroot00000000000000-- Create a synonym CREATE SYNONYM my_synonym FOR mytable; -- Create a synonym for a multi-part schema CREATE SYNONYM my_synonym FOR otherdb.dbo.mytable; -- Drop a synonym DROP SYNONYM my_synonym; -- Conditionally drop synonym DROP SYNONYM IF EXISTS my_synonym; -- Conditionally drop synonym with schema DROP SYNONYM IF EXISTS dbo.my_synonym; sqlfluff-2.3.5/test/fixtures/dialects/tsql/synonym.yml000066400000000000000000000033221451700765000232120ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 5c9c7a9f4248765659055b697db0cc3d3399394303e85f20554b55bf3d83aa6d file: batch: - statement: create_synonym_statement: - keyword: CREATE - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - keyword: FOR - object_reference: naked_identifier: mytable - statement_terminator: ; - statement: create_synonym_statement: - keyword: CREATE - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - keyword: FOR - object_reference: - naked_identifier: otherdb - dot: . 
- naked_identifier: dbo - dot: . - naked_identifier: mytable - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - synonym_reference: naked_identifier: my_synonym - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - keyword: IF - keyword: EXISTS - synonym_reference: naked_identifier: my_synonym - statement_terminator: ; - statement: drop_synonym_statement: - keyword: DROP - keyword: SYNONYM - keyword: IF - keyword: EXISTS - synonym_reference: - naked_identifier: dbo - dot: . - naked_identifier: my_synonym - statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/system-variables.sql000066400000000000000000000005371451700765000247730ustar00rootroot00000000000000UPDATE HumanResources.Employee SET JobTitle = N'Executive' WHERE NationalIDNumber = 123456789 IF @@ROWCOUNT = 0 PRINT 'Warning: No rows were updated'; IF @@ERROR = 547 BEGIN PRINT N'A check constraint violation occurred.'; END GO SELECT @@IDENTITY AS 'Identity'; GO PRINT @@TRANCOUNT GO SELECT @@PACK_RECEIVED AS 'Packets Received'; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/system-variables.yml000066400000000000000000000057601451700765000250000ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: be997bb95ccd1ed890e72a446c3418a44eb32babce3c243dd4b5aeca6f9b528d file: - batch: - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: Employee set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: JobTitle assignment_operator: raw_comparison_operator: '=' expression: quoted_literal: "N'Executive'" where_clause: keyword: WHERE expression: column_reference: naked_identifier: NationalIDNumber comparison_operator: raw_comparison_operator: '=' numeric_literal: '123456789' - statement: if_then_statement: if_clause: keyword: IF expression: system_variable: '@@ROWCOUNT' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' statement: print_statement: keyword: PRINT expression: quoted_literal: "'Warning: No rows were updated'" statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: system_variable: '@@ERROR' comparison_operator: raw_comparison_operator: '=' numeric_literal: '547' statement: begin_end_block: - keyword: BEGIN - statement: print_statement: keyword: PRINT expression: quoted_literal: "N'A check constraint violation occurred.'" statement_terminator: ; - keyword: END - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@IDENTITY' alias_expression: keyword: AS quoted_identifier: "'Identity'" statement_terminator: ; - go_statement: keyword: GO - batch: statement: print_statement: keyword: PRINT expression: system_variable: '@@TRANCOUNT' - go_statement: keyword: GO - batch: statement: select_statement: select_clause: keyword: SELECT select_clause_element: system_variable: '@@PACK_RECEIVED' alias_expression: keyword: AS quoted_identifier: "'Packets Received'" statement_terminator: ; - go_statement: keyword: GO 
sqlfluff-2.3.5/test/fixtures/dialects/tsql/table_variables.sql000066400000000000000000000000671451700765000246160ustar00rootroot00000000000000declare @queue table ( id int, url nvarchar(100) ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/table_variables.yml000066400000000000000000000020541451700765000246160ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 862b2714c5dba3bc89677f4191a6c8ab1c6135281db59a5ba2e5b9183b80c83c file: batch: statement: declare_segment: - keyword: declare - parameter: '@queue' - keyword: table - bracketed: - start_bracket: ( - column_definition: naked_identifier: id data_type: data_type_identifier: int - comma: ',' - column_definition: naked_identifier: url data_type: data_type_identifier: nvarchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - end_bracket: ) sqlfluff-2.3.5/test/fixtures/dialects/tsql/tablesample.sql000066400000000000000000000003601451700765000237640ustar00rootroot00000000000000SELECT * FROM Sales.Customer TABLESAMPLE SYSTEM (10 PERCENT); SELECT * FROM Sales.Customer TABLESAMPLE (10 ROWS); SELECT * FROM Sales.Customer TABLESAMPLE (10); SELECT * FROM Sales.Customer TABLESAMPLE SYSTEM (10 ROWS) REPEATABLE (100); sqlfluff-2.3.5/test/fixtures/dialects/tsql/tablesample.yml000066400000000000000000000071621451700765000237750ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: d871c98295ef570f2ce25f7f7b737c474e7cee35204d5e3e0143b5cbfb93f541 file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' keyword: PERCENT end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '10' keyword: ROWS end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . 
- naked_identifier: Customer sample_expression: keyword: TABLESAMPLE bracketed: start_bracket: ( numeric_literal: '10' end_bracket: ) statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer sample_expression: - keyword: TABLESAMPLE - keyword: SYSTEM - bracketed: start_bracket: ( numeric_literal: '10' keyword: ROWS end_bracket: ) - keyword: REPEATABLE - bracketed: start_bracket: ( numeric_literal: '100' end_bracket: ) statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/temp_tables.sql000066400000000000000000000001051451700765000237670ustar00rootroot00000000000000SELECT a ,b ,c FROM #UnionA; Select d ,e ,f FROM ##UnionB; sqlfluff-2.3.5/test/fixtures/dialects/tsql/temp_tables.yml000066400000000000000000000033301451700765000237740ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 29d56dbe3d227788a944c46dc2fd1e4277b8b54abf05e66bd1caf23e8df20212 file: batch: - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: a - comma: ',' - select_clause_element: column_reference: naked_identifier: b - comma: ',' - select_clause_element: column_reference: naked_identifier: c from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '#UnionA' statement_terminator: ; - statement: select_statement: select_clause: - keyword: Select - select_clause_element: column_reference: naked_identifier: d - comma: ',' - select_clause_element: column_reference: naked_identifier: e - comma: ',' - select_clause_element: column_reference: naked_identifier: f from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '##UnionB' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/temporal_tables.sql000066400000000000000000000066751451700765000246670ustar00rootroot00000000000000-- Select Query Temporal Tables SELECT * FROM Employee FOR SYSTEM_TIME BETWEEN '2021-01-01 00:00:00.0000000' AND '2022-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME ALL; SELECT * FROM Employee FOR SYSTEM_TIME FROM '2021-01-01 00:00:00.0000000' TO '2022-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME AS OF '2021-01-01 00:00:00.0000000'; SELECT * FROM Employee FOR SYSTEM_TIME CONTAINED IN ('2021-01-01 00:00:00.0000000', '2022-01-01 00:00:00.0000000'); -- Create Temporal Tables CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.EmployeeHistory), DURABILITY = SCHEMA_ONLY ); ; GO -- https://learn.microsoft.com/en-us/sql/relational-databases/tables/creating-a-system-versioned-temporal-table?view=sql-server-ver16#creating-a-temporal-table-with-a-default-history-table CREATE TABLE Department ( DeptID INT NOT NULL PRIMARY KEY CLUSTERED , DeptName VARCHAR(50) NOT NULL , ManagerID INT NULL , ParentDeptID INT NULL , ValidFrom DATETIME2 
GENERATED ALWAYS AS ROW START NOT NULL , ValidTo DATETIME2 GENERATED ALWAYS AS ROW END NOT NULL , PERIOD FOR SYSTEM_TIME (ValidFrom, ValidTo) ) WITH (SYSTEM_VERSIONING = ON (HISTORY_TABLE = dbo.DepartmentHistory)) ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = COLUMNC ); ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH (DATA_DELETION = ON (FILTER_COLUMN = ColumnC, RETENTION_PERIOD = INFINITE)); ; GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( MEMORY_OPTIMIZED = ON, DURABILITY = SCHEMA_AND_DATA, SYSTEM_VERSIONING = ON (HISTORY_TABLE = History.DepartmentHistory) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( REMOTE_DATA_ARCHIVE = OFF ( MIGRATION_STATE = PAUSED ), LEDGER = ON (LEDGER_VIEW = dbo.ABC (TRANSACTION_ID_COLUMN_NAME = [ColumnC], SEQUENCE_NUMBER_COLUMN_NAME = [ColumnDecimal])) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( DATA_COMPRESSION = ROW XML_COMPRESSION = ON ON PARTITIONS (2) ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( DATA_COMPRESSION = PAGE ON PARTITIONS (3, 5) XML_COMPRESSION = OFF ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( XML_COMPRESSION = ON ON PARTITIONS (3 TO 5), FILETABLE_DIRECTORY = '/path1/path2', FILETABLE_COLLATE_FILENAME = constraint1, FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME = constraint2, FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME = constraint3, FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME = constraint4 ); GO CREATE TABLE [dbo].[EC DC] ( [Column B] [varchar](100), [ColumnC] varchar(100), [ColumnDecimal] decimal(10,3) ) WITH ( REMOTE_DATA_ARCHIVE = ON ( FILTER_PREDICATE = NULL, MIGRATION_STATE = OUTBOUND), LEDGER = ON (LEDGER_VIEW = dbo.ABC, APPEND_ONLY = ON) ); GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/temporal_tables.yml000066400000000000000000000723171451700765000246650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: bbd05ea5802b6edbcb5baf6d32bf80afd3ce4fa374f6cf87ff658b3bf31190a8 file: - batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: BETWEEN - quoted_literal: "'2021-01-01 00:00:00.0000000'" - keyword: AND - quoted_literal: "'2022-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: ALL statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: FROM - quoted_literal: "'2021-01-01 00:00:00.0000000'" - keyword: TO - quoted_literal: "'2022-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: AS - keyword: OF - quoted_literal: "'2021-01-01 00:00:00.0000000'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: Employee temporal_query: - keyword: FOR - keyword: SYSTEM_TIME - keyword: CONTAINED - keyword: IN - bracketed: - start_bracket: ( - quoted_literal: "'2021-01-01 00:00:00.0000000'" - comma: ',' - quoted_literal: "'2022-01-01 00:00:00.0000000'" - end_bracket: ) statement_terminator: ; - statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . - naked_identifier: EmployeeHistory end_bracket: ) - comma: ',' - keyword: DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: SCHEMA_ONLY - end_bracket: ) - statement_terminator: ; - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: naked_identifier: Department - bracketed: - start_bracket: ( - column_definition: - naked_identifier: DeptID - data_type: data_type_identifier: INT - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - column_constraint_segment: - keyword: PRIMARY - keyword: KEY - keyword: CLUSTERED - comma: ',' - column_definition: naked_identifier: DeptName data_type: data_type_identifier: VARCHAR bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '50' end_bracket: ) column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ManagerID data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: naked_identifier: ParentDeptID data_type: data_type_identifier: INT column_constraint_segment: keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ValidFrom - data_type: data_type_identifier: DATETIME2 - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: ROW - keyword: START - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - column_definition: - naked_identifier: ValidTo - data_type: data_type_identifier: DATETIME2 - column_constraint_segment: - keyword: GENERATED - keyword: ALWAYS - keyword: AS - keyword: ROW - keyword: END - column_constraint_segment: - keyword: NOT - keyword: 'NULL' - comma: ',' - period_segment: - keyword: PERIOD - keyword: FOR - keyword: SYSTEM_TIME - bracketed: - start_bracket: ( - column_reference: naked_identifier: ValidFrom - comma: ',' - column_reference: naked_identifier: ValidTo - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: DepartmentHistory end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: start_bracket: ( keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME comparison_operator: raw_comparison_operator: '=' object_reference: naked_identifier: COLUMNC end_bracket: ) - statement_terminator: ; statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_DELETION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: FILTER_COLUMN - comparison_operator: raw_comparison_operator: '=' - column_reference: naked_identifier: ColumnC - comma: ',' - keyword: RETENTION_PERIOD - comparison_operator: raw_comparison_operator: '=' - date_part: INFINITE - end_bracket: ) - end_bracket: ) - statement_terminator: ; statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: MEMORY_OPTIMIZED - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - comma: ',' - keyword: DURABILITY - comparison_operator: raw_comparison_operator: '=' - keyword: SCHEMA_AND_DATA - comma: ',' - keyword: SYSTEM_VERSIONING - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: HISTORY_TABLE comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: History - dot: . - naked_identifier: DepartmentHistory end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: REMOTE_DATA_ARCHIVE - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - bracketed: - start_bracket: ( - keyword: MIGRATION_STATE - comparison_operator: raw_comparison_operator: '=' - keyword: PAUSED - end_bracket: ) - comma: ',' - keyword: LEDGER - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: start_bracket: ( keyword: LEDGER_VIEW comparison_operator: raw_comparison_operator: '=' table_reference: - naked_identifier: dbo - dot: . - naked_identifier: ABC bracketed: - start_bracket: ( - keyword: TRANSACTION_ID_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '[ColumnC]' - comma: ',' - keyword: SEQUENCE_NUMBER_COLUMN_NAME - comparison_operator: raw_comparison_operator: '=' - column_reference: quoted_identifier: '[ColumnDecimal]' - end_bracket: ) end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: ROW - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: start_bracket: ( numeric_literal: '2' end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: DATA_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: PAGE - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - comma: ',' - numeric_literal: '5' - end_bracket: ) - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'OFF' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . 
- quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: XML_COMPRESSION - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - keyword: 'ON' - keyword: PARTITIONS - bracketed: - start_bracket: ( - numeric_literal: '3' - keyword: TO - numeric_literal: '5' - end_bracket: ) - comma: ',' - keyword: FILETABLE_DIRECTORY - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'/path1/path2'" - comma: ',' - keyword: FILETABLE_COLLATE_FILENAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint1 - comma: ',' - keyword: FILETABLE_PRIMARY_KEY_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint2 - comma: ',' - keyword: FILETABLE_STREAMID_UNIQUE_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint3 - comma: ',' - keyword: FILETABLE_FULLPATH_UNIQUE_CONSTRAINT_NAME - comparison_operator: raw_comparison_operator: '=' - object_reference: naked_identifier: constraint4 - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_table_statement: - keyword: CREATE - keyword: TABLE - table_reference: - quoted_identifier: '[dbo]' - dot: . - quoted_identifier: '[EC DC]' - bracketed: - start_bracket: ( - column_definition: quoted_identifier: '[Column B]' data_type: data_type_identifier: '[varchar]' bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnC]' data_type: data_type_identifier: varchar bracketed_arguments: bracketed: start_bracket: ( expression: numeric_literal: '100' end_bracket: ) - comma: ',' - column_definition: quoted_identifier: '[ColumnDecimal]' data_type: data_type_identifier: decimal bracketed_arguments: bracketed: - start_bracket: ( - expression: numeric_literal: '10' - comma: ',' - expression: numeric_literal: '3' - end_bracket: ) - end_bracket: ) - table_option_statement: keyword: WITH bracketed: - start_bracket: ( - keyword: REMOTE_DATA_ARCHIVE - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: FILTER_PREDICATE - comparison_operator: raw_comparison_operator: '=' - keyword: 'NULL' - comma: ',' - keyword: MIGRATION_STATE - comparison_operator: raw_comparison_operator: '=' - keyword: OUTBOUND - end_bracket: ) - comma: ',' - keyword: LEDGER - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - bracketed: - start_bracket: ( - keyword: LEDGER_VIEW - comparison_operator: raw_comparison_operator: '=' - table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: ABC - comma: ',' - keyword: APPEND_ONLY - comparison_operator: raw_comparison_operator: '=' - keyword: 'ON' - end_bracket: ) - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/transaction.sql000066400000000000000000000004541451700765000240240ustar00rootroot00000000000000BEGIN TRANSACTION; DELETE FROM HumanResources.JobCandidate WHERE JobCandidateID = 13; COMMIT; BEGIN TRAN; DELETE FROM HumanResources.JobCandidate WHERE JobCandidateID = 13; ROLLBACK TRAN; BEGIN TRAN; SAVE TRANSACTION; BEGIN TRAN namey; ROLLBACK namey; SAVE TRAN @variable; COMMIT @variable; sqlfluff-2.3.5/test/fixtures/dialects/tsql/transaction.yml000066400000000000000000000052031451700765000240230ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: fd3b8479ff5e72d7996d7e9a7a1b284d2130dd66ea72112371626a50ab94b12c file: batch: - statement: transaction_statement: - keyword: BEGIN - keyword: TRANSACTION - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . - naked_identifier: JobCandidate - where_clause: keyword: WHERE expression: column_reference: naked_identifier: JobCandidateID comparison_operator: raw_comparison_operator: '=' numeric_literal: '13' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - statement_terminator: ; - statement: delete_statement: - keyword: DELETE - keyword: FROM - table_reference: - naked_identifier: HumanResources - dot: . 
- naked_identifier: JobCandidate - where_clause: keyword: WHERE expression: column_reference: naked_identifier: JobCandidateID comparison_operator: raw_comparison_operator: '=' numeric_literal: '13' - statement_terminator: ; - statement: transaction_statement: - keyword: ROLLBACK - keyword: TRAN - statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - statement_terminator: ; - statement: transaction_statement: - keyword: SAVE - keyword: TRANSACTION - statement_terminator: ; - statement: transaction_statement: - keyword: BEGIN - keyword: TRAN - naked_identifier: namey - statement_terminator: ; - statement: transaction_statement: keyword: ROLLBACK naked_identifier: namey statement_terminator: ; - statement: transaction_statement: - keyword: SAVE - keyword: TRAN - parameter: '@variable' - statement_terminator: ; - statement: transaction_statement: keyword: COMMIT parameter: '@variable' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/triggers.sql000066400000000000000000000046701451700765000233310ustar00rootroot00000000000000CREATE TRIGGER reminder1 ON Sales.Customer AFTER INSERT, UPDATE AS RAISERROR ('Notify Customer Relations', 16, 10); GO CREATE TRIGGER reminder2 ON Sales.Customer AFTER INSERT, UPDATE, DELETE AS EXEC msdb.dbo.sp_send_dbmail @profile_name = 'AdventureWorks2012 Administrator', @recipients = 'danw@Adventure-Works.com', @body = 'Don''t forget to print a report for the sales force.', @subject = 'Reminder'; GO CREATE TRIGGER Purchasing.LowCredit ON Purchasing.PurchaseOrderHeader AFTER INSERT AS IF (ROWCOUNT_BIG() = 0) RETURN; IF EXISTS (SELECT 1 FROM inserted AS i JOIN Purchasing.Vendor AS v ON v.BusinessEntityID = i.VendorID WHERE v.CreditRating = 5 ) BEGIN RAISERROR ('A vendor''s credit rating is too low to accept new purchase orders.', 16, 1); ROLLBACK TRANSACTION; RETURN END; GO CREATE TRIGGER safety ON DATABASE FOR DROP_SYNONYM AS IF (@@ROWCOUNT = 0) RETURN; RAISERROR ('You must disable Trigger "safety" to remove synonyms!', 10, 1) ROLLBACK GO DROP TRIGGER safety ON DATABASE; GO CREATE TRIGGER ddl_trig_database ON ALL SERVER FOR CREATE_DATABASE AS PRINT 'Database Created.' 
SELECT 1 GO CREATE TRIGGER ddl_trig_database ON ALL SERVER FOR CREATE_DATABASE AS PRINT 'Database Created.'; SELECT 1 GO DROP TRIGGER ddl_trig_database ON ALL SERVER; GO CREATE TRIGGER connection_limit_trigger ON ALL SERVER WITH EXECUTE AS 'login_test' FOR LOGON AS BEGIN IF ORIGINAL_LOGIN()= 'login_test' AND (SELECT COUNT(*) FROM sys.dm_exec_sessions WHERE is_user_process = 1 AND original_login_name = 'login_test') > 3 ROLLBACK; END; GO Create TRIGGER dbo.tr_SP_BALS_L2_ATTRIBUTES ON dbo.SP_BALS_L2_ATTRIBUTES AFTER UPDATE AS UPDATE dbo.SP_BALS_L2_ATTRIBUTES SET PDW_LAST_UPDATED = Getdate() FROM dbo.SP_BALS_L2_ATTRIBUTES o INNER JOIN Inserted i ON o.PK_L2_BALS = i.PK_L2_BALS go disable trigger dbo.tr_SP_BALS_L2_ATTRIBUTES on dbo.SP_BALS_L2_ATTRIBUTES go Create TRIGGER dbo.tr_u_SP_BALS_L2_ATTRIBUTES ON dbo.SP_BALS_L2_ATTRIBUTES AFTER UPDATE AS UPDATE dbo.SP_BALS_L2_ATTRIBUTES SET PDW_LAST_UPDATED = sysdatetime() FROM dbo.SP_BALS_L2_ATTRIBUTES o INNER JOIN Inserted i ON o.PK_L2_BALS = i.PK_L2_BALS GO DROP TRIGGER employee_insupd; GO DROP TRIGGER safety ON DATABASE; GO disable trigger dbo.tr_u_SP_BALS_L2_ATTRIBUTES on dbo.SP_BALS_L2_ATTRIBUTES GO DISABLE TRIGGER safety ON DATABASE; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/triggers.yml000066400000000000000000000501141451700765000233250ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cd35978be7ba6609d00c6c3fab646c1ad57806dfc4fbeb7fc4d04ef40784acba file: - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: reminder1 - keyword: 'ON' - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer - keyword: AFTER - keyword: INSERT - comma: ',' - keyword: UPDATE - keyword: AS - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'Notify Customer Relations'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '10' - end_bracket: ) - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: reminder2 - keyword: 'ON' - table_reference: - naked_identifier: Sales - dot: . - naked_identifier: Customer - keyword: AFTER - keyword: INSERT - comma: ',' - keyword: UPDATE - comma: ',' - keyword: DELETE - keyword: AS - statement: execute_script_statement: - keyword: EXEC - object_reference: - naked_identifier: msdb - dot: . - naked_identifier: dbo - dot: . 
- naked_identifier: sp_send_dbmail - parameter: '@profile_name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'AdventureWorks2012 Administrator'" - comma: ',' - parameter: '@recipients' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'danw@Adventure-Works.com'" - comma: ',' - parameter: '@body' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Don''t forget to print a report for the sales force.'" - comma: ',' - parameter: '@subject' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'Reminder'" - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: LowCredit - keyword: 'ON' - table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: PurchaseOrderHeader - keyword: AFTER - keyword: INSERT - keyword: AS - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: function: function_name: function_name_identifier: ROWCOUNT_BIG bracketed: start_bracket: ( end_bracket: ) comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) statement: return_segment: keyword: RETURN statement_terminator: ; - statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: inserted alias_expression: keyword: AS naked_identifier: i join_clause: keyword: JOIN from_expression_element: table_expression: table_reference: - naked_identifier: Purchasing - dot: . - naked_identifier: Vendor alias_expression: keyword: AS naked_identifier: v join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: v - dot: . - naked_identifier: BusinessEntityID - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: VendorID where_clause: keyword: WHERE expression: column_reference: - naked_identifier: v - dot: . 
- naked_identifier: CreditRating comparison_operator: raw_comparison_operator: '=' numeric_literal: '5' end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'A vendor''s credit rating is too low to accept\ \ new\npurchase orders.'" - comma: ',' - numeric_literal: '16' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement_terminator: ; - statement: transaction_statement: - keyword: ROLLBACK - keyword: TRANSACTION - statement_terminator: ; - statement: return_segment: keyword: RETURN - keyword: END statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE - keyword: FOR - naked_identifier: DROP_SYNONYM - keyword: AS - statement: if_then_statement: if_clause: keyword: IF expression: bracketed: start_bracket: ( expression: system_variable: '@@ROWCOUNT' comparison_operator: raw_comparison_operator: '=' numeric_literal: '0' end_bracket: ) statement: return_segment: keyword: RETURN statement_terminator: ; - statement: raiserror_statement: keyword: RAISERROR bracketed: - start_bracket: ( - quoted_literal: "'You must disable Trigger \"safety\" to remove synonyms!'" - comma: ',' - numeric_literal: '10' - comma: ',' - numeric_literal: '1' - end_bracket: ) - statement: transaction_statement: keyword: ROLLBACK - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: FOR - naked_identifier: CREATE_DATABASE - keyword: AS - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Database Created.'" - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: FOR - naked_identifier: CREATE_DATABASE - keyword: AS - statement: print_statement: keyword: PRINT expression: quoted_literal: "'Database Created.'" statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '1' - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: ddl_trig_database - keyword: 'ON' - keyword: ALL - keyword: SERVER statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: CREATE - keyword: TRIGGER - trigger_reference: naked_identifier: connection_limit_trigger - keyword: 'ON' - keyword: ALL - keyword: SERVER - keyword: WITH - execute_as_clause: - keyword: EXECUTE - keyword: AS - quoted_identifier: "'login_test'" - keyword: FOR - naked_identifier: LOGON - keyword: AS - statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: if_clause: keyword: IF expression: - function: function_name: function_name_identifier: ORIGINAL_LOGIN bracketed: start_bracket: ( end_bracket: ) - comparison_operator: raw_comparison_operator: '=' - 
quoted_literal: "'login_test'" - binary_operator: AND - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: function: function_name: function_name_identifier: COUNT bracketed: start_bracket: ( star: '*' end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sys - dot: . - naked_identifier: dm_exec_sessions where_clause: keyword: WHERE expression: - column_reference: naked_identifier: is_user_process - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' - binary_operator: AND - column_reference: naked_identifier: original_login_name - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'login_test'" end_bracket: ) - comparison_operator: raw_comparison_operator: '>' - numeric_literal: '3' statement: transaction_statement: keyword: ROLLBACK statement_terminator: ; - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: statement: create_trigger: - keyword: Create - keyword: TRIGGER - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_SP_BALS_L2_ATTRIBUTES - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - keyword: AFTER - keyword: UPDATE - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: PDW_LAST_UPDATED assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: Getdate bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: Inserted alias_expression: naked_identifier: i - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: PK_L2_BALS - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: PK_L2_BALS - go_statement: keyword: go - batch: statement: disable_trigger: - keyword: disable - keyword: trigger - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_SP_BALS_L2_ATTRIBUTES - keyword: 'on' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - go_statement: keyword: go - batch: statement: create_trigger: - keyword: Create - keyword: TRIGGER - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_u_SP_BALS_L2_ATTRIBUTES - keyword: 'ON' - table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - keyword: AFTER - keyword: UPDATE - keyword: AS - statement: update_statement: keyword: UPDATE table_reference: - naked_identifier: dbo - dot: . 
- naked_identifier: SP_BALS_L2_ATTRIBUTES set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: PDW_LAST_UPDATED assignment_operator: raw_comparison_operator: '=' expression: function: function_name: function_name_identifier: sysdatetime bracketed: start_bracket: ( end_bracket: ) from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES alias_expression: naked_identifier: o join_clause: - keyword: INNER - keyword: JOIN - from_expression_element: table_expression: table_reference: naked_identifier: Inserted alias_expression: naked_identifier: i - join_on_condition: keyword: 'ON' expression: - column_reference: - naked_identifier: o - dot: . - naked_identifier: PK_L2_BALS - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: i - dot: . - naked_identifier: PK_L2_BALS - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: employee_insupd statement_terminator: ; - go_statement: keyword: GO - batch: statement: drop_trigger: - keyword: DROP - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO - batch: statement: disable_trigger: - keyword: disable - keyword: trigger - trigger_reference: - naked_identifier: dbo - dot: . - naked_identifier: tr_u_SP_BALS_L2_ATTRIBUTES - keyword: 'on' - object_reference: - naked_identifier: dbo - dot: . - naked_identifier: SP_BALS_L2_ATTRIBUTES - go_statement: keyword: GO - batch: statement: disable_trigger: - keyword: DISABLE - keyword: TRIGGER - trigger_reference: naked_identifier: safety - keyword: 'ON' - keyword: DATABASE statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/try_catch.sql000066400000000000000000000004501451700765000234530ustar00rootroot00000000000000BEGIN TRY -- Table does not exist; object name resolution -- error not caught. SELECT * FROM NonexistentTable; END TRY BEGIN CATCH SELECT ERROR_NUMBER() AS ErrorNumber ,ERROR_MESSAGE() AS ErrorMessage; THROW END CATCH GO THROW 50005, N'an error occurred', 1; sqlfluff-2.3.5/test/fixtures/dialects/tsql/try_catch.yml000066400000000000000000000045441451700765000234650ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 447bb3cd044ffe26c65e01f60557358411e66ad25287f132fca88e45957cc33c file: - batch: statement: try_catch: - keyword: BEGIN - keyword: TRY - statement: select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: NonexistentTable statement_terminator: ; - keyword: END - keyword: TRY - keyword: BEGIN - keyword: CATCH - statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: function: function_name: function_name_identifier: ERROR_NUMBER bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: AS naked_identifier: ErrorNumber - comma: ',' - select_clause_element: function: function_name: function_name_identifier: ERROR_MESSAGE bracketed: start_bracket: ( end_bracket: ) alias_expression: keyword: AS naked_identifier: ErrorMessage statement_terminator: ; - statement: throw_statement: keyword: THROW - keyword: END - keyword: CATCH - go_statement: keyword: GO - batch: statement: throw_statement: - keyword: THROW - numeric_literal: '50005' - comma: ',' - quoted_literal: "N'an error occurred'" - comma: ',' - numeric_literal: '1' statement_terminator: ; sqlfluff-2.3.5/test/fixtures/dialects/tsql/update.sql000066400000000000000000000003761451700765000227640ustar00rootroot00000000000000update dbo.Cases set [Flg] = 1 where ID in (select distinct [ID] from dbo.CX) OPTION (Label = 'Cases') ; update tt set tt.rn += 1 from table1 as tt join src on tt._id = src._id; UPDATE stuff SET deleted = 1 OUTPUT * INTO trash WHERE useless = 1 sqlfluff-2.3.5/test/fixtures/dialects/tsql/update.yml000066400000000000000000000105461451700765000227660ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: cdd60df88890b377d4ce4b9d89ed051cf80ae7dc18aa073042dae4b191abb466 file: batch: - statement: update_statement: keyword: update table_reference: - naked_identifier: dbo - dot: . - naked_identifier: Cases set_clause_list: keyword: set set_clause: column_reference: quoted_identifier: '[Flg]' assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' where_clause: keyword: where expression: column_reference: naked_identifier: ID keyword: in bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: quoted_identifier: '[ID]' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: dbo - dot: . - naked_identifier: CX end_bracket: ) option_clause: keyword: OPTION bracketed: start_bracket: ( query_hint_segment: keyword: Label comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Cases'" end_bracket: ) statement_terminator: ; - statement: update_statement: keyword: update table_reference: naked_identifier: tt set_clause_list: keyword: set set_clause: column_reference: - naked_identifier: tt - dot: . 
- naked_identifier: rn assignment_operator: binary_operator: + raw_comparison_operator: '=' expression: numeric_literal: '1' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: table1 alias_expression: keyword: as naked_identifier: tt join_clause: keyword: join from_expression_element: table_expression: table_reference: naked_identifier: src join_on_condition: keyword: 'on' expression: - column_reference: - naked_identifier: tt - dot: . - naked_identifier: _id - comparison_operator: raw_comparison_operator: '=' - column_reference: - naked_identifier: src - dot: . - naked_identifier: _id statement_terminator: ; - statement: update_statement: keyword: UPDATE table_reference: naked_identifier: stuff set_clause_list: keyword: SET set_clause: column_reference: naked_identifier: deleted assignment_operator: raw_comparison_operator: '=' expression: numeric_literal: '1' output_clause: - keyword: OUTPUT - wildcard_expression: wildcard_identifier: star: '*' - keyword: INTO - table_reference: naked_identifier: trash where_clause: keyword: WHERE expression: column_reference: naked_identifier: useless comparison_operator: raw_comparison_operator: '=' numeric_literal: '1' sqlfluff-2.3.5/test/fixtures/dialects/tsql/waitfor.sql000066400000000000000000000003471451700765000231530ustar00rootroot00000000000000EXECUTE sp_add_job @job_name = 'TestJob'; BEGIN WAITFOR TIME '22:20'; EXECUTE sp_update_job @job_name = 'TestJob', @new_name = 'UpdatedJob'; END; GO BEGIN WAITFOR DELAY '02:00'; EXECUTE sp_helpdb; END; GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/waitfor.yml000066400000000000000000000041031451700765000231470ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 1be27bfe60666f3494dc3c6c97d1d56226c124be3559d00ab6a7804b3ac45907 file: - batch: - statement: execute_script_statement: keyword: EXECUTE object_reference: naked_identifier: sp_add_job parameter: '@job_name' comparison_operator: raw_comparison_operator: '=' quoted_literal: "'TestJob'" statement_terminator: ; - statement: begin_end_block: - keyword: BEGIN - statement: waitfor_statement: - keyword: WAITFOR - keyword: TIME - expression: quoted_literal: "'22:20'" - statement_terminator: ; - statement: execute_script_statement: - keyword: EXECUTE - object_reference: naked_identifier: sp_update_job - parameter: '@job_name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'TestJob'" - comma: ',' - parameter: '@new_name' - comparison_operator: raw_comparison_operator: '=' - quoted_literal: "'UpdatedJob'" - statement_terminator: ; - keyword: END - statement_terminator: ; - go_statement: keyword: GO - batch: statement: begin_end_block: - keyword: BEGIN - statement: waitfor_statement: - keyword: WAITFOR - keyword: DELAY - expression: quoted_literal: "'02:00'" - statement_terminator: ; - statement: execute_script_statement: keyword: EXECUTE object_reference: naked_identifier: sp_helpdb statement_terminator: ; - keyword: END statement_terminator: ; - go_statement: keyword: GO sqlfluff-2.3.5/test/fixtures/dialects/tsql/while_statement.sql000066400000000000000000000004441451700765000246720ustar00rootroot00000000000000WHILE (1=1) BEGIN IF EXISTS (SELECT * FROM ##MyTempTable WHERE EventCode = 'Done') BEGIN BREAK; -- 'Done' row has finally been inserted and detected, so end this loop. END PRINT N'The other process is not yet done.'; -- Re-confirm the non-done status to the console. END sqlfluff-2.3.5/test/fixtures/dialects/tsql/while_statement.yml000066400000000000000000000051441451700765000246760ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. 
_hash: 10866693896cdecd8390324d3138cc43804895595bcda91c8e64c983aa9365cc file: batch: statement: while_statement: keyword: WHILE expression: bracketed: start_bracket: ( expression: - numeric_literal: '1' - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '1' end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: if_then_statement: if_clause: keyword: IF expression: keyword: EXISTS bracketed: start_bracket: ( select_statement: select_clause: keyword: SELECT select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: hash_identifier: '##MyTempTable' where_clause: keyword: WHERE expression: column_reference: naked_identifier: EventCode comparison_operator: raw_comparison_operator: '=' quoted_literal: "'Done'" end_bracket: ) statement: begin_end_block: - keyword: BEGIN - statement: break_statement: keyword: BREAK - statement_terminator: ; - keyword: END - statement: print_statement: keyword: PRINT expression: quoted_literal: "N'The other process is not yet done.'" statement_terminator: ; - keyword: END sqlfluff-2.3.5/test/fixtures/dialects/tsql/window_functions.sql000066400000000000000000000014251451700765000250750ustar00rootroot00000000000000-- Classical partition/order by SELECT ROW_NUMBER() OVER(PARTITION BY t.col1 ORDER BY t.col2) rn FROM mytable t; -- Partition by constant SELECT ROW_NUMBER() OVER(PARTITION BY 1 ORDER BY t.col2) rn FROM mytable t; -- Partition by expression SELECT ROW_NUMBER() OVER(PARTITION BY CASE WHEN t.col1 = 'value' THEN 1 END ORDER BY t.col2) rn FROM mytable t; -- Partition by expression and column SELECT ROW_NUMBER() OVER(PARTITION BY t.col3, CASE WHEN t.col1 = 'value' THEN 1 END, t.col4 ORDER BY t.col2) rn FROM mytable t; -- Partition by select statement SELECT ROW_NUMBER() OVER(PARTITION BY (SELECT col1 FROM othertable) ORDER BY t.col2) rn FROM mytable t; -- Partition by aggregate SELECT ROW_NUMBER() OVER(PARTITION BY SUM(t.col1) ORDER BY t.col2) rn FROM mytable t GROUP BY t.col2; sqlfluff-2.3.5/test/fixtures/dialects/tsql/window_functions.yml000066400000000000000000000246041451700765000251030ustar00rootroot00000000000000# YML test files are auto-generated from SQL files and should not be edited by # hand. To help enforce this, the "hash" field in the file must match a hash # computed by SQLFluff when running the tests. Please run # `python test/generate_parse_fixture_yml.py` to generate them after adding or # altering SQL files. _hash: 659e7f529b17efa905e2ea180f637aa54226a87233f9e0f9ea046cbb47afcecf file: batch: - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - expression: numeric_literal: '1' orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - keyword: THEN - expression: numeric_literal: '1' - keyword: END orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col3 - comma: ',' - expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 comparison_operator: raw_comparison_operator: '=' quoted_literal: "'value'" - keyword: THEN - expression: numeric_literal: '1' - keyword: END - comma: ',' - column_reference: - naked_identifier: t - dot: . - naked_identifier: col4 orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - bracketed: start_bracket: ( expression: select_statement: select_clause: keyword: SELECT select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: othertable end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t statement_terminator: ; - statement: select_statement: select_clause: keyword: SELECT select_clause_element: function: - function_name: keyword: ROW_NUMBER - bracketed: start_bracket: ( end_bracket: ) - keyword: OVER - bracketed: start_bracket: ( partitionby_clause: - keyword: PARTITION - keyword: BY - expression: function: function_name: function_name_identifier: SUM bracketed: start_bracket: ( expression: column_reference: - naked_identifier: t - dot: . - naked_identifier: col1 end_bracket: ) orderby_clause: - keyword: ORDER - keyword: BY - column_reference: - naked_identifier: t - dot: . - naked_identifier: col2 end_bracket: ) alias_expression: naked_identifier: rn from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: mytable alias_expression: naked_identifier: t groupby_clause: - keyword: GROUP - keyword: BY - column_reference: - naked_identifier: t - dot: . 
- naked_identifier: col2 statement_terminator: ; sqlfluff-2.3.5/test/fixtures/lexer/000077500000000000000000000000001451700765000173175ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/lexer/basic.sql000066400000000000000000000000521451700765000211160ustar00rootroot00000000000000SELECT a.id, a.name FROM tbl as a sqlfluff-2.3.5/test/fixtures/lexer/block_comment.sql000066400000000000000000000002711451700765000226540ustar00rootroot00000000000000SELECT a.id, /* Block comment with ending */ a.something, a.name, /* Block comment on newlines */ /* Some block comments go over multiple lines */ FROM tbl as a sqlfluff-2.3.5/test/fixtures/lexer/dummy.md000066400000000000000000000000721451700765000207730ustar00rootroot00000000000000# this is a dummy file for testing detection of sql files sqlfluff-2.3.5/test/fixtures/lexer/inline_comment.sql000066400000000000000000000001571451700765000230430ustar00rootroot00000000000000SELECT a.id, -- This is an inline comment -- Sometimes they're on a new line a.name FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/000077500000000000000000000000001451700765000174755ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/.gitignore000066400000000000000000000000431451700765000214620ustar00rootroot00000000000000# Results of fixed tests *_fix.sql sqlfluff-2.3.5/test/fixtures/linter/autofix/000077500000000000000000000000001451700765000211545ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/README.md000066400000000000000000000011611451700765000224320ustar00rootroot00000000000000# Automated linter fix tests The `autofix` directory contains the files for automated linter fix tests. The structure is: - First level is folders for each `dialect` (e.g. `ansi`, `mysql`). - Second level is a series of folders for each test. Typically these are of the form `001_test_description`, to help contributors understand the purpose of the test. - Within that folder there will be a `before.sql` file, an `after.sql` file, and a config file named `test-config.yml`. Additionally, if a `violations.json` file is provided, it will be used to check that the relevant violations are found in the first place.
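For illustration, the `ansi/001_long_line` case that follows is a minimal example of this layout. Its `test-config.yml` selects the rules the fix run should apply — here the single rule `LT05` (indentation reconstructed from the fixture):

```yaml
# test-config.yml: selects the rules applied when fixing before.sql
test-config:
  rules:
    - LT05
```

Its optional `violations.json` records where each rule is expected to fire before the fix is applied, keyed by rule code. The `[3, 9]` pair below is read here as a line/position reference into `before.sql`; that interpretation is an inference from the fixture rather than documented behaviour:

```json
{
    "violations": {
        "linting": {
            "LT05": [
                [3, 9]
            ]
        }
    }
}
```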
sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/000077500000000000000000000000001451700765000221065ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/001_long_line/000077500000000000000000000000001451700765000244345ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/001_long_line/after.sql000066400000000000000000000016031451700765000262560ustar00rootroot00000000000000WITH all_upstream_matches AS ( SELECT ROW_NUMBER() OVER ( PARTITION BY low_business_type, low_size_label, low_gender_label, low_age_label ORDER BY business_type DESC, size_label DESC, gender_label DESC, age_label DESC ) AS rownum, business_type FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type AND size_label = low_size_label AND gender_label = low_gender_label AND age_label = low_age_label) ) SELECT business_type, user_counts FROM acceptable_buckets UNION ALL SELECT business_type, user_counts FROM substituted_buckets sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/001_long_line/before.sql000066400000000000000000000012351451700765000264200ustar00rootroot00000000000000WITH all_upstream_matches AS ( SELECT ROW_NUMBER() OVER (PARTITION BY low_business_type, low_size_label, low_gender_label, low_age_label ORDER BY business_type DESC, size_label DESC, gender_label DESC, age_label DESC) AS rownum, business_type FROM acceptable_buckets JOIN small_buckets ON (business_type = low_business_type AND size_label = low_size_label AND gender_label = low_gender_label AND age_label = low_age_label) ) SELECT business_type, user_counts FROM acceptable_buckets UNION ALL SELECT business_type, user_counts FROM substituted_buckets sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/001_long_line/test-config.yml000066400000000000000000000000411451700765000273740ustar00rootroot00000000000000test-config: rules: - LT05 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/001_long_line/violations.json000066400000000000000000000001661451700765000275210ustar00rootroot00000000000000{ "violations":{ "linting":{ "LT05": [ [3, 9] ] } } } sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/002_indentation/000077500000000000000000000000001451700765000250035ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/002_indentation/after.sql000066400000000000000000000010641451700765000266260ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + ( a.with + a.hanging_indent ) as actually_ok, a.line + ( a.with + a.bad_hanging_indent ) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/002_indentation/before.sql000066400000000000000000000010201451700765000267570ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + (a.with + a.hanging_indent) as actually_ok, a.line + (a.with + a.bad_hanging_indent) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/002_indentation/test-config.yml000066400000000000000000000000411451700765000277430ustar00rootroot00000000000000test-config: rules: - LT02 
sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/003_long_line/000077500000000000000000000000001451700765000244365ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/003_long_line/after.sql000066400000000000000000000003641451700765000262630ustar00rootroot00000000000000SELECT GREATEST(1, 2 + 7, SQRT(a.long_variable_name_of_some_kind)) AS first_one, GREATEST( 2 / 3.4322348982348, 5 + 6, SQRT(a.nother_long_variable_name_of_some_kind) ) AS second_one FROM this_other_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/003_long_line/before.sql000066400000000000000000000003161451700765000264210ustar00rootroot00000000000000SELECT GREATEST(1, 2 + 7, SQRT(a.long_variable_name_of_some_kind)) AS first_one, GREATEST(2 / 3.4322348982348, 5 + 6, SQRT(a.nother_long_variable_name_of_some_kind)) AS second_one FROM this_other_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/003_long_line/test-config.yml000066400000000000000000000000411451700765000273760ustar00rootroot00000000000000test-config: rules: - LT05 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/004_indentation/000077500000000000000000000000001451700765000250055ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/004_indentation/after.sql000066400000000000000000000016751451700765000266400ustar00rootroot00000000000000WITH audience_counts AS ( SELECT user_id, list_id, COUNT(email_id) AS audience FROM lists_emails AS list_emails WHERE list_emails.active != 'D' GROUP BY user_id, list_id ) SELECT user_id, list_id, audience, CASE WHEN audience > 0 AND audience <= 200 THEN '< 200' WHEN audience > 200 AND audience <= 3000 -- NB: This one is a hanging indent, which should be modified. AND audience <= 2000 THEN '200 - 2,000' WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000' WHEN audience > 10000 AND audience <= 50000 THEN '10,000 - 50,000' WHEN audience > 50000 AND audience <= 500000 THEN '50,000 - 500,000' WHEN audience > 500000 THEN '> 500,000' END AS size_bucket FROM audience_counts JOIN gdpr_safe_users USING (user_id)sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/004_indentation/before.sql000066400000000000000000000017031451700765000267710ustar00rootroot00000000000000WITH audience_counts AS ( SELECT user_id, list_id, COUNT(email_id) AS audience FROM lists_emails AS list_emails WHERE list_emails.active != 'D' GROUP BY user_id, list_id) SELECT user_id, list_id, audience, CASE WHEN audience > 0 AND audience <= 200 THEN '< 200' WHEN audience > 200 AND audience <= 3000 -- NB: This one is a hanging indent, which should be modified. 
AND audience <= 2000 THEN '200 - 2,000' WHEN audience > 2000 AND audience <= 10000 THEN '2,000 - 10,000' WHEN audience > 10000 AND audience <= 50000 THEN '10,000 - 50,000' WHEN audience > 50000 AND audience <= 500000 THEN '50,000 - 500,000' WHEN audience > 500000 THEN '> 500,000' END AS size_bucket FROM audience_counts JOIN gdpr_safe_users USING (user_id)sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/004_indentation/test-config.yml000066400000000000000000000000411451700765000277450ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/005_function_spacing/000077500000000000000000000000001451700765000260235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/005_function_spacing/after.sql000066400000000000000000000001371451700765000276460ustar00rootroot00000000000000SELECT min(col_a) as foo, max /* a really obnoxious comment */ (col_b) as bar FROM tbl sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/005_function_spacing/before.sql000066400000000000000000000001401451700765000300010ustar00rootroot00000000000000SELECT min (col_a) as foo, max /* a really obnoxious comment */ (col_b) as bar FROM tbl sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/005_function_spacing/test-config.yml000066400000000000000000000000411451700765000307630ustar00rootroot00000000000000test-config: rules: - LT06 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/006_indentation/000077500000000000000000000000001451700765000250075ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/006_indentation/after.sql000066400000000000000000000012751451700765000266360ustar00rootroot00000000000000insert overwrite into forecast_daily_product_base with orders_monthly as ( select period, status, region, forecast_id, value from forecast_monthly where metric = 'orders' ), penetrations_monthly as ( select period, status, region, product_category, forecast_id, value from forecast_monthly where metric = 'penetration' union all -- Add in the dry penetrations as 1.0 select period, status, region, 'dry' as product_category, forecast_id, 1.0 as value from forecast_monthly where metric = 'orders' ) select * from orders_monthly inner join penetrations_monthly using(period, status, region, forecast_id) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/006_indentation/before.sql000066400000000000000000000013751451700765000270000ustar00rootroot00000000000000insert overwrite into forecast_daily_product_base with orders_monthly as ( select period, status, region, forecast_id, value from forecast_monthly where metric = 'orders' ), penetrations_monthly as ( select period, status, region, product_category, forecast_id, value from forecast_monthly where metric = 'penetration' union all -- Add in the dry penetrations as 1.0 select period, status, region, 'dry' as product_category, forecast_id, 1.0 as value from forecast_monthly where metric = 'orders' ) select * from orders_monthly inner join penetrations_monthly using(period, status, region, forecast_id) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/006_indentation/test-config.yml000066400000000000000000000000411451700765000277470ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/007_with_clause/000077500000000000000000000000001451700765000250035ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/007_with_clause/after.sql000066400000000000000000000001621451700765000266240ustar00rootroot00000000000000-- Dealing with complicated 
indents before with clauses. WITH cte as ( select a from tbla ) select a from cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/007_with_clause/before.sql000066400000000000000000000002061451700765000267640ustar00rootroot00000000000000-- Dealing with complicated indents before with clauses. WITH cte as ( select a from tbla) select a from cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/007_with_clause/test-config.yml000066400000000000000000000000541451700765000277470ustar00rootroot00000000000000test-config: rules: - LT07 - LT02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/000077500000000000000000000000001451700765000274265ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/after.sql000066400000000000000000000013371451700765000312540ustar00rootroot00000000000000SELECT COUNT(1) AS campaign_count, state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. SAFE_DIVIDE( SAFE_MULTIPLY( CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su) ), STDDEV_POP(uses_small_subject_line) ) FROM global_actions_states sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/before.sql000066400000000000000000000012031451700765000314050ustar00rootroot00000000000000SELECT COUNT(1) AS campaign_count, state_user_v_peer_open ,business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. 
,SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_looping_rules_LT02_LT04_LT05/test-config.yml000066400000000000000000000000671451700765000323760ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 - LT04 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_with_clause/000077500000000000000000000000001451700765000250045ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_with_clause/after.sql000066400000000000000000000001101451700765000266160ustar00rootroot00000000000000with a as (select 1), b as (select 2) select * from a join b using (z)sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_with_clause/before.sql000066400000000000000000000001031451700765000267610ustar00rootroot00000000000000with a as(select 1), b as(select 2) select * from a join b using(z)sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/008_with_clause/test-config.yml000066400000000000000000000000541451700765000277500ustar00rootroot00000000000000test-config: rules: - LT01 - LT08 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/000077500000000000000000000000001451700765000272465ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/.sqlfluff000066400000000000000000000001071451700765000310670ustar00rootroot00000000000000[sqlfluff:rules:capitalisation.keywords] capitalisation_policy = lower sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/after.sql000066400000000000000000000042541451700765000310750ustar00rootroot00000000000000select credits.*, min(party.created_datetime) as first_party_created_datetime, listagg(distinct party.product_category_code, ', ') as party_product_category_codes, listagg(distinct party.product_category_name, ', ') as party_product_category_names, listagg(distinct party.party_type_id, ', ') as party_type_ids, listagg(distinct party.party_type, ', ') as party_types, listagg(distinct party.party_action_id, ', ') as party_action_ids, listagg(distinct party.party_action, ', ') as party_actions, listagg(distinct party.party_incident_id, ', ') as party_incident_ids, listagg(distinct party.incident, ', ') as party_incidents, listagg(distinct party.product_party_package_id, ', ') as party_product_party_package_ids, listagg(distinct party.product_party_party_type, ', ') as party_product_party_party_types from ( select created, party_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, NULLIF(SUBSTRING(regexp_substr(cr.description,'Ticket ref: [0-9]*'), 13), '') ::INT as ticket_id, case when cr.description like 'Requesting Punter: %' then left( SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18), length(SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18) )-1) else null end as punter_name, cr.description, party_reason_id, car.description as reason from {{ ref("party_default__party_transaction") }} cr join {{ ref("party_default__party_reason") }} car using (party_reason_id) ) group by 1,2,3,4,5,6,7,8,9 union select created, mgm_big_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, null as ticket_id, null as punter_name, null as description, null as reason_id, 'raf' as reason, null as first_party_created_datetime, null as party_product_category_codes, null as party_product_category_names, null as 
party_type_ids, null as party_types, null as party_action_ids, null as party_actions, null as party_incident_ids, null as party_incidents, null as party_product_party_package_ids, -- NULL as party_product_party_product_types, null as party_product_party_party_types from {{ ref("party_default__mgm_big_transaction") }} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/before.sql000066400000000000000000000042541451700765000312360ustar00rootroot00000000000000SELECT credits.*, min(party.created_datetime) as first_party_created_datetime, listagg(distinct party.product_category_code, ', ') as party_product_category_codes, listagg(distinct party.product_category_name, ', ') as party_product_category_names, listagg(distinct party.party_type_id, ', ') as party_type_ids, listagg(distinct party.party_type, ', ') as party_types, listagg(distinct party.party_action_id, ', ') as party_action_ids, listagg(distinct party.party_action, ', ') as party_actions, listagg(distinct party.party_incident_id, ', ') as party_incident_ids, listagg(distinct party.incident, ', ') as party_incidents, listagg(distinct party.product_party_package_id, ', ') as party_product_party_package_ids, listagg(distinct party.product_party_party_type, ', ') as party_product_party_party_types FROM ( SELECT created, party_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, NULLIF(SUBSTRING(regexp_substr(cr.description,'Ticket ref: [0-9]*'), 13), '') ::INT as ticket_id, case when cr.description like 'Requesting Punter: %' then left( SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18), length(SUBSTRING(regexp_substr(cr.description,'Requesting Punter: [^/]*'), 18) )-1) else null end as punter_name, cr.description, party_reason_id, car.description as reason from {{ ref("party_default__party_transaction") }} cr join {{ ref("party_default__party_reason") }} car using (party_reason_id) ) group by 1,2,3,4,5,6,7,8,9 UNION SELECT created, mgm_big_transaction_id as big_transaction_id, punter_id, credit_amount, promo_punter_reward_id, null as ticket_id, null as punter_name, null as description, null as reason_id, 'raf' as reason, NULL as first_party_created_datetime, NULL as party_product_category_codes, NULL as party_product_category_names, NULL as party_type_ids, NULL as party_types, NULL as party_action_ids, NULL as party_actions, NULL as party_incident_ids, NULL as party_incidents, NULL as party_product_party_package_ids, -- NULL as party_product_party_product_types, NULL as party_product_party_party_types from {{ ref("party_default__mgm_big_transaction") }} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/009_keyword_capitalisation/test-config.yml000066400000000000000000000000541451700765000322120ustar00rootroot00000000000000test-config: rules: - CP01 - CP04 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/000077500000000000000000000000001451700765000260125ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/after.sql000066400000000000000000000003541451700765000276360ustar00rootroot00000000000000with my_cte as ( select 1 ) , that_cte as ( select 1 ), -- This Comment should stick to the CTE other_cte as ( select 1 ), this_cte as (select 1), final_cte as ( select 1 ) select * from my_cte cross join other_cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/before.sql000066400000000000000000000003471451700765000300010ustar00rootroot00000000000000with my_cte as ( select 1 ) , 
that_cte as ( select 1 ), -- This Comment should stick to the CTE other_cte as ( select 1 ), this_cte as (select 1), final_cte as ( select 1 ) select * from my_cte cross join other_cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/010_CTEs_and_newlines/test-config.yml000066400000000000000000000000411451700765000307520ustar00rootroot00000000000000test-config: rules: - LT08 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/011_indentation/000077500000000000000000000000001451700765000250035ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/011_indentation/after.sql000066400000000000000000000003511451700765000266240ustar00rootroot00000000000000SELECT a {# My Comment #} , b {% for i in [1, 2, 3] %} , c_{{i}} + 42 AS the_meaning_of_li{{ 'f' * i }} {% endfor %} , boo {% for i in [1, 2, 3] %} , d_{{i}} {% endfor %} FROM my_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/011_indentation/before.sql000066400000000000000000000003461451700765000267710ustar00rootroot00000000000000SELECT a {# My Comment #} , b {% for i in [1, 2, 3] %} , c_{{i}}+42 AS the_meaning_of_li{{ 'f' * i }} {% endfor %} , boo {% for i in [1, 2, 3] %} , d_{{i}} {% endfor %} FROM my_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/011_indentation/test-config.yml000066400000000000000000000000541451700765000277470ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/012_templating/000077500000000000000000000000001451700765000246345ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/012_templating/.sqlfluff000066400000000000000000000001611451700765000264550ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] test_expression_1=4+5+6 test_expression_2=+2+ test_expression_3=barfoo as bf, sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/012_templating/after.sql000066400000000000000000000004431451700765000264570ustar00rootroot00000000000000-- Templated query aimed to stress the fixing templated sections. SELECT foo, 1 + 2 + 3 + 4 + 5 as bar1, 1 + {{test_expression_1}} + 3 as bar2, 1 {{test_expression_2}} 3 as bar3, {% if 1 == 0 %} {{test_expression_3}} {% endif %} foobar FROM example_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/012_templating/before.sql000066400000000000000000000004251451700765000266200ustar00rootroot00000000000000-- Templated query aimed to stress the fixing templated sections. 
SELECT foo, 1+2+3+4+5 as bar1, 1+{{test_expression_1}}+3 as bar2, 1{{test_expression_2}}3 as bar3, {% if 1 == 0 %} {{test_expression_3}} {% endif %} foobar FROM example_table sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/012_templating/test-config.yml000066400000000000000000000000411451700765000275740ustar00rootroot00000000000000test-config: rules: - LT01 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/013_order_by_explicit/000077500000000000000000000000001451700765000261775ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/013_order_by_explicit/after.sql000066400000000000000000000000471451700765000300220ustar00rootroot00000000000000SELECT * FROM t ORDER BY a DESC, b ASC sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/013_order_by_explicit/before.sql000066400000000000000000000000431451700765000301570ustar00rootroot00000000000000SELECT * FROM t ORDER BY a DESC, b sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/013_order_by_explicit/test-config.yml000066400000000000000000000000411451700765000311370ustar00rootroot00000000000000test-config: rules: - AM03 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030/000077500000000000000000000000001451700765000323745ustar00rootroot00000000000000after.sql000066400000000000000000000000441451700765000341350ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030SELECT FLOOR(dt), COUNT(*) FROM testbefore.sql000066400000000000000000000000441451700765000342760ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030SELECT FLOOR(dt) ,count(*) FROM testtest-config.yml000066400000000000000000000000541451700765000352610ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/014_looping_interaction_between_l008_and_l030test-config: rules: - LT01 - CP03 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/000077500000000000000000000000001451700765000274655ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/after.sql000066400000000000000000000001001451700765000312760ustar00rootroot00000000000000 {%- set x = 42 %} SELECT 1, 2; {% set x = 42 %} SELECT 1, 2 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/before.sql000066400000000000000000000001001451700765000314370ustar00rootroot00000000000000 {%- set x = 42 %} SELECT 1, 2; {% set x = 42 %} SELECT 1, 2 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/015_jinja_leading_whitespace/test-config.yml000066400000000000000000000000411451700765000324250ustar00rootroot00000000000000test-config: rules: - LT09 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/000077500000000000000000000000001451700765000277005ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/after.sql000066400000000000000000000005301451700765000315200ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/before.sql000066400000000000000000000004501451700765000316620ustar00rootroot00000000000000-- This file combines 
product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if/test-config.yml000066400000000000000000000000541451700765000326440ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/000077500000000000000000000000001451700765000277625ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/after.sql000066400000000000000000000005411451700765000316040ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last %} UNION ALL {% endif %} {% endfor %} ORDER BY 1 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/before.sql000066400000000000000000000004611451700765000317460ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = [ 'table1', 'table2'] %} {% for product in products %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last %} UNION ALL {% endif %} {% endfor %} ORDER BY 1 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/016_index_error_with_jinja_if2/test-config.yml000066400000000000000000000000541451700765000327260ustar00rootroot00000000000000test-config: rules: - LT02 - LT05 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/000077500000000000000000000000001451700765000275515ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/.sqlfluff000066400000000000000000000006451451700765000314010ustar00rootroot00000000000000[sqlfluff] # TODO: This setting defaults to true. I had to set it false in order to allow a # rule (LT02) to indent a templated table name. Technically, indenting templated # code is not "touching" templated code, but in order for SQLFluff to detect # this and allow the fixes to be applied using default settings, we'd need to # tweak some of the anchor and create logic for LintResult. 
ignore_templated_areas = false sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/after.sql000066400000000000000000000004351451700765000313750ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = ['table1'] %} {% for product in products %} SELECT brand, country_code FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/before.sql000066400000000000000000000004031451700765000315310ustar00rootroot00000000000000-- This file combines product data from individual brands into a staging table {% set products = ['table1'] %} {% for product in products %} SELECT brand, country_code FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/018_LT02_indent_templated_code/test-config.yml000066400000000000000000000000411451700765000325110ustar00rootroot00000000000000test-config: rules: - LT02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/000077500000000000000000000000001451700765000276515ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/.sqlfluff000066400000000000000000000000651451700765000314750ustar00rootroot00000000000000[sqlfluff:layout:type:comma] line_position = leading sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/after.sql000066400000000000000000000003741451700765000314770ustar00rootroot00000000000000WITH first_cte AS (SELECT id , one FROM first) , second_cte AS (SELECT id , two FROM {{ source('schema', 'table') }} ) SELECT id , one , two FROM first_cte LEFT JOIN second_cte ON first_cte.id = second_cte.id; sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/before.sql000066400000000000000000000003731451700765000316370ustar00rootroot00000000000000WITH first_cte AS (SELECT id , one FROM first), second_cte AS (SELECT id , two FROM {{ source('schema', 'table') }} ) SELECT id , one , two FROM first_cte LEFT JOIN second_cte ON first_cte.id = second_cte.id; sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/019_trailing_comma_to_leading/test-config.yml000066400000000000000000000000411451700765000326110ustar00rootroot00000000000000test-config: rules: - LT04 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/000077500000000000000000000000001451700765000261575ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/after.sql000066400000000000000000000000121451700765000277720ustar00rootroot00000000000000select *, sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/before.sql000066400000000000000000000000121451700765000301330ustar00rootroot00000000000000select *, sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/020_L008_trailing_comma/test-config.yml000066400000000000000000000000411451700765000311170ustar00rootroot00000000000000test-config: rules: - LT01 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/000077500000000000000000000000001451700765000262045ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/after.sql000066400000000000000000000007421451700765000300310ustar00rootroot00000000000000--noqa: disable=ST06 SELECT DISTINCT TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') AS the_date, a AS b FROM table1; SELECT col_a AS a, col_b b, --noqa: 
disable=AL02 col_c c, col_d AS d, --noqa: enable=AL02 col_e AS e, col_f AS f, col_g g, --noqa col_h AS h, col_i i, --noqa:AL02 col_j AS j, col_k AS k, --noqa:AL03 col_l AS l, col_m AS m, col_n n, --noqa: disable=all col_o o, col_p AS p --noqa: enable=all FROM foo sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/before.sql000066400000000000000000000007041451700765000301700ustar00rootroot00000000000000--noqa: disable=ST06 SELECT DISTINCT TO_CHAR(a, 'YYYY-MM-dd HH:MM:ss') as the_date, a AS b FROM table1; SELECT col_a a, col_b b, --noqa: disable=AL02 col_c c, col_d d, --noqa: enable=AL02 col_e e, col_f f, col_g g, --noqa col_h h, col_i i, --noqa:AL02 col_j j, col_k k, --noqa:AL03 col_l l, col_m m, col_n n, --noqa: disable=all col_o o, col_p p --noqa: enable=all FROM foo sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/021_fix_respects_noqa/test-config.yml000066400000000000000000000000671451700765000311540ustar00rootroot00000000000000test-config: rules: - CP01 - AL02 - ST06 022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sql/000077500000000000000000000000001451700765000353475ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansiafter.sql000066400000000000000000000001031451700765000371630ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqlSELECT ID, DataDate, COALESCE(a, 1) AS CoalesceOutput FROM temp1 before.sql000066400000000000000000000001051451700765000373260ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqlSELECT ID , COALESCE(a, 1) AS CoalesceOutput , DataDate FROM temp1 test-config.yml000066400000000000000000000000541451700765000403130ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/022_LT04_corrupts_parse_tree_and_causes_ST06_to_corrupt_sqltest-config: rules: - LT04 - ST06 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/000077500000000000000000000000001451700765000256235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/.sqlfluff000066400000000000000000000000401451700765000274400ustar00rootroot00000000000000[sqlfluff] max_line_length = 70 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/after.sql000066400000000000000000000003401451700765000274420ustar00rootroot00000000000000SELECT * FROM superverylongtablenamereallyreally1 WHERE long_varname_to_trigger_Rule_LT05_id in ( SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL ) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/before.sql000066400000000000000000000003261451700765000276070ustar00rootroot00000000000000SELECT * FROM superverylongtablenamereallyreally1 WHERE long_varname_to_trigger_Rule_LT05_id in (SELECT distinct id FROM superverylongtablenamereallyreally2 WHERE deletedat IS NULL) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/023_LT05_confuses_LT10/test-config.yml000066400000000000000000000003241451700765000305670ustar00rootroot00000000000000test-config: # NOTE: LT02 is included in this test case because the fix for # LT05 doesn't really make sense without it as the existing # query is poorly indented. 
rules: - LT02 - LT05 - LT10 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/000077500000000000000000000000001451700765000274235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/.sqlfluff000066400000000000000000000001521451700765000312440ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] par_wrap = {% macro par_wrap() %} ( col ) AS col {% endmacro %} sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/after.sql000066400000000000000000000002731451700765000312470ustar00rootroot00000000000000-- Templated query aimed to test the BaseRule.remove_templated_errors() -- function's behavior of not modifying templated sections. SELECT {{ par_wrap() }} , line_two AS line_two sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/before.sql000066400000000000000000000002701451700765000314050ustar00rootroot00000000000000-- Templated query aimed to test the BaseRule.remove_templated_errors() -- function's behavior of not modifying templated sections. SELECT {{ par_wrap() }} , line_two as line_two sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/024_remove_templated_errors/test-config.yml000066400000000000000000000001021451700765000323610ustar00rootroot00000000000000test-config: rules: - LT02 - CP01 - LT04 - ST06 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/000077500000000000000000000000001451700765000271545ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/after.sql000066400000000000000000000000521451700765000307730ustar00rootroot00000000000000select foo, bar as test from baz; sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/before.sql000066400000000000000000000001671451700765000311430ustar00rootroot00000000000000select foo, case when bar is not null then bar else null end as test from baz; sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/025_ST02_and_ST01_interaction/test-config.yml000066400000000000000000000000541451700765000321200ustar00rootroot00000000000000test-config: rules: - ST01 - ST02 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes/000077500000000000000000000000001451700765000323205ustar00rootroot00000000000000.sqlfluff000066400000000000000000000000651451700765000340650ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixes[sqlfluff:rules:aliasing.forbid] force_enable = true after.sql000066400000000000000000000005551451700765000340700ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixesSELECT abs(round(foo_bar_report.metricx-xxx_yyy_report.metricx)) as col_c_rel_diff, abs( ( round(foo_bar_report.metricx-xxx_yyy_report.metricx) /foo_bar_report.metricx ) *100 ) as metric_x_rel_diff FROM foo_bar_report LEFT JOIN xxx_yyy_report ON foo_bar_report.event_date = xxx_yyy_report.event_date; before.sql000066400000000000000000000003301451700765000342200ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixesSELECT abs(round(a.metricx-b.metricx)) as col_c_rel_diff, abs((round(a.metricx-b.metricx)/a.metricx)*100) as metric_x_rel_diff FROM foo_bar_report a LEFT JOIN xxx_yyy_report b ON a.event_date = b.event_date; 
test-config.yml000066400000000000000000000000671451700765000352110ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/026_LT05_line_length_includes_earlier_fixestest-config: rules: - LT02 - LT05 - AL07 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/000077500000000000000000000000001451700765000301045ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/after.sql000066400000000000000000000000601451700765000317220ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/before.sql000066400000000000000000000000631451700765000320660ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_LT08_wrong_indent_cte/test-config.yml000066400000000000000000000000671451700765000330540ustar00rootroot00000000000000test-config: rules: - LT02 - LT07 - LT08 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cte/000077500000000000000000000000001451700765000332755ustar00rootroot00000000000000after.sql000066400000000000000000000003521451700765000350400ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cteWITH {% for i in [0, 1] %} {% if i == 0 %} cte0 AS ( SELECT 1 ), {% else %} cte1 AS ( SELECT 2 ) {% endif %} {% endfor %} SELECT * FROM cte0 UNION SELECT * FROM cte1 before.sql000066400000000000000000000003301451700765000351750ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_cteWITH {% for i in [0, 1] %} {% if i == 0 %} cte0 AS ( SELECT 1), {% else %} cte1 AS ( SELECT 2) {% endif %} {% endfor %} SELECT * FROM cte0 UNION SELECT * FROM cte1 test-config.yml000066400000000000000000000000541451700765000361620ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_without_newline_ctetest-config: rules: - LT02 - LT07 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cte/000077500000000000000000000000001451700765000325465ustar00rootroot00000000000000after.sql000066400000000000000000000001261451700765000343100ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cteWITH {% if true %} cte AS ( SELECT 2 ) {% endif %} SELECT * FROM cte before.sql000066400000000000000000000001221451700765000344450ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_cteWITH {% if true %} cte AS ( SELECT 2 ) {% endif %} SELECT * FROM cte test-config.yml000066400000000000000000000000541451700765000354330ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_templated_block_wrong_indent_ctetest-config: rules: - LT02 - LT07 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/000077500000000000000000000000001451700765000275355ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/after.sql000066400000000000000000000000601451700765000313530ustar00rootroot00000000000000WITH cte AS ( SELECT 1 ) SELECT * FROM cte 
sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/before.sql000066400000000000000000000000701451700765000315150ustar00rootroot00000000000000 WITH cte AS ( SELECT 1 ) SELECT * FROM cte sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/027_LT02_LT07_wrong_indent_with/test-config.yml000066400000000000000000000000541451700765000325010ustar00rootroot00000000000000test-config: rules: - LT02 - LT07 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/000077500000000000000000000000001451700765000267405ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/after.sql000066400000000000000000000003211451700765000305560ustar00rootroot00000000000000{% set x = "col" %} -- We find the error with the subquery and then have to dump it again -- due to the template SELECT * FROM A_TABLE INNER JOIN ( SELECT *, {{ x }} FROM B_TABLE ) USING (SOME_COLUMN) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/before.sql000066400000000000000000000003211451700765000307170ustar00rootroot00000000000000{% set x = "col" %} -- We find the error with the subquery and then have to dump it again -- due to the template SELECT * FROM A_TABLE INNER JOIN ( SELECT *, {{ x }} FROM B_TABLE ) USING (SOME_COLUMN) sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/ST05_subqueries_in_joins/test-config.yml000066400000000000000000000000541451700765000317040ustar00rootroot00000000000000test-config: rules: - ST05 - JJ01 sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/000077500000000000000000000000001451700765000327525ustar00rootroot00000000000000.sqlfluff000066400000000000000000000002431451700765000345150ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable[sqlfluff] dialect = ansi [sqlfluff:templater:jinja] load_macros_from_path = test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros after.sql000066400000000000000000000000271451700765000345140ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variableSELECT id FROM records before.sql000066400000000000000000000000271451700765000346550ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variableSELECT id FROM records sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros/000077500000000000000000000000001451700765000342365ustar00rootroot00000000000000utils.sql000066400000000000000000000000271451700765000360370ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variable/macros{% include sql_file %} test-config.yml000066400000000000000000000000411451700765000356330ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/ansi/macro_file_jinja_include_undefined_variabletest-config: rules: - LT09 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/000077500000000000000000000000001451700765000230035ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/001_templating/000077500000000000000000000000001451700765000255275ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/001_templating/.sqlfluff000066400000000000000000000002371451700765000273540ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] project=marketing_segmentation 
dataset=dataset label_prob_threshold=0.8 [sqlfluff:indentation] allow_implicit_indents=True sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/001_templating/after.sql000066400000000000000000000003071451700765000273510ustar00rootroot00000000000000select * from `{{project}}.{{dataset}}.user_labels_with_probs` where prob_max >= {{label_prob_threshold}} --- only focus on 3 segments and label_str not in ('marketing_maven', 'growth_services') sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/001_templating/before.sql000066400000000000000000000003021451700765000275050ustar00rootroot00000000000000select * from `{{project}}.{{dataset}}.user_labels_with_probs` where prob_max >={{label_prob_threshold}} --- only focus on 3 segments and label_str not in ('marketing_maven', 'growth_services')sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/001_templating/test-config.yml000066400000000000000000000000671451700765000304770ustar00rootroot00000000000000test-config: rules: - LT02 - LT01 - LT12 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/002_templating/000077500000000000000000000000001451700765000255305ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/002_templating/.sqlfluff000066400000000000000000000002701451700765000273520ustar00rootroot00000000000000[sqlfluff] max_line_length = 50 [sqlfluff:templater:jinja:context] considered_actions=['ussl', 'ups', 'upt'] corr_states="suvpo\n ,biz_type" dst=my_dataset gcp=my_project metric=open sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/002_templating/after.sql000066400000000000000000000006211451700765000273510ustar00rootroot00000000000000-- A subset of the hairy test. SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} , aaa( bbb( ccc({{metric}}_r, {{action}}), ddd({{metric}}_r) ), eee({{action}}) ) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp}}.{{dst}}.gas` GROUP BY {{corr_states}} sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/002_templating/before.sql000066400000000000000000000004561451700765000275200ustar00rootroot00000000000000-- A subset of the hairy test. SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} ,aaa(bbb(ccc({{metric}}_r, {{action}}), ddd({{metric}}_r)), eee({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp}}.{{dst}}.gas` GROUP BY {{corr_states}} sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/002_templating/test-config.yml000066400000000000000000000001021451700765000304660ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 - LT05 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/003_templating/000077500000000000000000000000001451700765000255315ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/003_templating/.sqlfluff000066400000000000000000000003011451700765000273460ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] considered_actions=['uses_small_subject_line', 'uses_personal_subject', 'uses_personal_to'] corr_states="state_user_v_peer_open\n ,business_type" metric=open sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/003_templating/after.sql000066400000000000000000000012451451700765000273550ustar00rootroot00000000000000-- A subset of the hairy test. -- NOTE: This is not perfect, but reflects -- functionality as at Nov 2020. In future -- the logic should be updated to lint this -- better. -- Force indentation linting. 
-- sqlfluff: indentation: template_blocks_indent: force SELECT {{corr_states}} {% for action in considered_actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/003_templating/before.sql000066400000000000000000000012151451700765000275130ustar00rootroot00000000000000-- A subset of the hairy test. -- NOTE: This is not perfect, but reflects -- functionality as at Nov 2020. In future -- the logic should be updated to lint this -- better. -- Force indentation linting. -- sqlfluff: indentation: template_blocks_indent: force SELECT {{corr_states}} {% for action in considered_actions %} ,{{metric}}_{{action}} ,campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/003_templating/test-config.yml000066400000000000000000000000671451700765000305010ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/004_templating/000077500000000000000000000000001451700765000255325ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/004_templating/.sqlfluff000066400000000000000000000005161451700765000273570ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] action_states="has_used_small_subject_line\n ,has_used_personal_subject\n ,has_used_personal_to\n" considered_actions=['uses_small_subject_line', 'uses_personal_subject', 'uses_personal_to'] corr_states="state_user_v_peer_open\n ,business_type" dataset=my_dataset gcp_project=my_project metric=open sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/004_templating/after.sql000066400000000000000000000053071451700765000273610ustar00rootroot00000000000000 /* A nice hairy templated query to really stretch and test templating and fixing. This file should fail the safety checks, and so the position of the templated tokens shouldn't move. */ WITH raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} , SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` GROUP BY {{corr_states}} ), {% for action in considered_actions %} {{action}}_raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count_{{action}}, {{corr_states}} -- NOTE: The LT02 fix routine behaves a little strangely here around the templated -- code, specifically the indentation of STDDEV_POP and preceding comments. This -- is a bug currently with no obvious solution. 
, SAFE_DIVIDE( SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}}) ) AS {{metric}}_{{action}} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` WHERE {{action}} != -1 GROUP BY {{corr_states}} ), {% endfor %} new_raw_effect_sizes AS ( SELECT {{corr_states}} {% for action in considered_actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} ), imputed_effect_sizes AS ( SELECT {{corr_states}} , o.campaign_count AS campaign_count {% for action in considered_actions %} , COALESCE(IF(IS_NAN(o.{{metric}}_{{action}}), 0, o.{{metric}}_{{action}}), 0) AS {{metric}}_{{action}} , COALESCE(IF(IS_NAN(n.{{metric}}_{{action}}), 0, n.{{metric}}_{{action}}), 0) AS new_{{metric}}_{{action}} , n.campaign_count_{{action}} {% endfor %} FROM raw_effect_sizes o JOIN new_raw_effect_sizes n USING ({{corr_states}}) ), action_states AS ( SELECT {{action_states}} FROM `{{gcp_project}}.{{dataset}}.global_state_space` GROUP BY {{action_states}}) SELECT imputed_effect_sizes.*, {{action_states}} FROM imputed_effect_sizes CROSS JOIN action_states sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/004_templating/before.sql000066400000000000000000000045631451700765000275250ustar00rootroot00000000000000 /* A nice hairy templated query to really stretch and test templating and fixing. This file should fail the safety checks, and so the position of the templated tokens shouldn't move. */ WITH raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count, {{corr_states}} {% for action in considered_actions %} ,SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} {% endfor %} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` GROUP BY {{corr_states}} ), {% for action in considered_actions %} {{action}}_raw_effect_sizes AS ( SELECT COUNT(1) AS campaign_count_{{action}}, {{corr_states}} -- NOTE: The LT02 fix routine behaves a little strangely here around the templated -- code, specifically the indentation of STDDEV_POP and preceding comments. This -- is a bug currently with no obvious solution. 
,SAFE_DIVIDE(SAFE_MULTIPLY(CORR({{metric}}_rate_su, {{action}}), STDDEV_POP({{metric}}_rate_su)), STDDEV_POP({{action}})) AS {{metric}}_{{action}} FROM `{{gcp_project}}.{{dataset}}.global_actions_states` WHERE {{action}} != -1 GROUP BY {{corr_states}} ), {% endfor %} new_raw_effect_sizes AS ( SELECT {{corr_states}} {% for action in considered_actions %} ,{{metric}}_{{action}} ,campaign_count_{{action}} {% endfor %} FROM {% for action in considered_actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{corr_states}}) {% endif %} {% endfor %} ), imputed_effect_sizes AS ( SELECT {{corr_states}} ,o.campaign_count AS campaign_count {% for action in considered_actions %} ,COALESCE(IF(IS_NAN(o.{{metric}}_{{action}}), 0, o.{{metric}}_{{action}}), 0) AS {{metric}}_{{action}} ,COALESCE(IF(IS_NAN(n.{{metric}}_{{action}}), 0, n.{{metric}}_{{action}}), 0) AS new_{{metric}}_{{action}} ,n.campaign_count_{{action}} {% endfor %} FROM raw_effect_sizes o JOIN new_raw_effect_sizes n USING ({{corr_states}}) ), action_states AS ( SELECT {{action_states}} FROM `{{gcp_project}}.{{dataset}}.global_state_space` GROUP BY {{action_states}}) SELECT imputed_effect_sizes.*, {{action_states}} FROM imputed_effect_sizes CROSS JOIN action_states sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/004_templating/test-config.yml000066400000000000000000000000671451700765000305020ustar00rootroot00000000000000test-config: rules: - LT01 - LT02 - LT12 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/000077500000000000000000000000001451700765000264075ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/after.sql000066400000000000000000000001201451700765000302220ustar00rootroot00000000000000SELECT category, value FROM table1, UNNEST(1, 2, 3) AS category sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/before.sql000066400000000000000000000001141451700765000303660ustar00rootroot00000000000000SELECT category, value FROM table1, UNNEST(1, 2, 3) AS category sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/005_unnest_spacing/test-config.yml000066400000000000000000000000541451700765000313530ustar00rootroot00000000000000test-config: rules: - LT02 - AL05 sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/000077500000000000000000000000001451700765000277455ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/.sqlfluff000066400000000000000000000002251451700765000315670ustar00rootroot00000000000000[sqlfluff] dialect = bigquery ignore = templating fix_even_unparsable = True [sqlfluff:rules:capitalisation.keywords] capitalisation_policy = upper sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/after.sql000066400000000000000000000013251451700765000315700ustar00rootroot00000000000000SELECT * EXCEPT ({% include query %}) FROM ( SELECT tbl1.*, row_number() OVER ( PARTITION BY tbl1.the_name, {{ context_columns | join(', ') }} ORDER BY created_at DESC ) AS rnk {% if context_columns | default("abc") == "abc" %} FROM tbl1 {% endif %} INNER JOIN tbl2 ON tbl1.the_name = tbl2.the_name AND tbl1.run_id = tbl2.run_id WHERE {{ run_rnk }} = {% include "foobar.sql" %} ) {% if +level - -level + level.level + level + level["key"] >= 0 %} WHERE rnk = 1 {% endif %} 
sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/before.sql000066400000000000000000000007741451700765000317400ustar00rootroot00000000000000select * except({% include query %}) from ( select tbl1.*, row_number() over (partition by tbl1.the_name, {{ context_columns | join(', ') }} order by created_at desc) rnk {% if context_columns | default("abc") == "abc" %} from tbl1 {% endif %} inner join tbl2 on tbl1.the_name = tbl2.the_name and tbl1.run_id = tbl2.run_id where {{ run_rnk }} = {% include "foobar.sql" %} ) {% if +level - -level + level.level + level + level["key"] >= 0 %} where rnk = 1 {% endif %} sqlfluff-2.3.5/test/fixtures/linter/autofix/bigquery/006_fix_ignore_templating/test-config.yml000066400000000000000000000001301451700765000327040ustar00rootroot00000000000000test-config: rules: - LT02 - LT01 - CP01 - AL02 - LT05 - LT09 sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/000077500000000000000000000000001451700765000231455ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/001_semi_structured/000077500000000000000000000000001451700765000267465ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/001_semi_structured/.sqlfluff000066400000000000000000000001121451700765000305630ustar00rootroot00000000000000[sqlfluff:rules:capitalisation.identifiers] capitalisation_policy = lower sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/001_semi_structured/after.sql000066400000000000000000000001751451700765000305730ustar00rootroot00000000000000select value:data:to::string AS to_phone_number, value:data:from::string AS from_phone_number FROM a.b.ticket_audits sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/001_semi_structured/before.sql000066400000000000000000000001751451700765000307340ustar00rootroot00000000000000select value:data:to::string AS TO_PHONE_NUMBER, value:data:from::string AS FROM_PHONE_NUMBER FROM a.b.ticket_audits sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/001_semi_structured/test-config.yml000066400000000000000000000000411451700765000317060ustar00rootroot00000000000000test-config: rules: - CP02 sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/000077500000000000000000000000001451700765000320075ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/.sqlfluff000066400000000000000000000012361451700765000336340ustar00rootroot00000000000000[sqlfluff] dialect = snowflake templater = jinja exclude_rules = AL01,LT05,AL07,ST06,ST01 output_line_length = 120 max_line_length = 120 [sqlfluff:layout:type:binary_operator] line_position = leading [sqlfluff:layout:type:comparison_operator] line_position = leading [sqlfluff:indentation] tab_space_size = 2 [sqlfluff:rules:capitalisation.keywords] # Keywords capitalisation_policy = upper [sqlfluff:rules:AL03] # Column expressions allow_scalar = False [sqlfluff:rules:capitalisation.identifiers] # Unquoted identifiers extended_capitalisation_policy = lower [sqlfluff:rules:capitalisation.functions] # Function names extended_capitalisation_policy = lower sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/after.sql000066400000000000000000000004131451700765000336270ustar00rootroot00000000000000MERGE INTO foo.bar AS tgt USING ( SELECT foo::DATE AS bar FROM foo.bar WHERE split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR', 'FOO') ) AS src ON src.foo = tgt.foo WHEN MATCHED THEN 
UPDATE SET tgt.foo = src.foo; sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damaging/before.sql000066400000000000000000000003671451700765000340000ustar00rootroot00000000000000merge into foo.bar as tgt using ( select foo::DATE as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; test-config.yml000066400000000000000000000001301451700765000346670ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/002_previously_parse_tree_damagingtest-config: rules: - LT01 - LT02 - LT03 - CP01 - LT09 - CV06 sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/000077500000000000000000000000001451700765000320105ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/.sqlfluff000066400000000000000000000001121451700765000336250ustar00rootroot00000000000000[sqlfluff] dialect = snowflake [sqlfluff:indentation] tab_space_size = 2 sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/after.sql000066400000000000000000000000401451700765000336240ustar00rootroot00000000000000set cutoff = ( select foo ); sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damaging/before.sql000066400000000000000000000000431451700765000337700ustar00rootroot00000000000000set cutoff = (select foo ); test-config.yml000066400000000000000000000000541451700765000346750ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/autofix/snowflake/003_previously_parse_tree_damagingtest-config: rules: - LT02 - LT09 sqlfluff-2.3.5/test/fixtures/linter/block_comment_errors.sql000066400000000000000000000005161451700765000244300ustar00rootroot00000000000000select my_field from my_table_asdlfkhsldfjkhsadlfjksahdf /* this is a long multi-line comment that seeks dajkfhsalkjfdhs to explain why we are selecting from this table sfafdjkafdksajdfhsdajkfhsakldjfhsakldjfhksdajf rather than another table dsafdafsdfsadfsadfasdfasfdsadfasdfa fdsaf sdafa sadf asdf sdfa sdf asdf */ my_table sqlfluff-2.3.5/test/fixtures/linter/block_comment_errors_2.sql000066400000000000000000000002531451700765000246470ustar00rootroot00000000000000/*********************************************************************************** ************************************************************************************/ sqlfluff-2.3.5/test/fixtures/linter/block_comment_errors_3.sql000066400000000000000000000001301451700765000246420ustar00rootroot00000000000000/********************************************************************************* */ sqlfluff-2.3.5/test/fixtures/linter/column_references.sql000066400000000000000000000002301451700765000237070ustar00rootroot00000000000000select a, b.c, d.g, f as f1, f1 + 1 as f2 from z as a JOIN d using(f) where f2 > 1 -- NB: `f` appears in the USING clause and so shouldn't fail on RF02 sqlfluff-2.3.5/test/fixtures/linter/column_references_bare_function.sql000066400000000000000000000001771451700765000266170ustar00rootroot00000000000000select ta.column_a, current_timestamp as column_b, tb.column_c from table_a as ta join table_b as tb using(id) sqlfluff-2.3.5/test/fixtures/linter/comma_errors.sql000066400000000000000000000001251451700765000227040ustar00rootroot00000000000000-- Checking leading/trailing commas Select a , b , c d , e, f FROM g 
sqlfluff-2.3.5/test/fixtures/linter/diffquality/000077500000000000000000000000001451700765000220165ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/diffquality/.sqlfluff000066400000000000000000000000321451700765000236340ustar00rootroot00000000000000[sqlfluff] ignore=parsing sqlfluff-2.3.5/test/fixtures/linter/diffquality/parse_error.sql000066400000000000000000000000071451700765000250570ustar00rootroot00000000000000SELECT sqlfluff-2.3.5/test/fixtures/linter/discovery_file.txt000066400000000000000000000001211451700765000232360ustar00rootroot00000000000000-- This is a text file to test discovery of configured file extensions. SELECT 1 sqlfluff-2.3.5/test/fixtures/linter/encoding-utf-8-sig.sql000066400000000000000000000000331451700765000235210ustar00rootroot00000000000000select a from b sqlfluff-2.3.5/test/fixtures/linter/encoding-utf-8.sql000066400000000000000000000000301451700765000227360ustar00rootroot00000000000000select a from b sqlfluff-2.3.5/test/fixtures/linter/heavy_templating.sql000066400000000000000000000004621451700765000235600ustar00rootroot00000000000000{% set properties = { "id": "id", "type": "post_type", "channel_id": "episode_id" } %} select {% for prop, col in properties.items() %} {% if not loop.first %} , {% endif %} {{prop}} as {{ col}} {% endfor %} from {{ ref("snowplow_events_dev") }}sqlfluff-2.3.5/test/fixtures/linter/identifier_capitalisation.sql000066400000000000000000000000551451700765000254240ustar00rootroot00000000000000select foo BAR FROM tbla, tblB, TBLC sqlfluff-2.3.5/test/fixtures/linter/indentation_error_contained.sql000066400000000000000000000002551451700765000257710ustar00rootroot00000000000000-- Line 4 of this query has a closing bracket indent which we should test handling of. SELECT user_id FROM ( SELECT c.user_id AS user_id FROM c )sqlfluff-2.3.5/test/fixtures/linter/indentation_error_hard.sql000066400000000000000000000010201451700765000247320ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces, ( a.under_indented_line ) as foo, ( a.over_indented_line ) as bar, a.line + (a.with + a.hanging_indent) as actually_ok, a.line + (a.with + a.bad_hanging_indent) as problem, a.line + ( a.something_indented_well + least( a.good_example, a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/indentation_error_simple.sql000066400000000000000000000001141451700765000253100ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, a.training_spaces FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/indentation_errors.sql000066400000000000000000000002101451700765000241170ustar00rootroot00000000000000SELECT a.id, -- 3 Spaces a.name, -- tabs and spaces, 2 spaces a.trailing_spaces, a.tabs_alone_after_spaces FROM tbl as a sqlfluff-2.3.5/test/fixtures/linter/multiple_files/000077500000000000000000000000001451700765000225125ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/multiple_files/passing.1.sql000066400000000000000000000000211451700765000250270ustar00rootroot00000000000000select a from b; sqlfluff-2.3.5/test/fixtures/linter/multiple_files/passing.2.sql000066400000000000000000000000211451700765000250300ustar00rootroot00000000000000select a from b; sqlfluff-2.3.5/test/fixtures/linter/multiple_files/passing.3.sql000066400000000000000000000000211451700765000250310ustar00rootroot00000000000000select a from b; 
sqlfluff-2.3.5/test/fixtures/linter/multiple_sql_errors.sql000066400000000000000000000027431451700765000243300ustar00rootroot00000000000000WITH organizations AS ( SELECT id, organization_key FROM {{ ref('platform_stg__organizations') }} ), accounts AS ( SELECT app_key, organization_id FROM {{ ref('platform_stg__accounts') }} ), owners_packages AS ( SELECT owner_id, owner_type, package_id, created_at, updated_at, started_at FROM {{ ref('platform_stg__owners_packages') }} ), owner_packages_organization_app AS ( SELECT accounts.app_key AS store_id, package_id, owners_packages.created_at, owners_packages.updated_at, owners_packages.started_at FROM owners_packages INNER JOIN organizations ON organizations.organization_key = owner_id INNER JOIN accounts ON accounts.organization_id = organizations.id WHERE LOWER(owner_type) = 'organization' AND app_key IS NOT NULL ), owner_packages_app AS ( SELECT owner_id AS store_id, package_id, created_at, updated_at, started_at FROM owners_packages WHERE LOWER(owner_type) = 'store' ), owner_packages_store_view AS ( SELECT store_id, package_id, created_at, updated_at, started_at FROM owner_packages_organization_app UNION ALL SELECT store_id, package_id, created_at, updated_at, started_at FROM owner_packages_app ) SELECT * FROM owner_packages_store_view sqlfluff-2.3.5/test/fixtures/linter/operator_errors.sql000066400000000000000000000002601451700765000234430ustar00rootroot00000000000000SELECT a.a + a.b AS good, a.a - a.b AS bad_1, a.a * a.b AS bad_2, a.b / a.a AS bad_3, 2+(3+6)+7 AS bad_4, a.b AND a.a AS good_4 FROM tbl AS a sqlfluff-2.3.5/test/fixtures/linter/operator_errors_ignore.sql000066400000000000000000000006371451700765000250160ustar00rootroot00000000000000/* This is a file to test the inline ignoring of certain rules. Errors should be found in line 10, but not on line 9. Line 10 has rules ignored, but there are rules which *aren't* ignored, which are still present. No errors should be found on line 8 at all. */ SELECT a.a + a.b AS good, a.a-a.b AS bad_1, -- noqa a.a*a.b AS bad_2, -- noqa: LT01, LT03 a.a*a.b AS bad_3 -- noqa: LT03 FROM tbl AS a sqlfluff-2.3.5/test/fixtures/linter/operator_errors_negative.sql000066400000000000000000000001141451700765000253230ustar00rootroot00000000000000SELECT a - b AS c, -2 AS d, a - b AS e, 4-7 AS f FROM tbl sqlfluff-2.3.5/test/fixtures/linter/parse_error.sql000066400000000000000000000000071451700765000225360ustar00rootroot00000000000000SELECT sqlfluff-2.3.5/test/fixtures/linter/parse_lex_error.sql000066400000000000000000000005001451700765000234060ustar00rootroot00000000000000-- file with both parsing and lexing errors. -- Used for checking ignore functionality and -- the ability to work around issues.
SELECT a.id, -- 3 Spaces a.name, a.training_spaces, some_function(SELECT LIMIT WHERE BY ORDER) AS not_parsable, another_function(🤷‍♀️) AS not_lexable FROM tbl AS asqlfluff-2.3.5/test/fixtures/linter/passing.sql000066400000000000000000000000201451700765000216520ustar00rootroot00000000000000select a from b sqlfluff-2.3.5/test/fixtures/linter/passing_cap_extension.SQL000066400000000000000000000000201451700765000244310ustar00rootroot00000000000000select a from b sqlfluff-2.3.5/test/fixtures/linter/select_distinct_group_by.sql000066400000000000000000000000531451700765000253020ustar00rootroot00000000000000sELECT distinct a, b FROM tbl GROUP BY a, bsqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/000077500000000000000000000000001451700765000225235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/.sqlfluffignore000066400000000000000000000000301451700765000255430ustar00rootroot00000000000000# Ignore path_a path_a/ sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_a/000077500000000000000000000000001451700765000237575ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_a/query_a.sql000066400000000000000000000000121451700765000261360ustar00rootroot00000000000000SELECT 1+2sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_b/000077500000000000000000000000001451700765000237605ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_b/.sqlfluffignore000066400000000000000000000000351451700765000270050ustar00rootroot00000000000000# Ignore query_c query_c.sql sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_b/query_b.sql000066400000000000000000000000121451700765000261400ustar00rootroot00000000000000SELECT 1+2sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_b/query_c.sql000066400000000000000000000000121451700765000261410ustar00rootroot00000000000000SELECT 1+2sqlfluff-2.3.5/test/fixtures/linter/sqlfluffignore/path_c000077700000000000000000000000001451700765000237572.ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/linter/whitespace_errors.sql000066400000000000000000000002761451700765000237530ustar00rootroot00000000000000SELECT a.id , -- Comma with leading spaces a.name, a.training_spaces , -- Comma on newline, trailing spaces but with comment! 
a.normal_comma, a.should_work FROM tbl as a sqlfluff-2.3.5/test/fixtures/rules/000077500000000000000000000000001451700765000173325ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/rules/R001_global_config_test.yml000066400000000000000000000003501451700765000244010ustar00rootroot00000000000000rule: R001 configs: core: dialect: exasol tc1: pass_str: | create table if not exists tab.xxx (col1 varchar(10)) configs: core: dialect: ansi tc2: pass_str: | create table tab.xxx (col1 varchar(10)) sqlfluff-2.3.5/test/fixtures/rules/custom/000077500000000000000000000000001451700765000206445ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/rules/custom/L000.py000066400000000000000000000001571451700765000216340ustar00rootroot00000000000000"""Test std rule import.""" class Rule_L000: """Test std rule import.""" groups = ("all",) pass sqlfluff-2.3.5/test/fixtures/rules/custom/S000.py000066400000000000000000000001571451700765000216430ustar00rootroot00000000000000"""Test std rule import.""" class Rule_S000: """Test std rule import.""" groups = ("all",) pass sqlfluff-2.3.5/test/fixtures/rules/custom/bad_rule_name/000077500000000000000000000000001451700765000234215ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/rules/custom/bad_rule_name/E000.py000066400000000000000000000001751451700765000244020ustar00rootroot00000000000000"""Test std rule import.""" class E000: """This will fail to import because it does not start with Rule_.""" pass sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/000077500000000000000000000000001451700765000223315ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL01.yml000066400000000000000000000106231451700765000235130ustar00rootroot00000000000000rule: AL01 test_fail_default_explicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 foo fix_str: select foo.bar from table1 AS foo test_fail_explicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 foo fix_str: select foo.bar from table1 AS foo configs: rules: aliasing.table: aliasing: explicit test_fail_implicit: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from table1 AS foo fix_str: select foo.bar from table1 foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias: # Add whitespace when fixing implicit aliasing fail_str: select foo.bar from (select 1 as bar)foo fix_str: select foo.bar from (select 1 as bar) AS foo test_fail_implicit_alias_space: # No unnecessary whitespace added when fixing implicit aliasing fail_str: select foo.bar from (select 1 as bar) foo fix_str: select foo.bar from (select 1 as bar) AS foo test_fail_implicit_alias_explicit: # Test when explicitly setting explicit fail_str: select foo.bar from (select 1 as bar) foo fix_str: select foo.bar from (select 1 as bar) AS foo configs: rules: aliasing.table: aliasing: explicit test_fail_implicit_alias_implicit: # Test implicit fail_str: select foo.bar from (select 1 as bar) AS foo fix_str: select foo.bar from (select 1 as bar) foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias_implicit_multiple: # Test implicit with multiple tables fail_str: select foo.bar from (select 1 as bar) AS bar, (select 1 as foo) AS foo fix_str: select foo.bar from (select 1 as bar) bar, (select 1 as foo) foo configs: rules: aliasing.table: aliasing: implicit test_fail_implicit_alias_implicit_newline: # NOTE: Even when removing by a newline, we should still remove any duplicate # whitespace.
fail_str: | select foo.bar from (select 1 as bar) AS foo fix_str: | select foo.bar from (select 1 as bar) foo configs: rules: aliasing.table: aliasing: implicit test_fail_default_explicit_alias_merge: # Add whitespace when fixing implicit aliasing fail_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; fix_str: | MERGE dataset.inventory AS t USING dataset.newarrivals AS s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery test_fail_explicit_alias_merge: # Add whitespace when fixing implicit aliasing fail_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; fix_str: | MERGE dataset.inventory AS t USING dataset.newarrivals AS s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery rules: aliasing.table: aliasing: explicit test_pass_implicit_alias_merge: # Add whitespace when fixing implicit aliasing pass_str: | MERGE dataset.inventory t USING dataset.newarrivals s ON t.product = s.product WHEN MATCHED THEN UPDATE SET quantity = t.quantity + s.quantity; configs: core: dialect: bigquery rules: aliasing.table: aliasing: implicit test_alias_expression_4492: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/4492 fail_str: SELECT voo.a FROM foo voo fix_str: SELECT voo.a FROM foo AS voo configs: core: dialect: snowflake layout: type: alias_expression: spacing_before: align test_alias_expression_4089: # Test failing alias expressions # https://github.com/sqlfluff/sqlfluff/issues/4089 fail_str: SELECT RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank FROM (values ('Amsterdam', 1), ('London', 2)) Cities(Name, Id) fix_str: SELECT RANK() OVER (PARTITION BY Id ORDER BY Id DESC) nr_rank FROM (values ('Amsterdam', 1), ('London', 2)) AS Cities(Name, Id) configs: layout: type: alias_expression: spacing_before: align sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL02.yml000066400000000000000000000050561451700765000235200ustar00rootroot00000000000000rule: AL02 issue_561: # Test for https://github.com/sqlfluff/sqlfluff/issues/561 pass_str: | select array_agg(catalog_item_id) within group (order by product_position asc) over (partition by (event_id, shelf_position)) as shelf_catalog_items from x configs: core: dialect: snowflake test_fail_explicit_column_default: # Test explicit column alias fail_str: select 1 bar from table1 b fix_str: select 1 AS bar from table1 b test_fail_explicit_column_explicit: # Test explicit column alias fail_str: select 1 bar from table1 b fix_str: select 1 AS bar from table1 b configs: rules: aliasing.column: aliasing: explicit test_fail_explicit_column_implicit: # Test explicit column alias fail_str: select 1 AS bar from table1 b fix_str: select 1 bar from table1 b configs: rules: aliasing.column: aliasing: implicit test_pass_tsql_alternative_alias: # Test explicit column alias pass_str: select alias1 = col1 configs: core: dialect: tsql test_fail_alias_ending_equals: # Test explicit column alias doesn't catch false positives fail_str: select col1 "example=" fix_str: select col1 AS "example=" test_fail_alias_ending_raw_equals: # Test explicit column alias doesn't catch false positives fail_str: select col1 raw_equals fix_str: select col1 AS raw_equals test_alias_expression_align_4515_1: # Test more failing alias 
expressions fail_str: | select test a from example_table fix_str: | select test AS a from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_alias_expression_align_4515_2: # Test more failing alias expressions fail_str: | select test a, test b from example_table fix_str: | select test AS a, test AS b from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_alias_expression_align_4515_3: # Test more failing alias expressions fail_str: | select testy_testy_testy a, test b from example_table fix_str: | select testy_testy_testy AS a, test AS b from example_table configs: layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL03.yml000066400000000000000000000037721451700765000235220ustar00rootroot00000000000000rule: AL03 test_pass_column_exp_without_alias_1: pass_str: SELECT *, foo from blah test_pass_column_exp_without_alias_2: # AL03 fix with https://github.com/sqlfluff/sqlfluff/issues/449 pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo) test_pass_column_exp_without_alias_allow_scalar_true: # Don't expect alias if allow_scalar = True (default) pass_str: SELECT 1 from blah test_fail_column_exp_without_alias: fail_str: SELECT upper(foo), bar from blah # Casting (via "::TYPE" syntax) has no effect on column output naming # and AL03 therefore shouldn't be applied test_pass_column_exp_without_alias_if_only_cast: pass_str: SELECT foo_col::VARCHAR(28) , bar from blah test_pass_column_exp_without_alias_if_only_cast_inc_double_cast: pass_str: SELECT foo_col::INT::VARCHAR , bar from blah # Don't flag useless brackets # output column name is unchanged test_pass_column_exp_without_alias_if_bracketed: pass_str: SELECT (foo_col::INT)::VARCHAR , bar from blah test_fail_column_exp_without_alias_and_cast_fn: fail_str: SELECT CAST(foo_col AS INT)::VARCHAR , bar from blah test_fail_column_exp_without_alias_allow_scalar_false: # Expect alias if allow_scalar = False fail_str: SELECT 1 from blah configs: rules: allow_scalar: false test_pass_column_exp_with_alias: pass_str: SELECT upper(foo) as foo_up, bar from blah test_pass_function_emits: # Don't expect alias if allow_scalar = True (default) pass_str: SELECT json_extract(json_str, '$.AFIELD', '$.BFIELD') emits (cola char(1), colb char(1)) FROM table1 configs: core: dialect: exasol test_fail_cte_no_column_list: fail_str: | WITH cte AS ( SELECT col_a, min(col_b) FROM my_table GROUP BY 1 ) SELECT a, b FROM cte test_pass_cte_column_list: pass_str: | WITH cte(a, b) AS ( SELECT col_a, min(col_b) FROM my_table GROUP BY 1 ) SELECT a, b FROM cte sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL04.yml000066400000000000000000000022551451700765000235200ustar00rootroot00000000000000rule: AL04 test_fail_exactly_once_duplicated_aliases: # duplicate aliases fail_str: select 1 from table_1 as a join table_2 as a using(pk) test_fail_two_duplicated_aliases: fail_str: | select 1 from table_1 as a join table_2 as a on a.pk = b.pk join table_3 as b on a.pk = b.pk join table_4 as b on b.pk = b.pk test_fail_subquery: fail_str: | SELECT 1 FROM ( select 1 from table_1 as a join table_2 as a on a.pk = b.pk join table_3 as b on a.pk = b.pk join table_4 as b on b.pk = b.pk ) test_pass_subquery: # This query should pass as the different 'a' # aliases are in different subquery levels.
pass_str: | SELECT 1 FROM ( select 1 from table_1 as a join table_2 as b on a.pk = b.pk ) AS a test_pass_bigquery_function: pass_str: | SELECT gcpproject.functions.timestamp_parsing(log_tbl.orderdate) AS orderdate FROM `gcp-project.data.year_2021` AS log_tbl configs: core: dialect: bigquery test_pass_tsql_table_variable: pass_str: | select @someVar = someColumn from @someTableVar configs: core: dialect: tsql sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL05.yml000066400000000000000000000211701451700765000235160ustar00rootroot00000000000000rule: AL05 test_fail_table_alias_not_referenced_1: # Aliases not referenced. fail_str: SELECT * FROM my_tbl AS foo fix_str: SELECT * FROM my_tbl test_fail_table_alias_not_referenced_1_subquery: # Aliases not referenced. fail_str: SELECT * FROM (SELECT * FROM my_tbl AS foo) fix_str: SELECT * FROM (SELECT * FROM my_tbl) test_pass_table_alias_referenced_subquery: pass_str: SELECT * FROM (SELECT foo.bar FROM my_tbl AS foo) test_pass_table_alias_referenced: pass_str: SELECT * FROM my_tbl AS foo JOIN other_tbl on other_tbl.x = foo.x test_pass_unaliased_table_referenced: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/449 pass_str: select ps.*, pandgs.blah from ps join pandgs using(moo) test_ignore_bigquery_value_table_functions: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/356 pass_str: | select * from unnest(generate_timestamp_array( '2020-01-01', '2020-01-30', interval 1 day)) as ts configs: core: dialect: bigquery test_ignore_postgres_value_table_functions: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3051 pass_str: | SELECT json_build_object( 'name', 'ticket_status', 'type', 'enum', 'values', json_agg(status_name) ) FROM unnest(enum_range(NULL::my_enum)) AS status_name; configs: core: dialect: postgres test_ignore_postgres_value_table_functions_generate_series: # AL05 fix with https://github.com/sqlfluff/sqlfluff/issues/3462 pass_str: | SELECT date_trunc('day', dd):: timestamp with time zone FROM generate_series ( '2022-02-01'::timestamp , NOW()::timestamp , '1 day'::interval ) dd ; configs: core: dialect: postgres test_fail_table_alias_not_referenced_2: # Similar to test_1, but with implicit alias. fail_str: SELECT * FROM my_tbl foo fix_str: SELECT * FROM my_tbl test_fail_table_alias_not_referenced_2_subquery: # Aliases not referenced. fail_str: SELECT * FROM (SELECT * FROM my_tbl foo) fix_str: SELECT * FROM (SELECT * FROM my_tbl) test_pass_subquery_alias_not_referenced: pass_str: select * from (select 1 as a) subquery test_pass_bigquery_unaliased_table_with_hyphens: # Test non-quoted table name containing hyphens: https://github.com/sqlfluff/sqlfluff/issues/895 # This is more of a smoke test to exercise the # ObjectReferenceSegment.extract_reference() function, which is used by AL05 # and in turn calls HyphenatedObjectReferenceSegment.iter_raw_references(). pass_str: | select * from project-a.dataset-b.table-c configs: core: dialect: bigquery test_pass_bigquery_aliased_table_with_ticks_referenced: # Test ambiguous column reference caused by use of BigQuery structure fields. # Here, 'et2' could either be a schema name or a table name. # https://github.com/sqlfluff/sqlfluff/issues/1079 pass_str: | SELECT et2.txn.amount FROM `example_dataset2.example_table2` AS et2 configs: core: dialect: bigquery test_pass_tsql_object_reference_override: # T-SQL Overrides the ObjectReferenceSegment so needs to have the _level_to_int # static method set (as a static method!) or rule AL05 fails. 
# https://github.com/sqlfluff/sqlfluff/issues/1669 pass_str: SELECT a FROM b configs: core: dialect: tsql test_pass_subselect_uses_alias_1: pass_str: | SELECT col1, ( SELECT count(*) FROM base WHERE a.col2 = base.col2 ) FROM without_dup AS a test_pass_subselect_uses_alias_2: pass_str: | select COL_A , COL_B from INSERTS INS where COL_B != (select max(COL_B) from INSERTS X where INS.COL_A = X.COL_A) test_pass_subselect_uses_alias_3: pass_str: | SELECT col_1 FROM table_a AS a WHERE NOT EXISTS (SELECT TRUE FROM table_b AS b WHERE a.col_4 = b.col_1) test_ansi_function_not_table_parameter: fail_str: | SELECT TO_JSON_STRING(t) FROM my_table AS t fix_str: | SELECT TO_JSON_STRING(t) FROM my_table test_bigquery_function_takes_tablealias_parameter: pass_str: | SELECT TO_JSON_STRING(t) FROM my_table AS t configs: core: dialect: bigquery test_bigquery_function_takes_tablealias_column_parameter: pass_str: | SELECT TO_JSON_STRING(t.c) FROM my_table AS t configs: core: dialect: bigquery test_bigquery_function_takes_tablealias_column_struct_parameter: pass_str: | SELECT TO_JSON_STRING(t.c.structure) FROM my_table AS t configs: core: dialect: bigquery test_snowflake_delete_cte: fail_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3 AS MT3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 fix_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 configs: core: dialect: snowflake test_pass_exasol_values_clause: pass_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: exasol test_fail_exasol_values_clause: fail_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) fix_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: exasol test_pass_sparksql_values_clause: pass_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: sparksql test_fail_sparksql_values_clause: fail_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) fix_str: | SELECT * FROM ( VALUES (1, 2), (3, 4) ) configs: core: dialect: sparksql test_pass_snowflake_values: # Tests a fix for issue 3301. pass_str: | SELECT thing_1 , thing_2 FROM VALUES ( 'foo', 'bar') , ( 'foo', 'bar') my_table_alias(thing_1, thing_2) configs: core: dialect: snowflake test_pass_tsql_values_clause_in_parentheses: # Tests a fix for issue 3522. In tsql, the parentheses surrounding "values" are # required (otherwise syntax error). SQLFluff was incorrectly complaining that # the alias 't' was unused. pass_str: | SELECT * FROM (VALUES ('a1', 'b1'), ('a2', 'b2'), ('a3', 'b3')) t(a,b) configs: core: dialect: tsql test_pass_join_on_expression_in_parentheses: pass_str: | SELECT table1.c1 FROM table1 AS tbl1 INNER JOIN table2 AS tbl2 ON (tbl2.col2 = tbl1.col2) INNER JOIN table3 AS tbl3 ON (tbl3.col3 = tbl2.col3) test_pass_bigquery_qualify_clause: pass_str: | SELECT * FROM table1 AS tbl1 INNER JOIN tbl2 AS tbl2 WHERE TRUE QUALIFY ROW_NUMBER() OVER ( PARTITION BY tbl1.col1 ORDER BY tbl2.col3 ) = 1 configs: core: dialect: bigquery test_pass_bigquery_nested_inner_join: pass_str: | with abh as ( select ceb.emailaddresskey, dac.accountkey from table2 as dac inner join table3 as ceb on ceb.col2 = dac.col2 ) select col1 from table1 as abg inner join abh on abg.col1 = abh.col1 configs: core: dialect: bigquery test_fail_snowflake_flatten_function: # Tests a fix for issue 3178.
fail_str: | SELECT r.rec:foo::string, value:bar::string FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) AS x fix_str: | SELECT r.rec:foo::string, value:bar::string FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) configs: core: dialect: snowflake test_pass_derived_query_requires_alias_1: # Case 1: Simple derived query pass_str: | SELECT * FROM ( SELECT 1 ) as a test_pass_derived_query_requires_alias_2: # Case 2: Derived query uses set operation (UNION) pass_str: | SELECT * FROM ( SELECT col FROM dbo.tab UNION SELECT -1 AS col ) AS a test_pass_derived_query_requires_alias_3: # Case 3: Derived query includes a WITH statement pass_str: | SELECT * FROM ( WITH foo AS ( SELECT col FROM dbo.tab ) SELECT * FROM foo ) AS a test_pass_redshift_semi_structured_op: # Redshift _requires_ aliasing when doing semi-structured operations. # https://docs.aws.amazon.com/redshift/latest/dg/query-super.html#unnest # The logic here should be that if references _overlap_ (i.e. some # aliases refer to other tables in the same FROM clause), then those # aliases should be treated as referenced. pass_str: | SELECT tt.resource_id FROM top_table AS tt , tt.nested_column AS co configs: core: dialect: redshift sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL06.yml000066400000000000000000000034201451700765000235160ustar00rootroot00000000000000rule: AL06 test_pass_no_config: pass_str: | select x.a, x_2.b from x left join x as x_2 on x.foreign_key = x.foreign_key test_fail_alias_too_short: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: min_alias_length: 4 test_fail_alias_too_long: fail_str: | SELECT u.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(o.user_id) FROM users as u JOIN customers as customers_customers_customers on u.id = customers_customers_customers.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: max_alias_length: 10 test_fail_alias_min_and_max: fail_str: | SELECT u.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(o.user_id) FROM users as u JOIN customers as customers_customers_customers on u.id = customers_customers_customers.user_id JOIN orders as o on u.id = o.user_id; configs: rules: aliasing.length: min_alias_length: 4 max_alias_length: 10 test_pass_with_config: pass_str: | SELECT users.id, customers_customers_customers.first_name, customers_customers_customers.last_name, COUNT(latest_orders.user_id) FROM users JOIN customers as customers_customers_customers on users.id = customers_customers_customers.user_id JOIN orders as latest_orders on users.id = latest_orders.user_id; configs: rules: aliasing.length: min_alias_length: 10 max_alias_length: 30 sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL07.yml000066400000000000000000000166521451700765000235270ustar00rootroot00000000000000rule: AL07 test_pass_allow_self_join_alias: # AL07 Allow self-joins pass_str: | select x.a, x_2.b from x left join x as x_2 on x.foreign_key = x.foreign_key configs: rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_1: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs:
rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_2: # AL07 order by fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id order by o.user_id desc fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id order by orders.user_id desc configs: rules: aliasing.forbid: force_enable: true test_fail_avoid_aliases_3: # AL07 order by identifier which is the same raw as an alias but refers to a column fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id order by o desc fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id order by o desc configs: rules: aliasing.forbid: force_enable: true alias_single_char_identifiers: fail_str: "select b from tbl as a" fix_str: "select b from tbl" configs: rules: aliasing.forbid: force_enable: true alias_with_wildcard_identifier: fail_str: "select * from tbl as a" fix_str: "select * from tbl" configs: rules: aliasing.forbid: force_enable: true select_from_values: pass_str: | select * from values(1, 2, 3) configs: rules: aliasing.forbid: force_enable: true select_from_table_generator: pass_str: | select * from table( generator( rowcount=>10000 ) ) configs: core: dialect: snowflake rules: aliasing.forbid: force_enable: true issue_635: pass_str: | select id::varchar as id, obj:userid::varchar as user_id, redemptions.value:awardedreceiptid::varchar as awarded_receipt_id from a, lateral flatten(input => a.obj:redemptions) redemptions configs: core: dialect: snowflake rules: aliasing.forbid: force_enable: true # This query was causing a runtime error in the rule. issue_239: pass_str: | WITH confusion_matrix AS ( SELECT expected_label, commerce, digital, traditional_services FROM ML.CONFUSION_MATRIX(MODEL model3, ( SELECT * FROM table1 WHERE training = 0 ))) SELECT *, commerce pct_commerce FROM confusion_matrix configs: core: dialect: bigquery # The rule was removing the aliases from this query, causing incorrect behavior. # (Aliases may not only be used in select targets; they also influence whether # multiple joins to a table are independent or not). issue_610: pass_str: | SELECT aaaaaa.c FROM aaaaaa JOIN bbbbbb AS b ON b.a = aaaaaa.id JOIN bbbbbb AS b2 ON b2.other = b.id configs: rules: aliasing.forbid: force_enable: true issue_1589: pass_str: | select * from (select random() as v from (values(1))) t1, (select max(repl) as m from data) t2, (select * from data where repl=t2.m and rnd>=t.v order by rnd limit 1) configs: rules: aliasing.forbid: force_enable: true issue_1639: fail_str: | DECLARE @VariableE date = GETDATE() CREATE TABLE #TempTable AS ( Select ColumnD from SchemaA.TableB AliasC where ColumnD >= @VariableE ) fix_str: | DECLARE @VariableE date = GETDATE() CREATE TABLE #TempTable AS ( Select ColumnD from SchemaA.TableB where ColumnD >= @VariableE ) configs: core: dialect: tsql rules: aliasing.forbid: force_enable: true test_fail_no_copy_code_out_of_template: # The rule wants to replace "t" with "foobar", but # LintFix.has_template_conflicts() correctly prevents it copying code out # of the templated region. Hence, the query is not modified. 
fail_str: | SELECT t.repo_id FROM {{ source_table }} AS t configs: templater: jinja: context: source_table: foobar rules: aliasing.forbid: force_enable: true test_bigquery_skip_multipart_names: pass_str: | SELECT t.col1 FROM shema1.table1 AS t configs: core: dialect: bigquery test_bigquery_force_enable: fail_str: | SELECT t.col1 FROM schema1.table1 AS t # TRICKY: The fix_str does not parse in the real BigQuery, due to backtick # requirements. That's why the rule is disabled by default. # TODO (low priority): Update this test to test for a case where the rule # produces valid SQL. fix_str: | SELECT schema1.table1.col1 FROM schema1.table1 configs: core: dialect: bigquery rules: aliasing.forbid: force_enable: true test_violation_locations: fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs: rules: aliasing.forbid: force_enable: true violations: - code: AL07 description: Avoid aliases in from clauses and join conditions. line_no: 6 line_pos: 15 name: aliasing.forbid - code: AL07 description: Avoid aliases in from clauses and join conditions. line_no: 7 line_pos: 19 name: aliasing.forbid - code: AL07 description: Avoid aliases in from clauses and join conditions. line_no: 8 line_pos: 16 name: aliasing.forbid test_fail_fix_command: # Test originally from commands_test.py fail_str: | SELECT u.id, c.first_name, c.last_name, COUNT(o.user_id) FROM users as u JOIN customers as c on u.id = c.user_id JOIN orders as o on u.id = o.user_id; fix_str: | SELECT users.id, customers.first_name, customers.last_name, COUNT(orders.user_id) FROM users JOIN customers on users.id = customers.user_id JOIN orders on users.id = orders.user_id; configs: rules: aliasing.forbid: force_enable: true sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AL08.yml000066400000000000000000000022311451700765000235160ustar00rootroot00000000000000rule: AL08 test_fail_references: fail_str: | select foo, foo test_fail_aliases: fail_str: | select a as foo, b as foo test_fail_alias_refs: fail_str: | select foo, b as foo test_fail_locs: fail_str: | select foo, b as foo, c as bar, bar, d foo, violations: - code: AL08 description: Reuse of column alias 'foo' from line 2. line_no: 3 line_pos: 8 name: aliasing.unique.column - code: AL08 description: Reuse of column alias 'bar' from line 4. line_no: 5 line_pos: 3 name: aliasing.unique.column - code: AL08 description: Reuse of column alias 'foo' from line 2. 
line_no: 6 line_pos: 5 name: aliasing.unique.column test_fail_alias_quoted: fail_str: | select foo, b as "foo" configs: core: dialect: snowflake test_fail_alias_case: fail_str: | select foo, b as FOO test_fail_qualified: fail_str: | select a.foo , b as foo from a test_pass_table_names: pass_str: | select a.b, b.c, c.d from a, b, c sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM01.yml000066400000000000000000000003161451700765000235120ustar00rootroot00000000000000rule: AM01 test_pass_only_group_by: # check if using select distinct and group by pass_str: select a from b group by a test_fail_distinct_and_group_by: fail_str: select distinct a from b group by a sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM02.yml000066400000000000000000000043551451700765000235220ustar00rootroot00000000000000rule: AM02 test_pass_union_all: pass_str: | SELECT a, b FROM tbl UNION ALL SELECT c, d FROM tbl1 test_fail_bare_union: fail_str: | SELECT a, b FROM tbl UNION SELECT c, d FROM tbl1 fix_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 test_pass_union_distinct: pass_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 test_pass_union_distinct_with_comment: pass_str: | SELECT a, b FROM tbl --selecting a and b UNION DISTINCT SELECT c, d FROM tbl1 test_fail_triple_join_with_one_bad: fail_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 UNION SELECT e, f FROM tbl2 fix_str: | SELECT a, b FROM tbl UNION DISTINCT SELECT c, d FROM tbl1 UNION DISTINCT SELECT e, f FROM tbl2 test_fail_triple_join_with_one_bad_lowercase: fail_str: | select a, b from tbl union distinct select c, d from tbl1 union select e, f from tbl2 fix_str: | select a, b from tbl union distinct select c, d from tbl1 union distinct select e, f from tbl2 test_exasol: pass_str: | select a, b from tbl1 union select c, d from tbl2 configs: core: dialect: exasol test_exasol_union_all: pass_str: | select a, b from tbl1 union all select c, d from tbl2 configs: core: dialect: exasol test_postgres: pass_str: | select a, b from tbl1 union select c, d from tbl2 configs: core: dialect: postgres test_redshift: fail_str: | SELECT a, b FROM tbl1 UNION SELECT c, d FROM tbl2 fix_str: | SELECT a, b FROM tbl1 UNION DISTINCT SELECT c, d FROM tbl2 configs: core: dialect: redshift sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM03.yml000066400000000000000000000013461451700765000235200ustar00rootroot00000000000000rule: AM03 test_unspecified: pass_str: SELECT * FROM t ORDER BY a test_unspecified_unspecified: pass_str: SELECT * FROM t ORDER BY a, b test_unspecified_desc: fail_str: SELECT * FROM t ORDER BY a, b DESC fix_str: SELECT * FROM t ORDER BY a ASC, b DESC test_asc_desc: pass_str: SELECT * FROM t ORDER BY a ASC, b DESC test_desc_unspecified: fail_str: SELECT * FROM t ORDER BY a DESC, b fix_str: SELECT * FROM t ORDER BY a DESC, b ASC test_desc_asc: pass_str: SELECT * FROM t ORDER BY a DESC, b ASC test_nulls_last: fail_str: SELECT * FROM t ORDER BY a DESC, b NULLS LAST fix_str: SELECT * FROM t ORDER BY a DESC, b ASC NULLS LAST test_comment: pass_str: SELECT * FROM t ORDER BY a /* Comment */ DESC, b ASC sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM04.yml000066400000000000000000000270421451700765000235220ustar00rootroot00000000000000rule: AM04 test_pass_known_number_of_result_columns_1: pass_str: select a, b from t test_fail_unknown_number_of_result_columns_1: fail_str: select * from t test_pass_known_number_of_result_columns_2: desc: Columns are specified in CTE so * in final query will return only columns 
specified earlier. pass_str: | with cte as ( select a, b from t ) select * from cte test_fail_unknown_number_of_result_columns_2: fail_str: | with cte as ( select * from t ) select * from cte test_pass_known_number_of_result_columns_3: pass_str: | with cte as ( select * from t ) select a, b from cte test_pass_known_number_of_result_columns_4: desc: | CTE1 has * but columns are specified in final select. CTE2 has columns specified so cte2.* in final select will return only columns specified in CTE2's body. pass_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a, cte2.* from cte1 join cte2 using (a) test_fail_unknown_number_of_result_columns_3: fail_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.*, cte2.* from cte1 join cte2 using (a) test_pass_known_number_of_result_columns_5: desc: Columns specified in subquery so * in final select will return only those columns. pass_str: | select * from ( select a, b from t ) test_fail_unknown_number_of_result_columns_4: desc: Select t.* will return unknown number of columns. fail_str: | with cte as ( select a, b from t ) select cte.*, t.* from cte1 join t using (a) test_fail_unknown_number_of_result_columns_5: desc: Select t_alias.* will return unknown number of columns since the * is used in subquery. fail_str: | with cte as ( select a, b from t ) select cte.*, t_alias.* from cte1 join (select * from t) as t_alias using (a) test_pass_known_number_of_result_columns_6: desc: Select t_alias.* will return known number of columns since they are defined in subquery. pass_str: | select t_alias.* from cte1 join (select a from t) as t_alias using (a) test_fail_unknown_number_of_result_columns_6: fail_str: | select t_alias.* from t1 join (select * from t) as t_alias using (a) test_pass_known_number_of_result_columns_7: pass_str: | with cte as ( select a, b from t ) select cte.*, t_alias.a from cte1 join (select * from t) as t_alias using (a) test_fail_unknown_number_of_result_columns_7: fail_str: select *, t.*, t.a, b from t test_pass_known_number_of_result_columns_8: pass_str: | select a from t1 union all select b from t2 test_fail_unknown_number_of_result_columns_8: fail_str: | select a from t1 union all select * from t2 test_fail_unknown_number_of_result_columns_9: fail_str: | select * from t1 union all select b from t2 test_fail_unknown_number_of_result_columns_10: fail_str: | with cte as ( select * from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_9: pass_str: | with cte as ( select a from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_10: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT customers.name, cte_orders.* FROM customers, cte_orders WHERE clients.id = orders.clientId test_pass_known_number_of_result_columns_11: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT * FROM cte_orders AS orders test_fail_unknown_number_of_result_columns_11: fail_str: | WITH cte_orders AS ( SELECT * FROM orders ) SELECT * FROM cte_orders AS orders test_fail_unknown_number_of_result_columns_12: desc: CTE is unused. We select * from orders table which means it's unknown what columns will be returned. 
fail_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT * FROM orders AS cte_orders test_fail_unknown_number_of_result_columns_13: fail_str: SELECT p.* FROM races, UNNEST(participants) AS p test_pass_known_number_of_result_columns_12: pass_str: SELECT p FROM races, UNNEST(participants) AS p test_fail_unknown_number_of_result_columns_14: fail_str: SELECT * FROM a JOIN b test_fail_unknown_number_of_result_columns_15: desc: We know what columns the CTE will return, but we don't know what columns will be returned from the joined table b. fail_str: | WITH cte AS ( SELECT a FROM t ) SELECT * FROM cte JOIN b test_pass_known_number_of_result_columns_13: desc: Both CTEs define returned columns so * in final select will return known number of columns. pass_str: | WITH cte1 AS ( SELECT a FROM t ), cte2 AS ( SELECT b FROM u ) SELECT * FROM cte1 JOIN cte2 test_pass_known_number_of_result_columns_14: pass_str: select a, b from `d.t` configs: core: dialect: bigquery test_fail_unknown_number_of_result_columns_16: fail_str: select * from `d.t` configs: core: dialect: bigquery test_pass_known_number_of_result_columns_15: # Issue 915: Crash on statements that don't have a SELECT pass_str: CREATE TABLE my_table (id INTEGER) test_fail_unknown_number_of_result_columns_17: # Issue 930: Infinite recursion if CTE queries itself. fail_str: | with hubspot__engagement_calls as ( select * from hubspot__engagement_calls ) select * from hubspot__engagement_calls test_fail_unknown_number_of_result_columns_18: # Another test for issue #930 fail_str: | with hubspot__contacts as ( select * from ANALYTICS.PUBLIC_intermediate.hubspot__contacts ), final as ( select * from hubspot__contacts where not coalesce(_fivetran_deleted, false) ) select * from final test_pass_nested_ctes_1: # Test for issue 1984 pass_str: | with a as ( with b as ( select 1 from c ) select * from b ) select * from a test_fail_nested_ctes_1: # Test for issue 1984 fail_str: | with a as ( with b as ( select * from c ) select * from b ) select * from a test_fail_nested_ctes_2: # Test for issue 1984 fail_str: | with a as ( with b as ( select 1 from t1 ), c AS ( SELECT * FROM u ) select b.*, c.* from b join c ) select * from a test_pass_nested_ctes_3: # Test for issue 1984 pass_str: with a as ( with b as ( select * from c ) select 1 from b ) select * from a test_pass_nested_ctes_4: # Test for issue 1984 pass_str: with a as ( with b as ( select * from c ) select * from b ) select 1 from a test_cte_reference_outer_5: pass_str: with a as ( select 1 from b ) select * from ( select * from a ) test_cte_tricky_nesting_6: pass_str: with b as ( select 1 from c ) select * from ( with a as ( select * from b ) select * from a ) test_nested_and_same_level_ctes_7: pass_str: with a as ( with c as ( select 1 from d ), b as ( select * from c ) select * from b ) select * from a test_nested_cte_references_outer_8: pass_str: with c as ( select 1 from d ), a as ( with b as ( select * from c ) select * from b ) select * from a test_pass_join_inside_cte_with_unqualified: pass_str: with cte as ( select * from t1 inner join t2 ) select a, b from cte; test_pass_known_number_of_columns_in_two_join_subqueries: pass_str: select * from ( select a from foo ) t1 inner join ( select b from bar ) t2; test_fail_two_join_subqueries_one_with_unknown_number_of_columns: fail_str: select * from ( select * from foo ) t1 inner join ( select b from bar ) t2; test_fail_no_source_table: fail_str: | SELECT * test_query_on_snowflake_stage: pass_str: select mycolumn1 from @public.mytable1
configs: core: dialect: snowflake test_snowflake_delete_cte: pass_str: | DELETE FROM MYTABLE1 USING ( WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3) SELECT COLUMN3 FROM MYTABLE3 ) X WHERE COLUMN1 = X.COLUMN3 configs: core: dialect: snowflake test_pass_exasol_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_exasol_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: exasol test_pass_exasol_values_clause: pass_str: SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) configs: core: dialect: exasol test_fail_exasol_invalid_foreign_key_from: # This query should fail (I think) because it # returns more than just the foreign key and # that could be an unknown number of columns. fail_str: | SELECT * WITH INVALID FOREIGN KEY (nr) FROM T1 REFERENCING T2 (id) configs: core: dialect: exasol test_pass_exasol_invalid_foreign_key_from: # This query should pass because it will return # just the foreign key. pass_str: | SELECT INVALID FOREIGN KEY (nr) FROM T1 REFERENCING T2 (id) configs: core: dialect: exasol test_pass_cte_no_select_final_statement: pass_str: WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; test_tsql_select_system_as_identifier: pass_str: | SELECT @@IDENTITY AS 'Identity' configs: core: dialect: tsql test_pass_sparksql_values_clause_cte_1: pass_str: | WITH txt AS ( VALUES (1) AS t (id) ) SELECT * FROM txt configs: core: dialect: sparksql test_pass_sparksql_values_clause_cte_2: pass_str: | WITH txt AS ( VALUES (1, 'foo') AS t (id, name) ) SELECT * FROM txt configs: core: dialect: sparksql test_pass_sparksql_values_clause: pass_str: SELECT * FROM ( VALUES (1, 2), (3, 4) ) AS t(c1, c2) configs: core: dialect: sparksql sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM05.yml000066400000000000000000000153021451700765000235170ustar00rootroot00000000000000rule: AM05 # Default config test_fail_lone_join_default: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" test_fail_lone_join_lowercase_default: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" test_pass_inner_join_default: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" test_pass_left_join_default: pass_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" test_pass_right_join_default: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" test_pass_full_join_default: pass_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" test_pass_left_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" test_pass_right_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" test_pass_full_outer_join_default: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" test_pass_cross_join_default: pass_str: "SELECT foo.a, bar.b FROM foo CROSS JOIN bar;\n" # Config = "inner" test_fail_lone_join_inner: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_fail_lone_join_lowercase_inner: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_inner_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo INNER 
JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_left_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_right_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_full_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_left_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_right_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner test_pass_full_outer_join_inner: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: inner # Config = "outer" test_pass_lone_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_inner_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_left_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_right_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_full_join_outer: fail_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_fail_full_join_lowercase_outer: fail_str: "SELECT foo.a, bar.b FROM foo full join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_left_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_right_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer test_pass_full_outer_join_outer: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: outer # Config = "both" test_fail_lone_join_both: fail_str: "SELECT foo.a, bar.b FROM foo JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_lone_join_lowercase_both: fail_str: "SELECT foo.a, bar.b FROM foo join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo inner join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_inner_join_both: pass_str: "SELECT foo.a, bar.b FROM foo INNER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_left_join_both: fail_str: "SELECT foo.a, bar.b FROM foo LEFT JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_right_join_both: fail_str: "SELECT foo.a, bar.b FROM foo RIGHT JOIN bar;\n" fix_str: 
"SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_full_join_both: fail_str: "SELECT foo.a, bar.b FROM foo FULL JOIN bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_fail_full_join_lowercase_both: fail_str: "SELECT foo.a, bar.b FROM foo full join bar;\n" fix_str: "SELECT foo.a, bar.b FROM foo full outer join bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_left_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo LEFT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_right_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo RIGHT OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both test_pass_full_outer_join_both: pass_str: "SELECT foo.a, bar.b FROM foo FULL OUTER JOIN bar;\n" configs: rules: ambiguous.join: fully_qualify_join_types: both sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM06.yml000066400000000000000000000310221451700765000235150ustar00rootroot00000000000000rule: AM06 test_pass_explicit_group_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; test_pass_implicit_group_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; test_pass_explicit_order_by_default: pass_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; test_fail_implicit_order_by_default: pass_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; test_fail_mix_group_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; test_pass_implicit_group_by_and_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; test_pass_explicit_group_by_and_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; test_fail_within_line_mix_group_by_and_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; test_fail_across_line_mix_group_by_and_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; test_pass_explicit_expression_order_by_default: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) test_fail_implicit_expression_order_by_default: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) test_pass_explicit_group_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_group_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_mix_group_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_order_by_custom_explicit: pass_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_order_by_custom_explicit: fail_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; 
configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_group_by_and_order_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_within_line_mix_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_across_line_mix_group_by_and_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_expression_order_by_custom_explicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_fail_implicit_expression_order_by_custom_explicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: explicit test_pass_explicit_group_by_custom_implicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_implicit_group_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_pass_explicit_order_by_custom_implicit: pass_str: | SELECT foo, bar FROM fake_table ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_implicit_order_by_custom_implicit: fail_str: | SELECT foo, bar FROM fake_table ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_mix_group_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_pass_implicit_group_by_and_order_by_custom_implicit: pass_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY 1, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_explicit_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY foo, bar ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_within_line_mix_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, bar ORDER BY foo, 2; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_across_line_mix_group_by_and_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table GROUP BY 1, 2 ORDER BY foo, bar; configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit 
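The AM06 cases above all hinge on the group_by_and_order_by_style setting of the ambiguous.column_references rule. As a minimal sketch of how one of these fixtures maps onto real usage — assuming only sqlfluff's documented Simple Python API (sqlfluff.lint), which is separate from the YAML test harness that actually runs these files — the mixed-reference failure case can be reproduced like this:

import sqlfluff

# The "mixed references" query from test_fail_mix_group_by_default above:
# an ordinal (1) and a column reference (bar) in the same GROUP BY.
sql = """\
SELECT
    foo,
    bar,
    sum(baz) AS sum_value
FROM fake_table
GROUP BY 1, bar;
"""

# Restrict linting to AM06; violations come back as dicts whose keys
# include "code" and "description" (the same keys the fixtures use in
# violations_after_fix blocks).
for violation in sqlfluff.lint(sql, dialect="ansi", rules=["AM06"]):
    print(violation["code"], violation["description"])

Per-rule options such as group_by_and_order_by_style are read from a .sqlfluff configuration file (mirrored by the configs: blocks in these fixtures) rather than passed as keyword arguments to the Simple API.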
test_fail_explicit_expression_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY foo, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_implicit_expression_order_by_custom_implicit: fail_str: | SELECT foo, bar, sum(baz) AS sum_value FROM fake_table ORDER BY 1, power(bar, 2) configs: rules: ambiguous.column_references: group_by_and_order_by_style: implicit test_fail_consistent_snowflake: fail_str: | select a, b, c from test_table group by 1, b order by 1, 2 configs: core: dialect: snowflake rules: ambiguous.column_references: group_by_and_order_by_style: consistent test_fail_consistent_exasol: fail_str: | select a, b, c from test_table group by 1, b order by 1, 2 configs: core: dialect: exasol rules: ambiguous.column_references: group_by_and_order_by_style: consistent test_pass_window: pass_str: | SELECT field_1 , field_2 , SUM(field_3) as field_3_total , SUM(field_3) OVER (ORDER BY field_1) AS field_3_window_sum FROM table1 GROUP BY 1, 2 ORDER BY 1, 2 test_pass_window_snowflake: pass_str: | SELECT field_1 , field_2 , SUM(field_3) as field_3_total , SUM(field_3) OVER (ORDER BY field_1) AS field_3_window_sum FROM table1 GROUP BY 1, 2 ORDER BY 1, 2 configs: core: dialect: snowflake test_pass_withingroup_snowflake: pass_str: | SELECT LISTAGG(x) WITHIN GROUP (ORDER BY list_order) AS my_list FROM main GROUP BY 1 configs: core: dialect: snowflake test_pass_groupby_rollup_bigquery: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: bigquery test_fail_groupby_rollup_bigquery: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: bigquery test_pass_groupby_rollup_postgres: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: postgres test_fail_groupby_rollup_postgres: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: postgres test_pass_groupby_rollup_exasol: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: exasol test_fail_groupby_rollup_exasol: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: exasol test_pass_groupby_rollup_athena: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: athena test_fail_groupby_rollup_athena: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: athena test_pass_groupby_rollup_sparksql: pass_str: | SELECT column1, column2 FROM table_name GROUP BY 1, 2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) configs: core: dialect: sparksql test_fail_groupby_rollup_sparksql: fail_str: | SELECT column1, column2 FROM table_name GROUP BY column1, column2 UNION ALL SELECT column1, column2 FROM table_name2 GROUP BY ROLLUP(1, 2) 
configs: core: dialect: sparksql test_pass_array_agg_bigquery: pass_str: | SELECT to_json_string(array_agg(product_id order by started_at desc)) AS products FROM purchased GROUP by 1 configs: core: dialect: bigquery sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/AM07.yml000066400000000000000000000174241451700765000235300ustar00rootroot00000000000000rule: AM07 test_pass_known_number_of_result_columns_1: pass_str: | select a, b from t union all select c, d from k test_fail_known_number_of_result_columns_1: fail_str: | select a from t union all select c, d from k test_pass_known_number_of_result_columns_2: desc: Columns are specified in CTE so * in final query will return only columns specified earlier. pass_str: | with cte as ( select a, b from t ) select * from cte union select c, d from t2 test_fail_known_number_of_result_columns_2: fail_str: | with cte as ( select a, b, c from t ) select * from cte union select d, e from t test_pass_known_number_of_result_columns_3: pass_str: | with cte as ( select * from t ) select a, b from cte test_pass_known_number_of_result_columns_4: pass_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a , cte1.d , cte2.* from cte1 join cte2 using (a) union select e, f, g, h from cte3 test_fail_known_number_of_result_columns_3: fail_str: | with cte1 as ( select * from t ), cte2 as ( select a, b from t ) select cte1.a, cte2.* from cte1 join cte2 using (a) union select e, f from cte3 test_pass_known_number_of_result_columns_5: pass_str: | select * from ( select a, b from t ) union select c, d from t2 test_pass_known_number_of_result_columns_6: pass_str: | with cte2 as ( select b from t1 ) select t_alias.* from t2 join (select a from t) as t_alias using (a) union select * from cte2 test_fail_unknown_number_of_result_columns_4: fail_str: | select t_alias.* from t1 join (select a from t) as t_alias using (a) union select a,b from t2 test_pass_known_number_of_result_columns_7: pass_str: | select a from t1 union all select b from t2 test_pass_unknown_wildcard_number_of_result_columns_8: pass_str: | select a from t1 union all select * from t2 test_pass_known_number_of_result_columns_9: pass_str: | with cte as ( select a from t1 union all select b from t2 ) select * from cte union all select b from t3 test_pass_known_number_of_result_columns_10: desc: Columns are specified in cte_orders's body so cte_orders.* will return only these columns. 
pass_str: | WITH cte_orders AS ( SELECT customer_id, total FROM orders ) SELECT customers.name, cte_orders.* FROM customers, cte_orders WHERE clients.id = orders.clientId test_pass_known_number_of_result_columns_11: pass_str: select a, b from `d.t` union all select c, d from `d.t` configs: core: dialect: bigquery test_fail_unknown_number_of_result_columns_5: fail_str: select a, b, c from `d.t` union all select c, d from `d.t` configs: core: dialect: bigquery test_pass_known_number_of_result_columns_13: # Issue 915: Crash on statements that don't have a SELECT pass_str: CREATE TABLE my_table (id INTEGER) test_pass_known_number_of_result_columns_14: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a, b FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_pass_known_number_of_result_columns_15: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT * FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_fail_unknown_number_of_result_columns_6: fail_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a FROM table2 ) SELECT * from cte2 as cte_al UNION SELECT e, f FROM table3 ) UNION SELECT k, l FROM table4 test_pass_known_number_of_result_columns_16: pass_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a, b FROM table2 ) SELECT * from cte2 as cte_al ) UNION SELECT e, f FROM table3 test_pass_known_number_of_result_columns_17: pass_str: | SELECT * FROM table1 as table2 UNION SELECT e, f FROM table3 test_fail_known_number_of_result_columns_7: fail_str: | SELECT * FROM ( WITH cte2 AS ( SELECT a FROM table2 ) SELECT * from cte2 as cte_al ) UNION SELECT e, f FROM table3 test_pass_nested_ctes_1: # Test for issue 1984 pass_str: | with a as ( with b as ( select 1 from c ) select * from b ) select * from a union all select k from t2 test_fail_nested_ctes_1: # Test for issue 1984 fail_str: | with a as ( with b as ( select a from c ) select * from b ) select * from a union select a, b from t2 test_cte_reference_outer_2: pass_str: with a as ( select 1 from b ) select * from ( select * from a ) union select 2 from c test_cte_tricky_nesting_3: pass_str: with b as ( select 1 from c ) select * from ( with a as ( select * from b ) select * from a ) union select a from t2 test_nested_and_same_level_ctes_4: pass_str: with a as ( with c as ( select 1 from d ), b as ( select * from c ) select * from b ) select * from a union select k from t2 test_nested_cte_references_outer_5: pass_str: with c as ( select 1 from d ), a as ( with b as ( select * from c ) select * from b ) select * from a union select k from t2 test_pass_join_inside_cte_with_unqualified: pass_str: with cte as ( select * from t1 inner join t2 ) select a, b from cte union select c, d from cte2; test_pass_known_number_of_columns_in_two_join_subqueries: pass_str: select * from ( select a from foo ) t1 inner join ( select b from bar ) t2 union select c, d from t3; test_fail_two_join_subqueries_one_with_unknown_number_of_columns: fail_str: select * from ( select b from foo ) t1 inner join ( select b from bar ) t2 union select c, d, e from t3; test_query_on_snowflake_stage: pass_str: select mycolumn1 from @public.mytable1 union select mycolumn2 from table2 configs: core: dialect: snowflake test_pass_cte_no_select_final_statement: pass_str: WITH mycte AS ( SELECT foo, bar FROM mytable1 ) UPDATE sometable SET sometable.baz = mycte.bar FROM mycte; test_fail_cte_no_select_final_statement: fail_str: UPDATE sometable SET sometable.baz = mycte.bar FROM (SELECT foo, 
bar FROM mytable1 UNION ALL SELECT bar FROM mytable2) as k test_tsql_select_system_as_identifier: pass_str: | SELECT @@IDENTITY AS 'Identity' configs: core: dialect: tsql sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CP01.yml000066400000000000000000000131741451700765000235250ustar00rootroot00000000000000rule: CP01 test_fail_inconsistent_capitalisation_1: # Test that we don't have the "inconsistent" bug fail_str: SeLeCt 1 fix_str: SELECT 1 test_fail_inconsistent_capitalisation_2: fail_str: SeLeCt 1 from blah fix_str: SELECT 1 FROM blah test_fail_capitalisation_policy_lower: # Fix for https://github.com/sqlfluff/sqlfluff/issues/476 fail_str: SELECT * FROM MOO ORDER BY dt DESC fix_str: select * from MOO order by dt desc configs: rules: capitalisation.keywords: capitalisation_policy: lower test_fail_capitalisation_policy_upper: # Fix for https://github.com/sqlfluff/sqlfluff/issues/476 fail_str: select * from MOO order by dt desc fix_str: SELECT * FROM MOO ORDER BY dt DESC configs: rules: capitalisation.keywords: capitalisation_policy: upper test_fail_capitalisation_policy_capitalise: # Test for capitalised casing fail_str: SELECT * FROM MOO ORDER BY dt DESC fix_str: Select * From MOO Order By dt Desc configs: rules: capitalisation.keywords: capitalisation_policy: capitalise test_fail_date_part_inconsistent_capitalisation: # Test that time unit capitalization is fixed fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR test_fail_date_part_capitalisation_policy_lower: # Test that capitalization policy is applied on time units fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: select dt + interval 2 day, interval 3 hour configs: rules: capitalisation.keywords: capitalisation_policy: lower test_fail_date_part_capitalisation_policy_upper: # Test that capitalization policy is applied on time units fail_str: SELECT dt + interval 2 day, interval 3 HOUR fix_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR configs: rules: capitalisation.keywords: capitalisation_policy: upper test_pass_date_part_consistent_capitalisation: # Test that correctly capitalized time units are left unchanged pass_str: SELECT dt + INTERVAL 2 DAY, INTERVAL 3 HOUR test_pass_data_type_inconsistent_capitalisation: # Test that we don't have the "inconsistent" bug pass_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.keywords: capitalisation_policy: upper test_pass_bigquery_date: pass_str: SELECT DATE_ADD(date, INTERVAL 5 YEAR) AS display_date configs: core: dialect: bigquery rules: capitalisation.keywords: capitalisation_policy: upper test_pass_ignore_word: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words: select test_pass_ignore_words: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words: select,from test_pass_ignore_words_regex_simple: pass_str: SeleCT 1 configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: ^Se test_fail_ignore_words_regex_simple: fail_str: SeleCT 1 FrOM t_table fix_str: SeleCT 1 FROM t_table configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: ^Se test_pass_ignore_words_complex: pass_str: SeleCT 1 FrOM t_table configs: rules: capitalisation.keywords: capitalisation_policy: upper ignore_words_regex: (^Se|^Fr) test_pass_ignore_templated_code_true: pass_str: | {{ "select" }} a FROM foo WHERE 1 configs: core: ignore_templated_areas: true 
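For CP01 (keyword capitalisation), each fix_str above records what an auto-fix is expected to produce. A minimal sketch of reproducing one of these cases outside the fixture harness, assuming only the documented Simple API (sqlfluff.fix):

import sqlfluff

# test_fail_inconsistent_capitalisation_2 above, under the default
# "consistent" capitalisation policy.
fixed = sqlfluff.fix("SeLeCt 1 from blah", dialect="ansi", rules=["CP01"])
print(fixed)  # per the fixture's fix_str: SELECT 1 FROM blah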
test_fail_ignore_templated_code_false: fail_str: | {{ "select" }} a FROM foo WHERE 1 fix_str: | {{ "select" }} a from foo where 1 configs: core: ignore_templated_areas: false test_fail_snowflake_group_by_cube: fail_str: | SELECT state, city, sum((s.retail_price - p.wholesale_price) * s.quantity) AS profit FROM products AS p, sales AS s WHERE s.product_id = p.product_id GROUP BY cube (state, city) ORDER BY state, city NULLS LAST ; fix_str: | SELECT state, city, sum((s.retail_price - p.wholesale_price) * s.quantity) AS profit FROM products AS p, sales AS s WHERE s.product_id = p.product_id GROUP BY CUBE (state, city) ORDER BY state, city NULLS LAST ; configs: core: dialect: snowflake rules: capitalisation.keywords: capitalisation_policy: upper test_pass_ignore_null: pass_str: | SELECT null FROM foo WHERE 1 test_pass_ignore_true: pass_str: | SELECT true FROM foo WHERE 1 test_pass_ignore_false: pass_str: | SELECT false FROM foo WHERE 1 test_fail_bigquery_week: fail_str: SELECT LAST_DAY(col, WEEK(monday)) fix_str: SELECT LAST_DAY(col, WEEK(MONDAY)) configs: core: dialect: bigquery rules: capitalisation.keywords: capitalisation_policy: upper test_fail_select_lower: # Test for issue #3399, a bug in the core apply_fixes() function that surfaced # with various rules, including this one. fail_str: | select * FROM {{ source("ids","shop") }} fix_str: | SELECT * FROM {{ source("ids","shop") }} configs: core: dialect: tsql rules: capitalisation.keywords: capitalisation_policy: upper test_fail_select_lower_keyword_functions: # Test for issue #3520 fail_str: | SELECT cast(5 AS int) AS test1, coalesce(1, 2) AS test3 fix_str: | SELECT CAST(5 AS int) AS test1, COALESCE(1, 2) AS test3 configs: core: dialect: tsql rules: capitalisation.keywords: capitalisation_policy: upper sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CP02.yml000066400000000000000000000176611451700765000235330ustar00rootroot00000000000000rule: CP02 test_pass_consistent_capitalisation_1: pass_str: SELECT a, b test_pass_consistent_capitalisation_2: pass_str: SELECT A, B test_pass_consistent_capitalisation_with_null: # Test that NULL is classed as a keyword and not an identifier pass_str: SELECT NULL, a test_pass_consistent_capitalisation_with_single_letter_upper: # Single-letter ambiguity: Upper vs Capitalise pass_str: SELECT A, Boo test_pass_consistent_capitalisation_with_single_word_snake: # Single-word ambiguity: Pascal vs Capitalise pass_str: SELECT Apple, Banana_split test_pass_consistent_capitalisation_with_single_word_pascal: # Single-word ambiguity: Pascal vs Capitalise pass_str: SELECT AppleFritter, Banana test_pass_consistent_capitalisation_with_multiple_words_with_numbers: # Numbers count as part of words so following letter can be upper or lower pass_str: SELECT AppleFritter, Apple123fritter, Apple123Fritter test_pass_consistent_capitalisation_with_leading_underscore: pass_str: SELECT _a, b test_fail_inconsistent_capitalisation_lower_case: # Test that fixes are consistent fail_str: SELECT a, B fix_str: SELECT a, b test_fail_inconsistent_capitalisation_2: fail_str: SELECT B, a fix_str: SELECT B, A # PascalCase tests are based on this comment by @alanmcruickshank: # https://github.com/sqlfluff/sqlfluff/issues/820#issuecomment-787050507 test_pass_consistent_capitalisation_policy_pascal_1: pass_str: SELECT PascalCase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_2: pass_str: SELECT Pascalcase configs: rules: capitalisation.identifiers: 
extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_3: fail_str: SELECT pascalCase fix_str: SELECT PascalCase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_4: pass_str: SELECT PasCalCaSe configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_5: pass_str: SELECT PAscalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_pass_consistent_capitalisation_policy_pascal_6: # This could be argued as not PascalCase but technically it is # unless we introduce a full dictionary of words to recognise # where word breaks are (an impossible task!). Also what about # abbreviations (e.g. NASA)? pass_str: SELECT PASCALCASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_1: fail_str: SELECT pascalcase fix_str: SELECT Pascalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_2: fail_str: SELECT pascal_case fix_str: SELECT Pascal_Case configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_3: # Similar to above, you could argue the fixed string is # Not really Pascal Case, but it's closer than it was! fail_str: SELECT pASCAL_CASE fix_str: SELECT PASCAL_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal test_fail_inconsistent_capitalisation_policy_pascal_4: fail_str: SELECT PasCalCase fix_str: SELECT pascalcase configs: rules: capitalisation.identifiers: extended_capitalisation_policy: lower test_fail_consistent_capitalisation_policy_pascal_5: fail_str: SELECT PascalCaseNAME fix_str: SELECT PASCALCASENAME configs: rules: capitalisation.identifiers: extended_capitalisation_policy: upper test_fail_inconsistent_capitalisation_pascal_v_capitalise: # Pascal vs Capitalise fail_str: SELECT AppleFritter, Banana_split fix_str: SELECT AppleFritter, Banana_Split test_pass_policy_unquoted_identifiers_aliases_1: pass_str: SELECT a, B configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_2: pass_str: SELECT B, a configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_3: # See above comments in regards to whether this should # really be considered PascalCase (we treat as yes) pass_str: SELECT PASCAL_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: pascal unquoted_identifiers_policy: aliases test_pass_policy_unquoted_identifiers_aliases_4: pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_5: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_6: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case fix_str: SELECT UPPER_CASE AS PASCALCASE, PascalCase AS LOWER_CASE configs: rules: capitalisation.identifiers: extended_capitalisation_policy: upper unquoted_identifiers_policy: aliases
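The extended_capitalisation_policy and unquoted_identifiers_policy options exercised above live under [sqlfluff:rules:capitalisation.identifiers] in a .sqlfluff file. A self-contained sketch of wiring that up — the temporary-directory dance and the CONFIG/cfg names are illustrative only, and the config_path argument is assumed from the Simple API:

import pathlib
import tempfile

import sqlfluff

CONFIG = """\
[sqlfluff]
dialect = ansi

[sqlfluff:rules:capitalisation.identifiers]
extended_capitalisation_policy = pascal
"""

with tempfile.TemporaryDirectory() as tmp:
    cfg = pathlib.Path(tmp) / ".sqlfluff"
    cfg.write_text(CONFIG)
    # Mirrors test_fail_inconsistent_capitalisation_policy_pascal_2 above.
    print(sqlfluff.fix("SELECT pascal_case", rules=["CP02"], config_path=str(cfg)))
    # per the fixture's fix_str: SELECT Pascal_Case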
test_policy_unquoted_identifiers_aliases_7: fail_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS UPPER_CASE fix_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS upper_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_aliases_8: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case FROM lower_case AS lower_case fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case FROM lower_case AS Lower_Case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: aliases test_policy_unquoted_identifiers_column_aliases_1: pass_str: SELECT UPPER_CASE AS low_case, PascalCase AS low_case FROM UPPER_CASE AS UPPER_CASE configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: column_aliases test_policy_unquoted_identifiers_aliases_2: fail_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS lower_case FROM lower_case AS lower_case fix_str: SELECT UPPER_CASE AS PascalCase, PascalCase AS Lower_Case FROM lower_case AS lower_case configs: rules: capitalisation.identifiers: unquoted_identifiers_policy: column_aliases test_pass_ignore_word: pass_str: SELECT A, b configs: rules: capitalisation.identifiers: capitalisation_policy: upper ignore_words: b test_pass_consistent_capitalisation_properties_naked_identifier: pass_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_fail_inconsistent_capitalisation_properties_naked_identifier: # Test that fixes are consistent fail_str: SHOW TBLPROPERTIES customer (created.BY.user) fix_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_fail_inconsistent_capitalisation_properties_naked_identifier_2: fail_str: SHOW TBLPROPERTIES customer (Created.By.User) fix_str: SHOW TBLPROPERTIES customer (created.by.user) configs: core: dialect: sparksql test_pass_bigquery_safe_does_not_trigger: pass_str: SELECT SAFE.myFunction(1) AS col1 configs: core: dialect: bigquery test_pass_databricks_case_sensitive_property: pass_str: SET spark.databricks.delta.properties.defaults.enableChangeDataFeed = true; configs: core: dialect: databricks sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CP03.yml000066400000000000000000000060631451700765000235260ustar00rootroot00000000000000rule: CP03 # Inconsistent capitalisation of functions test_fail_inconsistent_function_capitalisation_1: fail_str: SELECT MAX(id), min(id) from table fix_str: SELECT MAX(id), MIN(id) from table test_fail_inconsistent_function_capitalisation_2: fail_str: SELECT MAX(id), min(id) from table fix_str: SELECT max(id), min(id) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: lower test_bare_functions: fail_str: SELECT current_timestamp from table fix_str: SELECT CURRENT_TIMESTAMP from table configs: rules: capitalisation.functions: extended_capitalisation_policy: upper test_bare_functions_2: fail_str: SELECT current_timestamp, min(a) from table fix_str: SELECT CURRENT_TIMESTAMP, MIN(a) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: upper test_bare_functions_3: fail_str: SELECT current_timestamp, min(a) from table fix_str: SELECT Current_Timestamp, Min(a) from table configs: rules: capitalisation.functions: extended_capitalisation_policy: pascal test_fail_capitalization_after_comma: fail_str: SELECT FLOOR(dt) ,count(*) FROM test fix_str: SELECT FLOOR(dt) ,COUNT(*) FROM test 
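CP03 applies the same capitalisation machinery to function names. Another short sketch, again assuming only the Simple API and using the first CP03 case above (under the default consistent policy the upper-case spelling wins here, per the fixture):

import sqlfluff

fixed = sqlfluff.fix("SELECT MAX(id), min(id) from table", dialect="ansi", rules=["CP03"])
print(fixed)  # per the fixture's fix_str: SELECT MAX(id), MIN(id) from table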
test_pass_fully_qualified_function_mixed_functions: pass_str: SELECT COUNT(*), project1.foo(value1) AS value2 test_pass_fully_qualified_function_pascal_case: pass_str: SELECT project1.FoO(value1) AS value2 test_pass_ignore_word: pass_str: SELECT MAX(id), min(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words: min test_pass_ignore_templated_code_true: pass_str: | SELECT {{ "greatest(a, b)" }}, GREATEST(i, j) configs: core: ignore_templated_areas: true test_fail_ignore_templated_code_false: fail_str: | SELECT {{ "greatest(a, b)" }}, GREATEST(i, j) fix_str: | SELECT {{ "greatest(a, b)" }}, greatest(i, j) configs: core: ignore_templated_areas: false test_pass_func_name_templated_literal_mix: # Issue 3022. This was actually a bug in BaseSegment.iter_patches(). pass_str: SELECT RO(), {{ "t" }}.func() test_pass_ignore_words_regex_simple: pass_str: SELECT MAX(id), f_test_udf(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words_regex: ^f_ test_pass_ignore_words_regex_complex: pass_str: SELECT MAX(id), f_test_udf(id), g_test_udf(id) FROM TABLE1 configs: rules: capitalisation.functions: ignore_words_regex: (^f_|^g_) test_pass_ignore_words_regex_bigquery_simple: pass_str: SELECT MAX(id), project.dataset._f_test_udf(id) FROM TABLE1 configs: core: dialect: bigquery rules: capitalisation.functions: ignore_words_regex: ^_f_ test_pass_ignore_words_regex_bigquery_complex: pass_str: SELECT MAX(id), project.dataset._f_test_udf(id), `project.dataset._f_test_udf`(id) FROM TABLE1 configs: core: dialect: bigquery rules: capitalisation.functions: ignore_words_regex: (^_f_|\._f_) sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CP04.yml000066400000000000000000000004211451700765000235170ustar00rootroot00000000000000rule: CP04 test_fail_inconsistent_boolean_capitalisation: fail_str: SeLeCt true, FALSE, NULL fix_str: SeLeCt true, false, null test_pass_ignore_word: pass_str: SELECT true, FALSE, NULL configs: rules: capitalisation.literals: ignore_words: true sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CP05.yml000066400000000000000000000115461451700765000235320ustar00rootroot00000000000000rule: CP05 test_pass_default_consistent_lower: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did integer, name varchar(40), ts time with time zone ); test_pass_default_consistent_upper: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did INTEGER, name VARCHAR(40), ts TIME WITH TIME ZONE ); test_pass_default_consistent_capitalised: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did Integer, name Varchar(40), ts Time With Time Zone ); test_pass_default_consistent_pascal: # Test that we don't have the "inconsistent" bug pass_str: | CREATE TABLE distributors ( did Integer, name VarChar(40), ts Time With Time Zone ); test_fail_data_type_inconsistent_capitalisation_1: # Test that we don't have the "inconsistent" bug fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id BIGINT); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_inconsistent_capitalisation_2: fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_inconsistent_capitalisation_3: fail_str: CREATE TABLE table1 (account_id BiGinT); fix_str: CREATE TABLE table1 (account_id Bigint); configs: 
rules: capitalisation.types: extended_capitalisation_policy: capitalise test_fail_data_type_capitalisation_policy_lower: fail_str: CREATE TABLE table1 (account_id BIGINT); fix_str: CREATE TABLE table1 (account_id bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_capitalisation_policy_lower_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two varchar(255)); fix_str: CREATE TABLE table1 (account_id bigint, column_two varchar(255)); configs: rules: capitalisation.types: extended_capitalisation_policy: lower test_fail_data_type_capitalisation_policy_upper: fail_str: CREATE TABLE table1 (account_id bigint); fix_str: CREATE TABLE table1 (account_id BIGINT); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_upper_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two varchar(255)); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two VARCHAR(255)); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_capitalise: # Test for capitalised casing fail_str: CREATE TABLE table1 (account_id BIGINT); fix_str: CREATE TABLE table1 (account_id Bigint); configs: rules: capitalisation.types: extended_capitalisation_policy: capitalise test_fail_data_type_capitalisation_policy_keywords_1: # Test cases where data types are keywords, not data_type_identifiers # See: https://github.com/sqlfluff/sqlfluff/pull/2121 fail_str: CREATE TABLE table1 (account_id BIGINT, column_two timestamp); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_fail_data_type_capitalisation_policy_keywords_2: fail_str: CREATE TABLE table1 (account_id BIGINT, column_two timestamp with time zone); fix_str: CREATE TABLE table1 (account_id BIGINT, column_two TIMESTAMP WITH TIME ZONE); configs: rules: capitalisation.types: extended_capitalisation_policy: upper test_pass_sparksql_complex_data_types: pass_str: | CREATE TABLE table_identifier( a STRUCT COMMENT 'col_comment', d MAP COMMENT 'col_comment', e ARRAY COMMENT 'col_comment' ); configs: core: dialect: sparksql rules: capitalisation.types: extended_capitalisation_policy: upper test_pass_bigquery_struct_params: pass_str: | CREATE TEMPORARY FUNCTION getTableInfo(payload STRING) RETURNS STRUCT LANGUAGE js AS ''' return 1 '''; configs: core: dialect: bigquery rules: capitalisation.types: extended_capitalisation_policy: upper # See https://github.com/sqlfluff/sqlfluff/issues/3277 test_pass_typless_structs_dont_trigger_rule: pass_str: | SELECT STRUCT( some_field, some_other_field ) AS col FROM table configs: core: dialect: bigquery rules: capitalisation.types: extended_capitalisation_policy: upper sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV01.yml000066400000000000000000000015321451700765000235260ustar00rootroot00000000000000rule: CV01 test_pass_not_equal_to: pass_str: | SELECT * FROM X WHERE 1 != 2 test_fail_not_equal_to: fail_str: | SELECT * FROM X WHERE 1 <> 2 fix_str: | SELECT * FROM X WHERE 1 != 2 test_less_than_passes: pass_str: | SELECT * FROM X WHERE 1 < 2 test_non_comparison_passes: pass_str: | SELECT col1 AS "alias_<>" FROM X test_fail_not_equal_to_multi: fail_str: | SELECT * FROM X WHERE 1 <> 2 AND 2 <> 1 AND 3 != 1 fix_str: | SELECT * FROM X WHERE 1 != 2 AND 2 != 1 AND 3 != 1 test_pass_not_equal_to_tsql: pass_str: | SELECT * FROM X WHERE 1 ! 
= 2 configs: core: dialect: tsql test_fail_not_equal_to_tsql: fail_str: | SELECT * FROM X WHERE 1 < -- some comment > 2 fix_str: | SELECT * FROM X WHERE 1 ! -- some comment = 2 configs: core: dialect: tsql sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV02.yml000066400000000000000000000005721451700765000235320ustar00rootroot00000000000000rule: CV02 test_pass_coalesce: pass_str: | SELECT coalesce(foo, 0) AS bar, FROM baz; test_fail_ifnull: fail_str: | SELECT ifnull(foo, 0) AS bar, FROM baz; fix_str: | SELECT COALESCE(foo, 0) AS bar, FROM baz; test_fail_nvl: fail_str: | SELECT nvl(foo, 0) AS bar, FROM baz; fix_str: | SELECT COALESCE(foo, 0) AS bar, FROM baz; sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV03.yml000066400000000000000000000024421451700765000235310ustar00rootroot00000000000000rule: CV03 test_require_pass: pass_str: SELECT a, b, FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: require test_require_fail: fail_str: SELECT a, b FROM foo fix_str: SELECT a, b, FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: require test_forbid_pass: pass_str: SELECT a, b FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid test_forbid_fail: fail_str: SELECT a, b, FROM foo fix_str: SELECT a, b FROM foo configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid test_fail_templated: # NOTE: Check no fix, because it's not safe. fail_str: | SELECT {% for col in ['a', 'b', 'c'] %} {{col}}, {% endfor %} FROM tbl fix_str: | SELECT {% for col in ['a', 'b', 'c'] %} {{col}}, {% endfor %} FROM tbl violations_after_fix: - code: CV03 description: Trailing comma in select statement forbidden line_no: 3 line_pos: 16 name: "convention.select_trailing_comma" configs: rules: convention.select_trailing_comma: select_clause_trailing_comma: forbid sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV04.yml000066400000000000000000000063011451700765000235300ustar00rootroot00000000000000rule: CV04 passes_on_count_star: pass_str: | select foo, count(*) from my_table group by foo passes_on_count_1: pass_str: | select foo, count(1) from my_table group by foo configs: &prefer_count_1 rules: convention.count_rows: prefer_count_1: true changes_count_0_to_count_star: fail_str: | select foo, count(0) from my_table group by foo fix_str: | select foo, count(*) from my_table group by foo passes_on_count_0: pass_str: | select foo, count(0) from my_table group by foo configs: &prefer_count_0 rules: convention.count_rows: prefer_count_0: true passes_on_count_1_if_both_present: pass_str: | select foo, count(1) from my_table group by foo configs: &prefer_both rules: convention.count_rows: prefer_count_0: true prefer_count_1: true changes_to_count_1_if_both_present: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(1) from my_table group by foo configs: *prefer_both changes_count_1_to_count_star: fail_str: | select foo, count(1) from my_table group by foo fix_str: | select foo, count(*) from my_table group by foo handles_whitespaces: fail_str: | select foo, count( 1 ) from my_table group by foo fix_str: | select foo, count( * ) from my_table group by foo changes_count_star_to_count_0: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(0) from my_table group by foo configs: *prefer_count_0 changes_count_star_to_count_1: fail_str: | select foo, count(*) from my_table group by foo fix_str: | select foo, count(1) 
from my_table group by foo configs: *prefer_count_1 changes_count_1_to_count_0: fail_str: | select foo, count(1) from my_table group by foo fix_str: | select foo, count(0) from my_table group by foo configs: *prefer_count_0 changes_count_0_to_count_1: fail_str: | select foo, count(0) from my_table group by foo fix_str: | select foo, count(1) from my_table group by foo configs: *prefer_count_1 changes_count_star_to_count_1_handle_new_line: fail_str: | select foo, count( * ) from my_table group by foo fix_str: | select foo, count( 1 ) from my_table group by foo configs: *prefer_count_1 no_false_positive_on_count_col: pass_str: | select foo, count(bar) from my_table no_false_positive_on_expression: pass_str: | select foo, count(1 + 10) from my_table sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV05.yml000066400000000000000000000035231451700765000235340ustar00rootroot00000000000000rule: CV05 test_is_null: pass_str: | SELECT a FROM foo WHERE a IS NULL test_is_not_null: pass_str: | SELECT a FROM foo WHERE a IS NOT NULL test_not_equals_null_upper: fail_str: | SELECT a FROM foo WHERE a <> NULL fix_str: | SELECT a FROM foo WHERE a IS NOT NULL test_not_equals_null_multi_nulls: fail_str: | SELECT a FROM foo WHERE a <> NULL AND b != NULL AND c = 'foo' fix_str: | SELECT a FROM foo WHERE a IS NOT NULL AND b IS NOT NULL AND c = 'foo' test_not_equals_null_lower: fail_str: | SELECT a FROM foo WHERE a <> null fix_str: | SELECT a FROM foo WHERE a is not null test_equals_null_spaces: fail_str: | SELECT a FROM foo WHERE a = NULL fix_str: | SELECT a FROM foo WHERE a IS NULL test_equals_null_no_spaces: fail_str: | SELECT a FROM foo WHERE a=NULL fix_str: | SELECT a FROM foo WHERE a IS NULL test_complex_case_1: fail_str: | SELECT a FROM foo WHERE a = b or (c > d or e = NULL) fix_str: | SELECT a FROM foo WHERE a = b or (c > d or e IS NULL) test_set_clause: pass_str: | UPDATE table1 SET col = NULL WHERE col = "" test_bigquery_set_options: pass_str: | ALTER TABLE table SET OPTIONS (expiration_timestamp = NULL) ; configs: core: dialect: bigquery test_tsql_exec_clause: pass_str: | exec something @param1 = 'blah', @param2 = 'blah', @param3 = null, @param4 = 'blah'; configs: core: dialect: tsql test_tsql_alternate_alias_syntax: pass_str: | select name = null from t configs: core: dialect: tsql test_exclude_constraint: pass_str: | alter table abc add constraint xyz exclude (field WITH =); configs: core: dialect: postgres sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV06.yml000066400000000000000000000233011451700765000235310ustar00rootroot00000000000000rule: CV06 test_pass_semi_colon_same_line_default: pass_str: | SELECT a FROM foo; test_pass_semi_colon_custom_newline: pass_str: | SELECT a FROM foo; configs: rules: convention.terminator: multiline_newline: true test_fail_semi_colon_same_line_custom_newline: fail_str: | SELECT a FROM foo; fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: multiline_newline: true test_pass_no_semi_colon_default: pass_str: | SELECT a FROM foo test_pass_no_semi_colon_custom_newline: pass_str: | SELECT a FROM foo configs: rules: convention.terminator: multiline_newline: true test_fail_no_semi_colon_custom_require: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo; configs: rules: convention.terminator: require_final_semicolon: true test_fail_no_semi_colon_custom_require_oneline: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true 
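The CV06 cases above and below toggle require_final_semicolon and multiline_newline under [sqlfluff:rules:convention.terminator]. A hedged sketch of surfacing the missing-semicolon violation — as before, the config filename, temp path and config_path usage are assumptions for illustration:

import pathlib
import tempfile

import sqlfluff

CONFIG = """\
[sqlfluff]
dialect = ansi

[sqlfluff:rules:convention.terminator]
require_final_semicolon = true
"""

with tempfile.TemporaryDirectory() as tmp:
    cfg = pathlib.Path(tmp) / ".sqlfluff"
    cfg.write_text(CONFIG)
    # Mirrors test_fail_no_semi_colon_custom_require above.
    for violation in sqlfluff.lint("SELECT a\nFROM foo\n", rules=["CV06"], config_path=str(cfg)):
        print(violation["code"], violation["description"])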
test_fail_no_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_pass_multi_statement_semi_colon_default: pass_str: | SELECT a FROM foo; SELECT b FROM bar; test_pass_multi_statement_semi_colon_custom_oneline: pass_str: | SELECT a FROM foo; SELECT b FROM bar; test_fail_multi_statement_semi_colon_custom_multiline: fail_str: | SELECT a FROM foo; SELECT b FROM bar; fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: multiline_newline: true test_pass_multi_statement_no_trailing_semi_colon_default: pass_str: | SELECT a FROM foo; SELECT b FROM bar test_fail_multi_statement_no_trailing_semi_colon_custom_require: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo; SELECT b FROM bar; configs: rules: convention.terminator: require_final_semicolon: true test_fail_multi_statement_no_trailing_semi_colon_custom_require_oneline: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo; SELECT b FROM bar; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_multi_statement_no_trailing_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo; SELECT b FROM bar fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_space_semi_colon_default: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo; test_fail_newline_semi_colon_default: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo; test_pass_newline_semi_colon_custom_newline: pass_str: | SELECT a FROM foo ; configs: rules: convention.terminator: multiline_newline: true test_fail_multi_statement_semi_colon_default: fail_str: | SELECT a FROM foo ; SELECT b FROM bar ; fix_str: | SELECT a FROM foo; SELECT b FROM bar; test_fail_multi_statement_semi_colon_custom_require_multiline: fail_str: | SELECT a FROM foo ; SELECT b FROM bar ; fix_str: | SELECT a FROM foo ; SELECT b FROM bar ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_multiple_newlines_semi_colon_custom_require_newline: fail_str: | SELECT a FROM foo ; fix_str: | SELECT a FROM foo ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_final_semi_colon_same_line_inline_comment: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo; -- inline comment configs: rules: convention.terminator: require_final_semicolon: true test_fail_final_semi_colon_same_line_inline_comment_custom_oneline: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo; -- inline comment configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_final_semi_colon_newline_inline_comment_custom_multiline: fail_str: | SELECT a FROM foo -- inline comment fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: require_final_semicolon: true multiline_newline: true test_fail_same_line_inline_comment: fail_str: | SELECT a FROM foo -- inline comment ; fix_str: | SELECT a FROM foo; -- inline comment test_fail_same_line_multiple_inline_comment: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo; -- inline comment #1 -- inline comment #2 test_pass_newline_inline_comment:
pass_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_inline_comment: fail_str: | SELECT a FROM foo -- inline comment ; fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_multiple_inline_comments_custom_oneline: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo; -- inline comment #1 -- inline comment #2 configs: rules: convention.terminator: multiline_newline: true test_fail_newline_multiple_inline_comments_custom_multiline: fail_str: | SELECT a FROM foo -- inline comment #1 -- inline comment #2 ; fix_str: | SELECT a FROM foo -- inline comment #1 ; -- inline comment #2 configs: rules: convention.terminator: multiline_newline: true test_fail_newline_trailing_inline_comment: fail_str: | SELECT a FROM foo ; -- inline comment fix_str: | SELECT a FROM foo -- inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_preceding_block_comment_custom_oneline: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_preceding_block_comment_custom_multiline: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_trailing_block_comment: fail_str: | SELECT foo FROM bar; /* multiline comment */ fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_block_comment_semi_colon_before: fail_str: | SELECT foo FROM bar; /* multiline comment */ fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_block_comment_semi_colon_after: fail_str: | SELECT foo FROM bar /* multiline comment */ ; fix_str: | SELECT foo FROM bar ; /* multiline comment */ configs: rules: convention.terminator: multiline_newline: true test_fail_newline_create_table: # https://github.com/sqlfluff/sqlfluff/issues/2268 fail_str: | CREATE TABLE my_table ( id INTEGER ); fix_str: | CREATE TABLE my_table ( id INTEGER ) ; configs: rules: convention.terminator: multiline_newline: true test_fail_newline_create_table_inline_comment: fail_str: | CREATE TABLE my_table ( id INTEGER ); --inline comment fix_str: | CREATE TABLE my_table ( id INTEGER ) --inline comment ; configs: rules: convention.terminator: multiline_newline: true test_fail_whitespace_after_simple_select: fail_str: | SELECT 1 ; fix_str: | SELECT 1; test_fail_whitespace_after_snowflake_set: fail_str: | SET foo = (SELECT foo FROM foo.foo) ; fix_str: | SET foo = (SELECT foo FROM foo.foo); configs: core: dialect: snowflake test_fail_templated_fix_crosses_block_boundary: # The rule wants to move the semicolon to the same line as the SELECT, but # the core linter prevents it because it crosses a template block boundary. 
fail_str: | {% if True %} SELECT 1 {% else %} SELECT 2 {% endif %} ; configs: rules: convention.terminator: require_final_semicolon: true test_pass_empty_file: pass_str: "" test_pass_empty_file_with_require_final_semicolon: pass_str: "" configs: rules: convention.terminator: require_final_semicolon: true test_pass_file_with_only_comments: pass_str: | -- just an empty file test_pass_file_with_only_comments_with_require_final_semicolon: pass_str: | -- just an empty file configs: rules: convention.terminator: require_final_semicolon: true sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV07.yml000066400000000000000000000035241451700765000235370ustar00rootroot00000000000000rule: CV07 test_pass_no_outer_brackets: pass_str: | SELECT foo FROM bar test_fail_outer_brackets: fail_str: | (SELECT foo FROM bar) fix_str: | SELECT foo FROM bar test_fail_outer_brackets_inner_subquery: fail_str: | (SELECT foo FROM (select * from bar)) fix_str: | SELECT foo FROM (select * from bar) test_pass_set_statement_brackets: pass_str: | (SELECT 1) UNION (SELECT 1) test_pass_no_outer_brackets_tsql: pass_str: | SELECT foo FROM bar configs: core: dialect: tsql test_fail_outer_brackets_tsql: fail_str: | (SELECT foo FROM bar) fix_str: | SELECT foo FROM bar configs: core: dialect: tsql test_fail_outer_brackets_inner_subquery_tsql: fail_str: | (SELECT foo FROM (select * from bar)) fix_str: | SELECT foo FROM (select * from bar) configs: core: dialect: tsql test_pass_begin_end_statement_brackets_tsql: pass_str: | BEGIN (SELECT 1) END configs: core: dialect: tsql test_fail_leading_trailing_whitespace: # This previously caused the post-fix parse check to fail. fail_str: "(\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n);\n" # Yes, the formatting looks bad, but that's because we're only running CV07 # here. In the real world, other rules will tidy up the formatting. fix_str: "\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n;\n" test_fail_leading_whitespace_and_comment: fail_str: "( -- This\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n)\n" # Yes, the formatting looks bad, but that's because we're only running CV07 # here. In the real world, other rules will tidy up the formatting. 
fix_str: " -- This\n SELECT\n foo,\n bar,\n baz\n FROM mycte2\n\n" sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV08.yml000066400000000000000000000053571451700765000235460ustar00rootroot00000000000000rule: CV08 test_fail_right_join: fail_str: | SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id; test_pass_left_join: pass_str: | SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id; test_pass_inner_join: pass_str: | SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id; test_fail_right_and_right_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id RIGHT JOIN baz ON foo.baz_id = baz.id; test_fail_right_and_left_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id LEFT JOIN baz ON foo.baz_id = baz.id; test_fail_right_and_inner_join: fail_str: | SELECT foo.col1, bar.col2, baz.col3 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id INNER JOIN baz ON foo.baz_id = baz.id; test_pass_left_inner_join: pass_str: | SELECT foo.col1, bar.col2, baz.col3 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id INNER JOIN baz ON foo.baz_id = baz.id; test_fail_subquery_right_join: fail_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id ); test_pass_subquery_left_join: pass_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id ); test_pass_subquery_inner_join: pass_str: | SELECT col1, col2 FROM ( SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id ); test_fail_with_right_join: fail_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM foo RIGHT JOIN bar ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; test_pass_with_left_join: pass_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM bar LEFT JOIN foo ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; test_pass_with_inner_join: pass_str: | WITH cte AS ( SELECT foo.col1, bar.col2 FROM bar INNER JOIN foo ON foo.bar_id = bar.id ) SELECT col1, col2 FROM cte; sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV09.yml000066400000000000000000000120371451700765000235400ustar00rootroot00000000000000rule: CV09 test_pass_default_none: pass_str: | SELECT col1 FROM None test_fail_deny_word: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_fail_deny_word_case_difference1: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: Deprecated_Table test_fail_deny_word_case_difference2: fail_str: | SELECT col1 FROM Deprecated_Table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_fail_multiple_deny_words1: fail_str: | SELECT myOldFunction(col1) FROM table1 configs: rules: convention.blocked_words: blocked_words: deprecated_table,myoldFunction test_fail_multiple_deny_words2: fail_str: | SELECT col1 FROM deprecated_table configs: rules: convention.blocked_words: blocked_words: deprecated_table,myoldFunction test_pass_not_complete_match: pass_str: | SELECT col1 FROM deprecated_table1 configs: rules: convention.blocked_words: blocked_words: deprecated_table test_pass_is_comment: pass_str: | -- deprecated_table SELECT col1 FROM new_table configs: rules: convention.blocked_words: blocked_words: deprecated_table test_pass_in_comment: pass_str: | -- This used to use the deprecated_table SELECT col1 FROM new_table configs: rules: convention.blocked_words: 
blocked_words: deprecated_table test_fail_bool: fail_str: | CREATE TABLE myschema.t1 (a BOOL); configs: core: dialect: exasol rules: convention.blocked_words: blocked_words: bool test_pass_bool: pass_str: | CREATE TABLE myschema.t1 (a BOOLEAN); configs: core: dialect: exasol rules: convention.blocked_words: blocked_words: bool test_pass_bigquery: pass_str: | SELECT * FROM `owner.schema.table_2022_07_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: ^.*(2022_06_01|2022_05_01).*$ test_fail_bigquery: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: ^.*(2022_06_01|2022_05_01).*$ test_fail_bigquery2: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: .*(2022_06_01|2022_05_01).* test_fail_bigquery3: fail_str: | SELECT * FROM `owner.schema.table_2022_06_01_desktop` configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (2022_06_01|2022_05_01) test_pass_comment_word1: pass_str: | SELECT * FROM table1 -- TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_word2: pass_str: | SELECT * FROM table1 # TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_word3: pass_str: | SELECT * FROM table1 /* TABLESAMPLE SYSTEM (.05 PERCENT) */ configs: core: dialect: bigquery rules: convention.blocked_words: blocked_words: TABLESAMPLE test_pass_comment_regex1: pass_str: | SELECT * FROM table1 -- TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_comment_regex2: pass_str: | SELECT * FROM table1 # TABLESAMPLE SYSTEM (.05 PERCENT) configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_comment_regex3: pass_str: | SELECT * FROM table1 /* TABLESAMPLE SYSTEM (.05 PERCENT) */ configs: core: dialect: bigquery rules: convention.blocked_words: blocked_regex: (TABLESAMPLE) test_pass_match_source1: pass_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: true rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: true test_pass_match_source2: pass_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: false rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: false test_fail_match_source1: fail_str: | SELECT * FROM {{ ref('deprecated_table') }} configs: core: templater: jinja ignore_templated_areas: false rules: convention.blocked_words: blocked_regex: ref\('deprecated_ match_source: true sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV10.yml000066400000000000000000000224001451700765000235230ustar00rootroot00000000000000rule: CV10 test_fail_result_of_fix_is_valid_bigquery: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: bigquery test_fail_result_of_fix_is_valid_hive: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: hive test_fail_result_of_fix_is_valid_mysql: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" 
configs: core: dialect: mysql test_fail_result_of_fix_is_valid_sparksql: fail_str: | SELECT "some string", 'some string' fix_str: | SELECT "some string", "some string" configs: core: dialect: sparksql test_pass_preferred_tripple_quotes: pass_str: | SELECT """some_string""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_alternate_tripple_quotes: fail_str: | SELECT '''some_string''' fix_str: | SELECT """some_string""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_unnecessary_escaping: fail_str: | SELECT 'unnecessary \"\"escaping', "unnecessary \'\'escaping" fix_str: | SELECT 'unnecessary ""escaping', "unnecessary ''escaping" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_bigquery_string_prefixes: fail_str: | SELECT r'some_string', b'some_string', R'some_string', B'some_string' fix_str: | SELECT r"some_string", b"some_string", R"some_string", B"some_string" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_bigquery_string_prefixes_when_style_is_consistent: fail_str: | SELECT r'some_string', b"some_string" fix_str: | SELECT r'some_string', b'some_string' configs: core: dialect: bigquery test_fail_tripple_quoted_strings_with_quotes_in_them: fail_str: | SELECT """Strings with "" in them""", '''Strings with "" in them''' fix_str: | SELECT """Strings with "" in them""", """Strings with "" in them""" configs: core: dialect: bigquery test_fail_tripple_quoted_strings_dont_remove_escapes_single_quotes: fail_str: | SELECT """Strings escaped quotes \" and \' in them""", '''Strings escaped quotes \" and \' in them''' fix_str: | SELECT '''Strings escaped quotes \" and \' in them''', '''Strings escaped quotes \" and \' in them''' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: single_quotes test_fail_tripple_quoted_strings_dont_remove_escapes_double_quotes: fail_str: | SELECT """Strings escaped quotes \" and \' in them""", '''Strings escaped quotes \" and \' in them''' fix_str: | SELECT """Strings escaped quotes \" and \' in them""", """Strings escaped quotes \" and \' in them""" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_edge_case_tripple_quoted_string_ending_with_double_quote: # Test that a trailing preferred quote in tripple quote scenario doesn't break fail_str: | SELECT '''Here's a "''', '''Here's a " ''' fix_str: | SELECT '''Here's a "''', """Here's a " """ configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_lots_of_quotes: # Test that we can handle complex quoting scenarios pass_str: | SELECT '\\""', "\\''" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_lots_of_quotes: # Test that we can handle complex quoting scenarios fail_str: | SELECT 'Lots of \\\\\\\\\'quotes\'' fix_str: | SELECT "Lots of \\\\\\\\'quotes'" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_quote_replace_in_raw_strings: # Test that we can handle complex quoting scenarios fail_str: | SELECT r'Tricky "quote', r'Not-so-tricky \"quote' fix_str: | SELECT r'Tricky "quote', 
r"Not-so-tricky \"quote" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_dollar_quoted_strings_are_ignored: # Test that we dont interfere with dollar quoted strings pass_str: | SELECT 'some string', $$some_other_string$$ configs: core: dialect: postgres rules: convention.quoted_literals: force_enable: true preferred_quoted_literal_style: single_quotes test_pass_date_constructor_strings_are_ignored_1: # Test that we dont interfere with date constructor strings pass_str: | SELECT "quoted string", DATE'some string' test_pass_date_constructor_strings_are_ignored_2: # Test that we dont interfere with date constructor strings pass_str: | SELECT DATE'some string' configs: rules: convention.quoted_literals: force_enable: true preferred_quoted_literal_style: double_quotes test_pass_empty_string: pass_str: | SELECT "" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_empty_string: fail_str: | SELECT '' fix_str: | SELECT "" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_simple: pass_str: | SELECT "{{ 'a string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_simple: fail_str: | SELECT '{{ "a string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_complex: pass_str: | SELECT "this_is_a_lintable_{{ 'string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_complex: fail_str: | SELECT 'this_is_a_lintable_{{ "string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_quoted_literals_with_multiple_templates: pass_str: | SELECT "this_{{ 'is' }}_{{ 'a_lintable' }}_{{ 'string' }}" configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_with_multiple_templates: fail_str: | SELECT 'this_{{ "is" }}_{{ "a_lintable" }}_{{ "string" }}' configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_fail_partially_templated_quoted_literals_inside_blocks: fail_str: | SELECT {% if true %} '{{ "another_templated_string" }}' {% endif %} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_fully_templated_quoted_literals_are_ignored: pass_str: | SELECT {{ "'a_non_lintable_string'" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_1: pass_str: | SELECT '{{ "string' FROM table1" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes test_pass_partially_templated_literals_are_ignored_when_some_quotes_are_inside_the_template_2: pass_str: | {{ "SELECT 'stri" -}}ng' FROM table1 configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes 
test_pass_prefix_chars_are_correctly_detected_as_unlintable: pass_str: | SELECT r{{ "''" }}, r{{ "'project' FROM table1" }} configs: core: dialect: bigquery rules: convention.quoted_literals: preferred_quoted_literal_style: double_quotes sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/CV11.yml000066400000000000000000000213751451700765000235360ustar00rootroot00000000000000rule: CV11 test_pass_cast: pass_str: | select cast(1 as varchar) as bar from foo; test_pass_casting_operator: pass_str: | select 1::varchar as bar from foo; test_pass_multi_casting_operator: pass_str: | select 1::int::varchar as bar from foo; test_pass_convert: pass_str: | select convert(varchar, 1) as bar from foo; test_pass_3_argument_convert: pass_str: | select convert(varchar, 1, 126) as bar from foo; # maybe someday we can have fixes for cast and convert with comments test_pass_convert_with_comment: pass_str: | select convert( -- convert the value int, /* to an integer */ 1) as bar; test_pass_cast_with_comment: pass_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; test_fail_cast_with_comment_when_config_is_set_to_convert: fail_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; configs: rules: convention.casting_style: preferred_type_casting_style: convert test_fail_cast_with_comment_when_config_is_set_to_shorthand: fail_str: | select cast( -- cast the value 1 /* to an integer */ as int) as bar; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_3_argument_convert_when_config_is_set_to_cast: fail_str: | select convert(varchar, 1, 126) as bar from foo; configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_3_argument_convert_when_config_is_set_to_shorthand: fail_str: | select convert(varchar, 1, 126) as bar from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_inconsistent_type_casting_prior_convert: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1) as bar, convert(text, convert(int, 100)), convert(text, 10) as coo from foo; test_fail_inconsistent_type_casting_with_comment: fail_str: | select cast(10 as text) as coo, convert( -- Convert the value int, /* to an integer */ 1) as bar, 100::int::text from foo; fix_str: | select cast(10 as text) as coo, convert( -- Convert the value int, /* to an integer */ 1) as bar, cast(cast(100 as int) as text) from foo; test_fail_inconsistent_type_casting_prior_cast: fail_str: | select cast(10 as text) as coo, convert(int, 1) as bar, 100::int::text, from foo; fix_str: | select cast(10 as text) as coo, cast(1 as int) as bar, cast(cast(100 as int) as text), from foo; test_fail_inconsistent_type_casting_prior_cast_3_arguments_convert: fail_str: | select cast(10 as text) as coo, convert(int, 1, 126) as bar, 100::int::text from foo; fix_str: | select cast(10 as text) as coo, convert(int, 1, 126) as bar, cast(cast(100 as int) as text) from foo; test_fail_inconsistent_type_casting_prior_convert_cast_with_comment: fail_str: | select convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo, 100::int::text from foo; fix_str: | select convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo, convert(text, convert(int, 100)) from foo; test_fail_inconsistent_type_casting_prior_shorthand: fail_str: | select 100::int::text, cast(10 as text) as coo, convert(int, 1) as bar from foo; fix_str: | select 
100::int::text, 10::text as coo, 1::int as bar from foo; test_fail_inconsistent_type_casting_prior_shorthand_3_arguments_convert: fail_str: | select 100::int::text, convert(int, 1, 126) as bar, cast(10 as text) as coo from foo; fix_str: | select 100::int::text, convert(int, 1, 126) as bar, 10::text as coo from foo; test_fail_inconsistent_type_casting_prior_shorthand_cast_with_comment: fail_str: | select 100::int::text, convert(int, 126) as bar, cast( 1 /* cast the value to an integer */ as int) as coo from foo; fix_str: | select 100::int::text, 126::int as bar, cast( 1 /* cast the value to an integer */ as int) as coo from foo; test_fail_inconsistent_type_casting_when_config_cast: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select cast(1 as int) as bar, cast(cast(100 as int) as text), cast(10 as text) as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_inconsistent_type_casting_3_arguments_convert_when_config_cast: fail_str: | select convert(int, 1, 126) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1, 126) as bar, cast(cast(100 as int) as text), cast(10 as text) as coo from foo; violations_after_fix: - code: CV11 description: Used type casting style is different from the preferred type casting style. line_no: 2 line_pos: 5 name: "convention.casting_style" configs: rules: convention.casting_style: preferred_type_casting_style: cast test_fail_inconsistent_type_casting_when_config_convert: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1) as bar, convert(text, convert(int, 100)), convert(text, 10) as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: convert test_fail_inconsistent_type_casting_when_config_shorthand: fail_str: | select convert(int, 1) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select 1::int as bar, 100::int::text, 10::text as coo from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_inconsistent_type_casting_3_arguments_convert_when_config_shorthand: fail_str: | select convert(int, 1, 126) as bar, 100::int::text, cast(10 as text) as coo from foo; fix_str: | select convert(int, 1, 126) as bar, 100::int::text, 10::text as coo from foo; violations_after_fix: - code: CV11 description: Used type casting style is different from the preferred type casting style. 
line_no: 2 line_pos: 5 name: "convention.casting_style" configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_pass_when_dialect_is_teradata: pass_str: | select convert(varchar, 1) as bar from foo; configs: core: dialect: teradata test_fail_parenthesize_expression_when_config_shorthand_from_cast: fail_str: | select id::int, cast(calendar_date||' 11:00:00' as timestamp) as calendar_datetime from foo; fix_str: | select id::int, (calendar_date||' 11:00:00')::timestamp as calendar_datetime from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_parenthesize_expression_when_config_shorthand_from_convert: fail_str: | select id::int, convert(timestamp, calendar_date||' 11:00:00') as calendar_datetime from foo; fix_str: | select id::int, (calendar_date||' 11:00:00')::timestamp as calendar_datetime from foo; configs: rules: convention.casting_style: preferred_type_casting_style: shorthand test_fail_snowflake_semi_structured_cast_4453: # https://github.com/sqlfluff/sqlfluff/issues/4453 fail_str: | select (trim(value:Longitude::varchar))::double as longitude; select col:a.b:c::varchar as bar; fix_str: | select cast((trim(cast(value:Longitude as varchar))) as double) as longitude; select cast(col:a.b:c as varchar) as bar; configs: core: dialect: snowflake rules: convention.casting_style: preferred_type_casting_style: cast sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/JJ01.yml000066400000000000000000000035221451700765000235220ustar00rootroot00000000000000rule: JJ01 test_simple: pass_str: SELECT 1 from {{ ref('foo') }} test_simple_modified: # Test that the plus/minus notation works fine. pass_str: SELECT 1 from {%+ if true -%} foo {%- endif %} test_simple_modified_fail: # Test that the plus/minus notation works fine. fail_str: SELECT 1 from {%+if true-%} {{ref('foo')}} {%-endif%} fix_str: SELECT 1 from {%+ if true -%} {{ ref('foo') }} {%- endif %} test_fail_jinja_tags_no_space: fail_str: SELECT 1 from {{ref('foo')}} fix_str: SELECT 1 from {{ ref('foo') }} test_fail_jinja_tags_multiple_spaces: fail_str: SELECT 1 from {{ ref('foo') }} fix_str: SELECT 1 from {{ ref('foo') }} test_fail_jinja_tags_no_space_2: fail_str: SELECT 1 from {{+ref('foo')-}} fix_str: SELECT 1 from {{+ ref('foo') -}} test_pass_newlines: # It's ok if there are newlines. 
pass_str: SELECT 1 from {{ ref('foo') }} test_fail_templated_segment_contains_leading_literal: fail_str: | SELECT user_id FROM `{{"gcp_project"}}.{{"dataset"}}.campaign_performance` fix_str: | SELECT user_id FROM `{{ "gcp_project" }}.{{ "dataset" }}.campaign_performance` configs: core: dialect: bigquery test_fail_segment_contains_multiple_templated_slices_last_one_bad: fail_str: CREATE TABLE `{{ "project" }}.{{ "dataset" }}.{{"table"}}` fix_str: CREATE TABLE `{{ "project" }}.{{ "dataset" }}.{{ "table" }}` configs: core: dialect: bigquery test_fail_jinja_tags_no_space_no_content: fail_str: SELECT {{""-}}1 fix_str: SELECT {{ "" -}}1 test_fail_jinja_tags_across_segment_boundaries: fail_str: SELECT a{{-"1 + b"}}2 fix_str: SELECT a{{- "1 + b" }}2 test_pass_python_templater: pass_str: SELECT * FROM hello.{my_table}; configs: core: templater: python templater: python: context: my_table: foo sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-alignment.yml000066400000000000000000000105531451700765000255140ustar00rootroot00000000000000rule: LT01 test_excess_space_without_align_alias: fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo configs: # This is the default config but we're being explicit # here for testing. layout: type: alias_expression: spacing_before: single test_excess_space_with_align_alias: # NOTE: The config here shouldn't move the table alias fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS bar configs: &align_alias layout: type: alias_expression: spacing_before: align align_within: select_clause align_scope: bracketed test_missing_keyword_with_align_alias: fail_str: | SELECT a first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo fix_str: | SELECT a first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo configs: *align_alias test_skip_alias_with_align_alias: fail_str: | SELECT a , b , (a + b) / 2 FROM foo fix_str: | SELECT a, b, (a + b) / 2 FROM foo configs: *align_alias test_excess_space_with_align_alias_wider: # NOTE: The config here SHOULD move the table alias # NOTE: The combined LT01 also fixes the missing space # between `USING` and `(a)`. fail_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS first_table JOIN my_tbl AS second_table USING(a) fix_str: | SELECT a AS first_column, b AS second_column, (a + b) / 2 AS third_column FROM foo AS first_table JOIN my_tbl AS second_table USING (a) configs: &align_alias_wider layout: type: alias_expression: spacing_before: align align_within: select_statement align_scope: bracketed test_align_alias_boundary: # The alias inside the expression shouldn't move. fail_str: | SELECT a AS first_column, (SELECT b AS c) AS second_column fix_str: | SELECT a AS first_column, (SELECT b AS c) AS second_column configs: *align_alias test_align_alias_inline_pass: # The aliases on the same line shouldn't panic. pass_str: SELECT a AS b, c AS d FROM tbl configs: *align_alias test_align_alias_inline_fail: # The aliases on the same line shouldn't panic. 
fail_str: SELECT a AS b , c AS d FROM tbl fix_str: SELECT a AS b, c AS d FROM tbl configs: *align_alias test_align_multiple_a: # https://github.com/sqlfluff/sqlfluff/issues/4023 fail_str: | CREATE TABLE tbl ( foo VARCHAR(25) NOT NULL, barbar INT NULL ) fix_str: | CREATE TABLE tbl ( foo VARCHAR(25) NOT NULL, barbar INT NULL ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement test_align_multiple_b: # If there are multiple options on the same line, choose the first # to align with (i.e. `not null` rather than `unique`). # https://github.com/sqlfluff/sqlfluff/issues/4023 # https://github.com/sqlfluff/sqlfluff/pull/5238 fail_str: | create table tab ( foo varchar(25) not null, barbar int not null unique ) fix_str: | create table tab ( foo varchar(25) not null, barbar int not null unique ) configs: layout: type: data_type: spacing_before: align align_within: create_table_statement column_constraint_segment: spacing_before: align align_within: create_table_statement sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-brackets.yml000066400000000000000000000012411451700765000253260ustar00rootroot00000000000000rule: LT01 test_pass_parenthesis_block_isolated: pass_str: | SELECT * FROM (SELECT 1 AS C1) AS T1; test_pass_parenthesis_block_isolated_template: pass_str: | {{ 'SELECT * FROM (SELECT 1 AS C1) AS T1;' }} configs: core: ignore_templated_areas: false test_fail_parenthesis_block_not_isolated: fail_str: | SELECT * FROM(SELECT 1 AS C1)AS T1; fix_str: | SELECT * FROM (SELECT 1 AS C1) AS T1; test_fail_parenthesis_block_not_isolated_templated: fail_str: | {{ 'SELECT * FROM(SELECT 1 AS C1)AS T1;' }} configs: core: ignore_templated_areas: false test_pass_parenthesis_function: pass_str: | SELECT foo(5) FROM T1; sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-commas.yml000066400000000000000000000022151451700765000250110ustar00rootroot00000000000000rule: LT01 test_fail_whitespace_before_comma: fail_str: SELECT 1 ,4 fix_str: SELECT 1, 4 test_fail_whitespace_before_comma_template: fail_str: | {{ 'SELECT 1 ,4' }} configs: core: ignore_templated_areas: false test_pass_errors_only_in_templated_and_ignore: pass_str: | {{ 'SELECT 1 ,4' }}, 5, 6 configs: core: ignore_templated_areas: true test_fail_errors_only_in_non_templated_and_ignore: fail_str: | {{ 'SELECT 1, 4' }}, 5 , 6 fix_str: | {{ 'SELECT 1, 4' }}, 5, 6 configs: core: ignore_templated_areas: true test_pass_single_whitespace_after_comma: pass_str: SELECT 1, 4 test_pass_single_whitespace_after_comma_template: pass_str: | {{ 'SELECT 1, 4' }} configs: core: ignore_templated_areas: false test_fail_multiple_whitespace_after_comma: fail_str: SELECT 1, 4 fix_str: SELECT 1, 4 test_fail_no_whitespace_after_comma: fail_str: SELECT 1,4 fix_str: SELECT 1, 4 test_fail_no_whitespace_after_comma_2: fail_str: SELECT FLOOR(dt) ,count(*) FROM test fix_str: SELECT FLOOR(dt), count(*) FROM test test_pass_bigquery_trailing_comma: pass_str: SELECT 1, 2, sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-excessive.yml000066400000000000000000000223111451700765000255270ustar00rootroot00000000000000rule: LT01 test_basic: pass_str: SELECT 1 test_basic_template: pass_str: | {{ 'SELECT 1' }} configs: core: ignore_templated_areas: false test_basic_fix: fail_str: SELECT 1 fix_str: SELECT 1 test_basic_fail_template: fail_str: | {{ 'SELECT 1' }} configs: core: ignore_templated_areas: false test_simple_fix: fail_str: | select 1 + 2 + 3 + 4 -- 
Comment from foo fix_str: | select 1 + 2 + 3 + 4 -- Comment from foo test_identifier_fix: fail_str: | SELECT [thistable] . [col] FROM [thisdatabase] . [thisschema] . [thistable] fix_str: | SELECT [thistable].[col] FROM [thisdatabase].[thisschema].[thistable] configs: core: dialect: tsql test_comparison_operator_fix: fail_str: | SELECT foo FROM bar WHERE baz > = 10; fix_str: | SELECT foo FROM bar WHERE baz >= 10; configs: core: dialect: tsql test_comparison_operator_pass: pass_str: | SELECT foo FROM bar WHERE baz >= 10; configs: core: dialect: tsql test_casting_operator_fix: fail_str: | SELECT '1' :: INT; fix_str: | SELECT '1'::INT; configs: core: dialect: postgres test_casting_operator_pass: pass_str: | SELECT '1'::INT; configs: core: dialect: postgres test_fix_tsql_spaced_chars: fail_str: | SELECT col1 FROM table1 WHERE 1 > = 1 fix_str: | SELECT col1 FROM table1 WHERE 1 >= 1 configs: core: dialect: tsql # Check CASE Statement parses with newlines properly # See https://github.com/sqlfluff/sqlfluff/issues/2495 test_pass_postgres_case_statement: pass_str: | SELECT a, CASE WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'other' END AS b FROM test; configs: core: dialect: postgres test_excess_space_cast: fail_str: | select '1' :: INT as id1, '2'::int as id2 from table_a fix_str: | select '1'::INT as id1, '2'::int as id2 from table_a test_redshift_at_time_zone: pass_str: | SELECT date_w_tz[0] AT TIME ZONE 'Etc/UTC' AS bar FROM foo configs: core: dialect: redshift test_pass_snowflake_semi_structured: pass_str: "SELECT to_array(a.b:c) FROM d" configs: core: dialect: snowflake test_fail_snowflake_semi_structured_single: fail_str: | SELECT to_array(a.b : c) as d, e : f : g::string as h FROM j fix_str: | SELECT to_array(a.b:c) as d, e:f:g::string as h FROM j configs: core: dialect: snowflake test_fail_snowflake_semi_structured_multi: fail_str: | SELECT to_array(a.b : c) as d, e : f : g::string as h FROM j fix_str: | SELECT to_array(a.b:c) as d, e:f:g::string as h FROM j configs: core: dialect: snowflake test_pass_bigquery_specific: # Test a selection of bigquery specific spacings work. # Specifically EXCEPT & qualified functions. 
pass_str: | SELECT * EXCEPT (order_id); SELECT NET.HOST(LOWER(url)) AS host FROM urls; configs: core: dialect: bigquery test_pass_bigquery_specific_arrays_1: # An example of _no whitespace_ after an array type pass_str: | SELECT ARRAY[1, 2, 3] AS floats; configs: core: dialect: bigquery test_pass_bigquery_specific_arrays_2: # An example of _whitespace_ after an array type pass_str: | CREATE TEMPORARY FUNCTION DoSomething(param1 STRING, param2 STRING) RETURNS ARRAY LANGUAGE js AS """Some JS"""; SELECT DoSomething(col1) FROM table1 configs: core: dialect: bigquery test_pass_bigquery_array_function: # Test spacing of Array Generator function brackets pass_str: | SELECT ARRAY(SELECT 1 FROM table1); configs: core: dialect: bigquery test_pass_bigquery_specific_structs: # Test spacing of complex STRUCT brackets pass_str: | create table testing.array_struct_tbl ( address_array_of_nested_structs ARRAY, col2 STRING>> ) configs: core: dialect: bigquery test_pass_bigquery_specific_struct_access: # Test spacing of function access pass_str: | SELECT testFunction(a).b AS field, testFunction(a).* AS wildcard, testFunction(a).b.c AS field_with_field, testFunction(a).b.* AS field_with_wildcard, testFunction(a)[OFFSET(0)].* AS field_with_offset_wildcard, testFunction(a)[SAFE_OFFSET(0)].* AS field_with_safe_offset_wildcard, testFunction(a)[ORDINAL(1)].* AS field_with_ordinal_wildcard, testFunction(a)[ORDINAL(1)].a AS field_with_ordinal_field FROM table1 configs: core: dialect: bigquery test_pass_bigquery_struct_function_no_spaces: # Test struct function does not flag for missing spaces # e.g. doesn't flag `STRUCT()` as should be `STRUCT ()` pass_str: | SELECT TO_JSON(STRUCT()), TO_JSON(STRUCT(1, 2, 3)), STRUCT(1, 2, 3) FROM table1 configs: core: dialect: bigquery test_postgres_datatype: # https://github.com/sqlfluff/sqlfluff/issues/4521 # https://github.com/sqlfluff/sqlfluff/issues/4565 pass_str: | select 1::NUMERIC(3, 1), 2::double precision, '2020-01-01'::timestamp with time zone, 'foo'::character varying, B'10101'::bit(3), B'10101'::bit varying(3), B'10101'::bit varying configs: core: dialect: postgres test_redshift_datatype: pass_str: | select 1::NUMERIC(3, 1), 2::double precision, '2020-01-01'::timestamp with time zone, 'foo'::character varying, 'foo'::character varying(MAX), 'foo'::character varying(255), '10101'::binary varying(6) configs: core: dialect: redshift test_bigquery_datatype: pass_str: | select 1::NUMERIC(3, 1) configs: core: dialect: bigquery test_athena_datatype: pass_str: | select 1::DECIMAL(3, 1), 'foo'::VARCHAR(4), 'bar'::CHAR(3), col1::STRUCT, col2::ARRAY, '2020-01-01'::timestamp with time zone configs: core: dialect: athena test_hive_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), col1::STRUCT, col2::ARRAY, col3::ARRAY[4] configs: core: dialect: hive test_sqlite_datatype: pass_str: | select 1::double precision, 1::DECIMAL(10, 5), 1::unsigned big int, 'foo'::varying character(255), 'foo'::character(20), 'foo'::nvarchar(200) configs: core: dialect: sqlite test_sparksql_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::CHAR(3), col1::STRUCT, col2::ARRAY configs: core: dialect: sparksql test_exasol_datatype: pass_str: | select 1::double precision, 1::DECIMAL(3, 1), 1::NUMERIC(3, 1), 'bar'::VARCHAR(2000 CHAR), col1::INTERVAL DAY(2) TO SECOND(1) configs: core: dialect: exasol test_teradata_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::CHAR(3) configs: core: dialect: teradata 
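# --- Illustrative sketch, not an upstream fixture; the test name below is
# hypothetical. The dialect-specific datatype cases above all make the same
# assertion: a shorthand cast written without whitespace around "::" or
# inside the type's argument list should not be flagged by LT01. A minimal
# sketch against the default ansi dialect, which (as in test_excess_space_cast
# earlier in this file) needs no dialect config at all:
test_pass_sketch_ansi_datatype:
  pass_str: |
    select
        1::numeric(3, 1),
        'foo'::varchar(20)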
test_tsql_datatype: pass_str: | select 1::DECIMAL(3, 1), 1::DEC(3, 1), 1::NUMERIC(3, 1), 'bar'::character varying(3) configs: core: dialect: tsql test_snowflake_match_pattern: # Check that the spacing within the pattern isn't changed. # The MATCH_RECOGNIZE & PATTERN keywords however act as keywords and not as functions # therefore _should_ have a space after them. # See: https://docs.snowflake.com/en/sql-reference/constructs/match_recognize pass_str: | select * from stock_price_history match_recognize ( partition by company order by price_date measures match_number() as match_number, first(price_date) as start_date, last(price_date) as end_date, count(*) as rows_in_sequence, count(row_with_price_decrease.*) as num_decreases, count(row_with_price_increase.*) as num_increases one row per match after match skip to last row_with_price_increase pattern ((A | B){5} C+) define row_with_price_decrease as price < lag(price), row_with_price_increase as price > lag(price) ) order by company, match_number; configs: core: dialect: snowflake test_hive_set_statement: # This should use ColonDelimiter so it shouldn't have spacing around it. pass_str: | set hivevar:cat = "Chloe"; configs: core: dialect: hive test_spark_set_statement: pass_str: | SET -v; configs: core: dialect: sparksql test_clickhouse_system_path: # We shouldn't introduce extra spaces within the path. pass_str: | SYSTEM RELOAD MODEL /model/path; configs: core: dialect: clickhouse sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-literals.yml000066400000000000000000000064561451700765000253640ustar00rootroot00000000000000rule: LT01 test_pass_simple_select: pass_str: "SELECT 'foo'" test_pass_expression: # Test that brackets don't trigger it pass_str: "SELECT ('foo' || 'bar') as buzz" test_fail_as: fail_str: | SELECT 'foo'AS bar FROM foo fix_str: | SELECT 'foo' AS bar FROM foo test_fail_expression: fail_str: "SELECT ('foo'||'bar') as buzz" fix_str: "SELECT ('foo' || 'bar') as buzz" test_pass_comma: pass_str: | SELECT col1, 'string literal' AS new_column_literal, CASE WHEN col2 IN ('a', 'b') THEN 'Y' ELSE 'N' END AS new_column_case FROM some_table WHERE col2 IN ('a', 'b', 'c', 'd'); test_pass_semicolon: pass_str: | ALTER SESSION SET TIMEZONE = 'UTC'; configs: core: dialect: snowflake test_pass_bigquery_udf_triple_single_quote: pass_str: | CREATE TEMPORARY FUNCTION a() LANGUAGE js AS ''' CODE GOES HERE '''; configs: core: dialect: bigquery test_pass_bigquery_udf_triple_double_quote: pass_str: | CREATE TEMPORARY FUNCTION a() LANGUAGE js AS """ CODE GOES HERE """; configs: core: dialect: bigquery test_pass_ansi_single_quote: pass_str: "SELECT a + 'b' + 'c' FROM tbl;" test_fail_ansi_single_quote: fail_str: "SELECT a +'b'+ 'c' FROM tbl;" fix_str: "SELECT a + 'b' + 'c' FROM tbl;" test_pass_tsql_unicode_single_quote: pass_str: "SELECT a + N'b' + N'c' FROM tbl;" configs: core: dialect: tsql test_fail_tsql_unicode_single_quote: fail_str: "SELECT a +N'b'+N'c' FROM tbl;" fix_str: "SELECT a + N'b' + N'c' FROM tbl;" configs: core: dialect: tsql test_fail_ansi_unicode_single_quote: fail_str: "SELECT a + N'b' + N'c' FROM tbl;" fix_str: "SELECT a + N 'b' + N 'c' FROM tbl;" configs: core: dialect: ansi test_pass_casting_expression: pass_str: "SELECT my_date = '2022-01-01'::DATE AS is_current FROM t;" test_fail_bigquery_casting: fail_str: "SELECT DATE'2007-01-01';" fix_str: "SELECT DATE '2007-01-01';" configs: core: dialect: bigquery test_fail_teradata_casting_type1: fail_str: "SELECT DATE'2007-01-01' AS the_date;" fix_str: "SELECT DATE '2007-01-01' AS 
the_date;" configs: core: dialect: teradata test_pass_teradata_casting_type2: fail_str: "SELECT '9999-12-31'(DATE);" fix_str: "SELECT '9999-12-31' (DATE);" configs: core: dialect: teradata test_pass_sparksql_ansi_interval_minus: pass_str: SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col; configs: core: dialect: sparksql test_pass_sparksql_multi_units_interval_minus: pass_str: SELECT INTERVAL 2 HOUR -'3' MINUTE AS col; configs: core: dialect: sparksql test_fail_old_python_test: fail_str: SELECT a +'b'+'c' FROM tbl; fix_str: SELECT a + 'b' + 'c' FROM tbl; violations: - code: LT01 description: Expected single whitespace between binary operator '+' and quoted literal. line_no: 1 line_pos: 11 name: layout.spacing - code: LT01 description: Expected single whitespace between quoted literal and binary operator '+'. line_no: 1 line_pos: 14 name: layout.spacing - code: LT01 description: Expected single whitespace between binary operator '+' and quoted literal. line_no: 1 line_pos: 15 name: layout.spacing sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-missing.yml000066400000000000000000000021621451700765000252040ustar00rootroot00000000000000rule: LT01 test_fail_no_space_after_using_clause: fail_str: select * from a JOIN b USING(x) fix_str: select * from a JOIN b USING (x) test_pass_newline_after_using_clause: # Check LT01 passes if there's a newline between pass_str: | select * from a JOIN b USING (x) test_fail_cte_no_space_after_as: # Check fixing of single space rule when space is missing fail_str: WITH a AS(select 1) select * from a fix_str: WITH a AS (select 1) select * from a test_fail_multiple_spaces_after_as: # Check fixing of single space rule on multiple spaces fail_str: WITH a AS (select 1) select * from a fix_str: WITH a AS (select 1) select * from a test_fail_cte_newline_after_as: # Check fixing of replacing newline with space fail_str: | WITH a AS ( select 1 ) select * from a fix_str: | WITH a AS ( select 1 ) select * from a test_fail_cte_newline_and_spaces_after_as: # Check stripping newlines and extra whitespace fail_str: | WITH a AS ( select 1 ) select * from a fix_str: | WITH a AS ( select 1 ) select * from a sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-operators.yml000066400000000000000000000054461451700765000255610ustar00rootroot00000000000000rule: LT01 test_pass_brackets: # Test that we don't fail * operators in brackets pass_str: "SELECT COUNT(*) FROM tbl\n\n" test_pass_expression: # Github Bug #207 pass_str: | select field, date(field_1) - date(field_2) as diff from table test_fail_expression: # Github Bug #207 fail_str: | select field, date(field_1)-date(field_2) as diff from table fix_str: | select field, date(field_1) - date(field_2) as diff from table # Check we don't get false alarms with newlines, or sign indicators # ------------------- test_pass_newline_1: pass_str: | SELECT 1 + 2 test_pass_newline_2: pass_str: | SELECT 1 + 2 test_pass_newline_£: pass_str: | SELECT 1 + 2 test_pass_sign_indicators: pass_str: SELECT 1, +2, -4 test_pass_tilde: pass_str: SELECT ~1 # ------------------- fail_simple: fail_str: "SELECT 1+2" fix_str: "SELECT 1 + 2" pass_bigquery_hyphen: # hyphenated table reference should not fail pass_str: SELECT col_foo FROM foo-bar.foo.bar configs: core: dialect: bigquery pass_sparksql_ansi_interval_minus: pass_str: SELECT INTERVAL -'20 15:40:32.99899999' DAY TO SECOND AS col; configs: core: dialect: sparksql test_pass_sparksql_multi_units_interval_minus: pass_str: SELECT INTERVAL -2 HOUR '3' MINUTE AS col; configs: core: 
dialect: sparksql pass_tsql_assignment_operator: # Test that we fix the outer whitespace but don't add any in between + and =. fail_str: SET @param1+=1 fix_str: SET @param1 += 1 configs: core: dialect: tsql pass_concat_string: pass_str: SELECT 'barry' || 'pollard' test_pass_placeholder_spacing: # Test for spacing issues around placeholders # https://github.com/sqlfluff/sqlfluff/issues/4253 pass_str: | {% set is_dev_environment = true %} SELECT * FROM table WHERE some_col IS TRUE {% if is_dev_environment %} AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 7 DAY) {% else %} AND created_at >= DATE_SUB(CURRENT_DATE, INTERVAL 30 DAY) {% endif %} AND TRUE ; fail_bigquery_whitespaces_in_function_reference: fail_str: SELECT dataset . AddFourAndDivide(5, 10) fix_str: SELECT dataset.AddFourAndDivide(5, 10) configs: core: dialect: bigquery pass_bigquery_safe_prefix_function: # SAFE prefix to function calls should not fail pass_str: SELECT SAFE.STRING(JSON '1') configs: core: dialect: bigquery fail_bigquery_safe_prefix_function: # Check that additional whitespaces introduced by # https://github.com/sqlfluff/sqlfluff/issues/4645 # get fixed. fail_str: SELECT SAFE . STRING(JSON '1') fix_str: SELECT SAFE.STRING(JSON '1') configs: core: dialect: bigquery sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT01-trailing.yml000066400000000000000000000014601451700765000253440ustar00rootroot00000000000000rule: LT01 test_fail_trailing_whitespace: fail_str: "SELECT 1 \n" fix_str: "SELECT 1\n" test_fail_trailing_whitespace_on_initial_blank_line: fail_str: " \nSELECT 1 \n" fix_str: "\nSELECT 1\n" test_pass_trailing_whitespace_before_template_code: pass_str: | SELECT {% for elem in ["a", "b"] %} {{ elem }}, {% endfor %} 0 test_fail_trailing_whitespace_and_whitespace_control: fail_str: "{%- set temp = 'temp' -%}\n\nSELECT\n 1, \n 2,\n" fix_str: "{%- set temp = 'temp' -%}\n\nSELECT\n 1,\n 2,\n" test_pass_macro_trailing: pass_str: | {% macro foo(bar) %} {{bar}} {% endmacro %} with base as ( select a, b, {{ foo(1) }} as c from tblb ) select * from tbl sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT02-indent.yml000066400000000000000000001315431451700765000250230ustar00rootroot00000000000000rule: LT02 test_fail_reindent_first_line_1: fail_str: " SELECT 1" fix_str: SELECT 1 violations: - code: LT02 description: First line should not be indented. line_no: 1 line_pos: 1 name: layout.indent test_fail_reindent_first_line_2: # Github Bug #99. Python2 Issues with fixing LT02 fail_str: " select 1 from tbl;" fix_str: select 1 from tbl; test_pass_indentation_of_comments_1: # Github Bug #203 # Comments should be aligned to the following line. pass_str: | SELECT -- Compute the thing (a + b) AS c FROM acceptable_buckets test_pass_indentation_of_comments_2: # Comments should be aligned to the following line. pass_str: | SELECT user_id FROM age_data JOIN audience_size USING (user_id, list_id) -- We LEFT JOIN because blah LEFT JOIN verts USING (user_id) test_fail_tab_indentation: # Using tabs as indents works fail_str: | SELECT a, b FROM my_tbl fix_str: | SELECT a, b FROM my_tbl configs: indentation: indent_unit: tab violations: - code: LT02 description: Expected indent of 1 tabs. line_no: 3 line_pos: 1 name: layout.indent test_pass_indented_joins_default: # Configurable indents work. 
# a) default pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) test_pass_indented_joins_false: # b) specific pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false test_pass_indented_joins_true: # c) specific True, but passing pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true test_fail_indented_joins_true_fix: # d) specific True, but failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true violations: - code: LT02 description: Expected indent of 4 spaces. line_no: 3 line_pos: 1 name: layout.indent test_fail_indented_joins_false_fix: # e) specific False, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false test_pass_indented_using_on_default: # Configurable using_on indents work. # 2.a) default pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) test_pass_indented_using_on_true: # 2.b) specific pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: true test_pass_indented_using_on_false: # 2.c) specific False, but passing pass_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: false test_fail_indented_using_on_false: # 2.d) specific False, but failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: false test_fail_indented_joins_using_on_true: # 2.e) specific True, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: true indented_using_on: true test_fail_indented_joins_using_on_false: # 2.f) specific false for both, and failing fail_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) fix_str: | SELECT a, b, c FROM my_tbl LEFT JOIN another_tbl USING(a) configs: indentation: indented_joins: false indented_using_on: false test_fail_indented_using_on_merge_statment_default: # indented_using_on also covers MERGE INTO statements fail_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 fix_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 test_pass_indented_using_on_merge_statment_false: # indented_using_on also covers MERGE INTO statements pass_str: | MERGE INTO t USING u ON t.a = u.b WHEN MATCHED THEN UPDATE SET a = 1 configs: indentation: indented_using_on: false test_pass_indented_on_contents_default: # Test indented_on_contents when default (true) pass_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true test_pass_indented_on_contents_true: # Test indented_on_contents when true (default) fail_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true fix_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true configs: indentation: indented_on_contents: true test_pass_indented_on_contents_false: # Test indented_on_contents when false (non-default) pass_str: | SELECT r.a, s.b FROM r JOIN s ON r.a = s.a AND true configs: indentation: indented_on_contents: false 
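# --- Illustrative sketch, not an upstream fixture; the test name below is
# hypothetical. As with test_fail_reindent_first_line_1 and
# test_fail_tab_indentation above, a case can also pin the exact violation
# metadata the rule is expected to emit. The line_no/line_pos values here are
# assumptions about what LT02 would report for the unindented column on
# line 2, not a recorded upstream result:
test_fail_sketch_expected_violation_metadata:
  fail_str: |
    SELECT
    a
    FROM my_tbl
  fix_str: |
    SELECT
        a
    FROM my_tbl
  violations:
    - code: LT02
      description: Expected indent of 4 spaces.
      line_no: 2
      line_pos: 1
      name: layout.indent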
test_fail_indented_on_contents_default_fix_a: # Default config for indented_on_contents is true fail_str: | SELECT * FROM t1 JOIN t2 ON true AND true fix_str: | SELECT * FROM t1 JOIN t2 ON true AND true test_fail_indented_on_contents_default_fix_b: # Default config for indented_on_contents is true. # This is an alternate interpretation of untaken indents. fail_str: | SELECT * FROM t1 JOIN t2 ON true AND true fix_str: | SELECT * FROM t1 JOIN t2 ON true AND true test_fail_indented_on_contents_false_fix: fail_str: | SELECT t1.a, t2.b FROM t1 JOIN t2 ON true AND true fix_str: | SELECT t1.a, t2.b FROM t1 JOIN t2 ON true AND true configs: indentation: indented_on_contents: false test_pass_indented_from_with_comment: pass_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_fail_indented_from_with_comment_alternate: # This shows the alternative position of comments being allowed. pass_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_fail_indented_from_with_comment_fix: # This shows the fix still returns to the primary location. fail_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) fix_str: | SELECT * FROM t1 -- Comment JOIN t2 USING (user_id) test_fail_indented_multi_line_comment: fail_str: | SELECT business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the customer_type FROM global_actions_states fix_str: | SELECT business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the customer_type FROM global_actions_states test_jinja_with_disbalanced_pairs: # The range(3) -%} results in swallowing the \n # N.B. The way LT02 handles this is questionable, # and this test seals in that behaviour. pass_str: | SELECT cohort_month {% for i in range(3) -%} , {{ i }} AS index_{{ i }} {% endfor -%} , TRUE AS overall FROM orders test_fail_attempted_hanger_fix: # Check messy hanger correction. fail_str: | SELECT coalesce(foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl test_fail_possible_hanger_fix: # Same note as above, but with a messier example. fail_str: | SELECT coalesce(foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl test_fail_consecutive_hangers: fail_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' fix_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' test_fail_consecutive_hangers_implicit: # NOTE: The allowed implicit indent in the WHERE clause, # but by default they're not enabled. fail_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' fix_str: | select * from foo where a like 'a%' and b like 'b%' and c like 'c%' and d like 'd%' and e like 'e%' and f like 'f%' configs: indentation: allow_implicit_indents: true test_fail_clean_reindent_fix: # A "clean" indent is where the previous line ends with an # indent token (as per this example). We should use the # default approach and indent by 1 step. # NOTE: That because the indent opened before "coalesce" # isn't closed before the end of the line, we force an # additional indent before it. 
fail_str: | SELECT coalesce( foo, bar) FROM tbl fix_str: | SELECT coalesce( foo, bar ) FROM tbl # https://github.com/sqlfluff/sqlfluff/issues/643 test_pass_indent_snowflake: pass_str: | with source_data as ( select * from {{ source('source_name', 'xxx_yyy_zzz') }} ) select * from source_data configs: core: dialect: snowflake # https://github.com/sqlfluff/sqlfluff/issues/643 test_pass_indent_indent_bigquery: pass_str: | with source_data as ( select * from {{ source('source_name', 'xxx_yyy_zzz') }} ) select * from source_data configs: core: dialect: bigquery test_jinja_indent_templated_table_name_a: fail_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} fix_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} # Like test_jinja_indent_1_a but "FROM" table not initially # indented. test_jinja_indent_templated_table_name_b: fail_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} fix_str: | -- This file combines product data from individual brands into a staging table {% for product in ['table1', 'table2'] %} SELECT brand, country_code, category, name, id FROM {{ product }} {% if not loop.last -%} UNION ALL {%- endif %} {% endfor %} test_jinja_nested_blocks: fail_str: | WITH raw_effect_sizes AS ( SELECT {% for action in ['a'] %} {% if True %} rate_su_{{action}}, {% endif %} {% endfor %} ) SELECT 1 fix_str: | WITH raw_effect_sizes AS ( SELECT {% for action in ['a'] %} {% if True %} rate_su_{{action}}, {% endif %} {% endfor %} ) SELECT 1 # LIMIT, QUALIFY, and WINDOW both indent test_limit_and_qualify_and_window_indent: fail_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) fix_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) configs: core: dialect: bigquery # LIMIT, QUALIFY and WINDOW both acceptable on single line test_limit_and_qualify_and_window_single_line: pass_str: | SELECT a, b FROM my_tbl QUALIFY 1 LIMIT 1 WINDOW some_window AS (PARTITION BY 1) configs: core: dialect: bigquery # By default CTEs should not be indented test_pass_cte: pass_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 configs: core: dialect: bigquery # CTEs can be configured to be indented test_fail_indented_cte: fail_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 fix_str: | WITH some_cte AS ( SELECT 1 FROM table1 ), some_other_cte AS ( SELECT 1 FROM table1 ) SELECT 1 FROM table1 configs: core: dialect: bigquery indentation: indented_ctes: true # Exasol LUA script test_exasol_script: pass_str: | CREATE OR REPLACE LUA SCRIPT ASCRIPT (APARAM) RETURNS ROWCOUNT AS res = 1 suc = true if not suc then error("ERROR") end return res / configs: core: dialect: exasol test_pass_tsql_else_if: pass_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql 
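# --- Illustrative sketch, not an upstream fixture; the test name below is
# hypothetical. The ELSE IF cases around this point all encode the same
# T-SQL expectation: the statement governed by an IF (or ELSE) is indented
# one step under it. A minimal sketch for a bare IF with no ELSE branch,
# assuming the same behaviour carries over:
test_fail_sketch_tsql_bare_if:
  fail_str: |
    IF (1 > 1)
    PRINT 'A';
  fix_str: |
    IF (1 > 1)
        PRINT 'A';
  configs:
    core:
      dialect: tsql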
test_fail_tsql_else_if: fail_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; fix_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql test_fail_tsql_else_if_successive: fail_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; fix_str: | IF (1 > 1) PRINT 'A'; ELSE IF (2 > 2) PRINT 'B'; ELSE IF (3 > 3) PRINT 'C'; ELSE PRINT 'D'; configs: core: dialect: tsql # TSQL function test_tsql_function: fail_str: | CREATE FUNCTION dbo.isoweek (@DATE datetime) RETURNS int WITH EXECUTE AS CALLER AS BEGIN DECLARE @ISOweek int; SET @ISOweek = DATEPART(wk, @DATE) + 1 - DATEPART(wk, CAST(DATEPART(yy, @DATE) AS char(4)) + '0104'); --Special cases Jan 1-3 may belong to the previous year IF (@ISOweek = 0) SET @ISOweek = dbo.ISOWEEK(CAST(DATEPART(yy, @DATE) - 1 AS char(4) ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1; --Special case Dec 29-31 may belong to the next year IF ((DATEPART(mm, @DATE) = 12) AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28)) SET @ISOweek = 1; RETURN(@ISOweek); END; GO fix_str: | CREATE FUNCTION dbo.isoweek (@DATE datetime) RETURNS int WITH EXECUTE AS CALLER AS BEGIN DECLARE @ISOweek int; SET @ISOweek = DATEPART(wk, @DATE) + 1 - DATEPART(wk, CAST(DATEPART(yy, @DATE) AS char(4)) + '0104'); --Special cases Jan 1-3 may belong to the previous year IF (@ISOweek = 0) SET @ISOweek = dbo.ISOWEEK(CAST( DATEPART(yy, @DATE) - 1 AS char(4) ) + '12' + CAST(24 + DATEPART(day, @DATE) AS char(2))) + 1; --Special case Dec 29-31 may belong to the next year IF ( (DATEPART(mm, @DATE) = 12) AND ((DATEPART(dd, @DATE) - DATEPART(dw, @DATE)) >= 28) ) SET @ISOweek = 1; RETURN(@ISOweek); END; GO configs: core: dialect: tsql test_pass_ignore_templated_whitespace: pass_str: | SELECT c1, {{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_1: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_2: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_fail_ignore_templated_whitespace_3: fail_str: | SELECT c1, d{{ " c2" }} FROM my_table fix_str: | SELECT c1, d{{ " c2" }} FROM my_table configs: core: ignore_templated_areas: false test_pass_ignore_templated_whitespace_4: # Note the newline after c2. This causes "AS other_id" to be on a different # line in templated space, but not raw space. LT02 should ignore lines like # this. 
pass_str: | SELECT c1, {{ " c2\n" }} AS other_id FROM my_table test_pass_ignore_templated_newline_not_last_line: pass_str: | select * from {{ "\n\nmy_table" }} inner join my_table2 using (id) test_pass_ignore_templated_newline_last_line: pass_str: | select * from {{ "\n\nmy_table" }} test_fail_fix_template_indentation_1: fail_str: | SELECT c1, {{ "c2" }} fix_str: | SELECT c1, {{ "c2" }} test_fail_fix_template_indentation_2: fail_str: | with first_join as ( select {{ "c1" }}, c2 from helper {{ "group by 1" }} ) select * from first_join fix_str: | with first_join as ( select {{ "c1" }}, c2 from helper {{ "group by 1" }} ) select * from first_join test_pass_tsql_update_indent: pass_str: | update Extracts.itt_parm_base set DateF = convert(varchar, @from_date, 112), DateT = convert(varchar, @to_date, 112) configs: core: dialect: tsql test_pass_tsql_declare_indent: fail_str: | DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; fix_str: | DECLARE @prv_qtr_1st_dt DATETIME, @last_qtr INT, @last_qtr_first_mn INT, @last_qtr_yr INT; configs: core: dialect: tsql test_pass_tsql_set_indent: pass_str: | SET @prv_qtr_1st_dt = CAST(@last_qtr_yr AS VARCHAR(4)) + '-' + CAST(@last_qtr_first_mn AS VARCHAR(2)) + '-01' configs: core: dialect: tsql test_pass_tsql_set_indent_multiple_params: pass_str: | SET @param1 = 1, @param2 = 2 configs: core: dialect: tsql test_pass_tsql_if_indent: pass_str: | IF 1 > 1 AND 2 < 2 SELECT 1; configs: core: dialect: tsql test_pass_exasol_func_indent: pass_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6), p2 VARCHAR(10) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 IS NOT NULL AND p2 IS NOT NULL THEN IF p1 = 1 THEN res:= 'Hello World'; ELSE IF p2 = 3 THEN res:= 'ABC'; END IF; res:= 'WOHOOOO'; END IF; END IF; RETURN res; END schem.func; / configs: core: dialect: exasol test_fail_fix_exa_func_format: fail_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 = 1 THEN res:= 'Hello World'; END IF; RETURN res; END schem.func; / fix_str: | CREATE FUNCTION schem.func ( p1 VARCHAR(6) ) RETURN VARCHAR (20) IS res VARCHAR(20); BEGIN IF p1 = 1 THEN res:= 'Hello World'; END IF; RETURN res; END schem.func; / configs: core: dialect: exasol test_pass_tsql_index_indent: pass_str: | CREATE UNIQUE INDEX AK_UnitMeasure_Name ON Production.UnitMeasure(Name); configs: core: dialect: tsql test_pass_tsql_statistics_indent: pass_str: | CREATE STATISTICS [stat_ccode] ON [dbo].[CodeValues]([ccode]); configs: core: dialect: tsql test_fail_snowflake_merge_statement: fail_str: | merge into foo.bar as tgt using ( select foo::date as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; fix_str: | merge into foo.bar as tgt using ( select foo::date as bar from foo.bar where split(foo, '|')[2] REGEXP '^\\d+\\-\\d+\\-\\d+ \\d+\\:\\d+$' OR foo IN ('BAR','FOO') ) as src on src.foo = tgt.foo when matched then update set tgt.foo = src.foo ; configs: core: dialect: snowflake test_fail_hanging_indents_convert_to_normal_indent: # This takes advantage of new indent treatment in 2.0.x fail_str: | SELECT a.line + (a.with + a.hanging_indent) as actually_not_ok, FROM tbl as a fix_str: | SELECT a.line + ( a.with + a.hanging_indent ) as actually_not_ok, FROM tbl as a test_fail_hanging_indents_fix_mixed_indents: # The tab is removed. 
fail_str: | SELECT a.line + ( a.something_indented_well + least( a.good_example, -- there is a tab here a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a fix_str: | SELECT a.line + ( a.something_indented_well + least( a.good_example, -- there is a tab here a.bad_example, a.really_bad_example, a.nother_good_example ) ) as some_harder_problems FROM tbl as a test_pass_indented_procedure_parameters: pass_str: | CREATE OR ALTER PROCEDURE some_procedure @param1 int AS SELECT * FROM dbo configs: core: dialect: tsql test_fail_unindented_procedure_parameters: fail_str: | CREATE OR ALTER PROCEDURE someOtherProcedure @param1 nvarchar(100), @param2 nvarchar(20) AS SELECT * FROM dbo fix_str: | CREATE OR ALTER PROCEDURE someOtherProcedure @param1 nvarchar(100), @param2 nvarchar(20) AS SELECT * FROM dbo configs: core: dialect: tsql test_tsql_bubble_up_newline_after_fix: # Tests issue 3303, where an LT02 fix leaves a newline as the final child # segment that has to be "bubbled up" two levels to avoid violating the # _is_code_or_meta() check in core/parser/segments/base.py. fail_str: | create procedure name as begin drop table if exists #something end fix_str: | create procedure name as begin drop table if exists #something end configs: core: dialect: tsql test_tsql_cross_apply_indentation: # Test for behavior in issue #3672 pass_str: | SELECT table1.col, table2.col FROM table1 CROSS APPLY ( VALUES ((1), (2)) ) AS table2(col) INNER JOIN table3 ON table1.col = table3.col; configs: core: dialect: tsql test_tsql_cross_join_indentation: # Test for behavior in issue #3672 pass_str: | SELECT table1.col, table2.col FROM table1 CROSS JOIN table2 INNER JOIN table3 ON table1.col = table3.col; configs: core: dialect: tsql test_tsql_nested_join: # Test for behavior prior to issue #3672 fail_str: | SELECT table1.col, table2.col FROM table1 INNER JOIN table2 INNER JOIN table3 ON table1.col = table2.col AND table1.col = table3.col; fix_str: | SELECT table1.col, table2.col FROM table1 INNER JOIN table2 INNER JOIN table3 ON table1.col = table2.col AND table1.col = table3.col; configs: core: dialect: tsql test_tsql_outer_apply_indentation: # Test for behavior in issue #3685 pass_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col configs: core: dialect: tsql test_tsql_outer_apply_indentation_fix: # Test for behavior in issue #3685 fail_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col fix_str: | SELECT table1.* FROM table1 OUTER APPLY table2 INNER JOIN table3 ON table1.col = table3.col configs: core: dialect: tsql test_fail_consuming_whitespace_a: # Test that this works even with tags which consume whitespace. fail_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} fix_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_fail_consuming_whitespace_b: # Additional test to make sure that crazy things don't happen # with the first newline. fail_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} fix_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_pass_consuming_whitespace_stable: # Test for stability in fixes with loops and consuming tags. 
# https://github.com/sqlfluff/sqlfluff/issues/3185 pass_str: | {% for item in [1, 2] -%} SELECT * FROM some_table {{ 'UNION ALL\n' if not loop.last }} {%- endfor %} test_fail_trailing_comments: # Additional test to make sure that crazy things don't happen # with the first newline. fail_str: | SELECT 1 -- foo -- bar fix_str: | SELECT 1 -- foo -- bar test_fail_case_statement: # Test for issue with case statement indentation: # https://github.com/sqlfluff/sqlfluff/issues/3836 fail_str: | SELECT foo , CASE WHEN 1 = 1 THEN 2 END AS example FROM tbl fix_str: | SELECT foo , CASE WHEN 1 = 1 THEN 2 END AS example FROM tbl configs: indentation: tab_space_size: 2 test_pass_templated_case_statement: # Test for template block in case statement indentation # https://github.com/sqlfluff/sqlfluff/issues/3988 pass_str: | {%- set json_keys = ["a", "b", "c"] -%} with dummy as ( select {% for json_key in json_keys -%} case when 1 = 1 {% if json_key in ["b"] %} then 0 {% else %} then 1 {% endif %} else null end as {{ json_key }}_suffix{% if not loop.last %}, {% endif %} {% endfor %} ) select * from dummy test_pass_jinja_tag_multiline: # Test that jinja block tags which contain newlines # aren't linted, because we can't reliably fix them. # The default fixing routine would only move the # start of the tag, which is ok but potentially strange. # TODO: At some point we should find a better solution for # this. pass_str: | SELECT 1, {{ "my_jinja_tag_with_odd_indents" }}, 2, {% if True %} 3, -- NOTE: indented because within block {% endif %} 4 test_pass_trailing_inline_noqa: pass_str: | SELECT col1, col2 FROM table1 -- noqa: CV09 test_pass_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. pass_str: | SELECT * FROM foo WHERE a AND b configs: indentation: allow_implicit_indents: true test_fail_deny_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. fail_str: | SELECT * FROM foo WHERE a AND b fix_str: | SELECT * FROM foo WHERE a AND b configs: indentation: allow_implicit_indents: false test_pass_templated_newlines: # NOTE: The macro has many newlines in it, # and the calling of it is indented. Check that # this doesn't panic. pass_str: | {% macro my_macro() %} macro + with_newlines {% endmacro %} SELECT {{ my_macro() }} as awkward_indentation FROM foo test_fail_fix_beside_templated: # Check that templated code checks aren't too aggressive. # https://github.com/sqlfluff/sqlfluff/issues/4215 fail_str: | {% if False %} SELECT 1 {% else %} SELECT c FROM t WHERE c < 0 {% endif %} fix_str: | {% if False %} SELECT 1 {% else %} SELECT c FROM t WHERE c < 0 {% endif %} test_pass_block_comment: # Check that subsequent block comment lines are ok to be indented. # https://github.com/sqlfluff/sqlfluff/issues/4224 pass_str: | SELECT /* This comment is unusually indented - and contains - even more indents */ foo FROM bar test_fix_block_comment: # Check other comments are still fixed. # https://github.com/sqlfluff/sqlfluff/issues/4224 fail_str: | SELECT -- bad -- good foo, /* bad */ foo_bad, /* long comment which should keep indent - including this */ good_foo, /* and this this is ok this is NOT ok */ bar FROM tbl fix_str: | SELECT -- bad -- good foo, /* bad */ foo_bad, /* long comment which should keep indent - including this */ good_foo, /* and this this is ok this is NOT ok */ bar FROM tbl test_fail_case_else_end_clause: # Checks linting of missing newline in CASE statement. 
# More specifically this is a case of a multi-dedent # not being handled properly when one of the indents # it covers is taken, but the other is untaken. # https://github.com/sqlfluff/sqlfluff/issues/4222 fail_str: | select case when a then 'abc' when b then 'def' else 'ghi' end as field, bar from foo fix_str: | select case when a then 'abc' when b then 'def' else 'ghi' end as field, bar from foo test_fail_hard_templated_indents: # Test for consumed initial indents and consumed line indents. # https://github.com/sqlfluff/sqlfluff/issues/4230 # NOTE: We're using a block indentation indicator because the # test query has initial leading whitespace. # https://yaml.org/spec/1.2.2/#8111-block-indentation-indicator fail_str: |2 {%- if true -%} SELECT * FROM {{ "t1" }} {%- endif %} fix_str: |2 {%- if true -%} SELECT * FROM {{ "t1" }} {%- endif %} test_fail_fix_consistency_around_comments: # Check that comments don't make fixes inconsistent. # https://github.com/sqlfluff/sqlfluff/issues/4223 fail_str: | select case when a then b end as foo, case when a -- bar then b end as bar from c fix_str: | select case when a then b end as foo, case when a -- bar then b end as bar from c test_fail_coverage_indent_trough: # This test primarily tests the handling of closing trough indents fail_str: | WITH bar as (SELECT 1 FROM foo) SELECT a FROM bar fix_str: | WITH bar as ( SELECT 1 FROM foo ) SELECT a FROM bar test_pass_combined_comment_impulses: # This tests issue #4252 # https://github.com/sqlfluff/sqlfluff/issues/4252 pass_str: | WITH cte AS ( SELECT * FROM ( SELECT * FROM table WHERE NOT bool_column AND NOT bool_column AND some_column >= 1 -- This is a comment ) ), SELECT * FROM cte ; SELECT * FROM table3 ; test_indented_comment_tsql: # TSQL redefines the block_comment. This checks that is done correctly. # https://github.com/sqlfluff/sqlfluff/issues/4249 pass_str: | /* Author: tester Create date: 2021-03-16 */ SELECT 1 AS a configs: core: dialect: tsql test_pass_join_comment_indents_1: # https://github.com/sqlfluff/sqlfluff/issues/4291 pass_str: | select * from a left join b -- comment on (a.x = b.x) test_pass_join_comment_indents_2: # https://github.com/sqlfluff/sqlfluff/issues/4291 pass_str: | select * from a left join b -- comment on (a.x = b.x) test_comment_effect_indents_default: # https://github.com/sqlfluff/sqlfluff/issues/4294 fail_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE fix_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE test_comment_effect_indents_implicit: # https://github.com/sqlfluff/sqlfluff/issues/4294 fail_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE fix_str: | SELECT * FROM table WHERE TRUE -- comment AND TRUE configs: indentation: allow_implicit_indents: true test_untaken_negative_1: # https://github.com/sqlfluff/sqlfluff/issues/4234 fail_str: | CREATE TABLE mytable AS (SELECT id, user_id FROM another_table ) ; fix_str: | CREATE TABLE mytable AS ( SELECT id, user_id FROM another_table ) ; test_untaken_negative_2: # https://github.com/sqlfluff/sqlfluff/issues/4234 fail_str: | WITH m AS (SELECT firstCol , secondCol FROM dbo.myTable ) SELECT * FROM m fix_str: | WITH m AS ( SELECT firstCol , secondCol FROM dbo.myTable ) SELECT * FROM m test_untaken_negative_implicit: # NOTE: Check that implicit indents don't # apply before single brackets. 
pass_str: | SELECT * FROM foo WHERE ( a = b ) GROUP BY a configs: indentation: allow_implicit_indents: true test_fail_mixed_tabs_and_spaces: # NOTE: This used to be L002 (rather than L003) fail_str: "SELECT\n \t 1" fix_str: "SELECT\n 1" test_fix_implicit_indents_4467_a: # https://github.com/sqlfluff/sqlfluff/issues/4467 fail_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b fix_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b configs: indentation: allow_implicit_indents: true test_fix_implicit_indents_4467_b: # https://github.com/sqlfluff/sqlfluff/issues/4467 pass_str: | SELECT * FROM d LEFT JOIN l ON d.a = l.a AND d.b = l.b configs: indentation: allow_implicit_indents: true tab_space_size: 2 test_fix_macro_indents_4367: # https://github.com/sqlfluff/sqlfluff/issues/4367 fail_str: | {% macro my_macro(col) %} {{ col }} {% endmacro %} SELECT something, {{ my_macro("mycol") }}, something_else FROM mytable fix_str: | {% macro my_macro(col) %} {{ col }} {% endmacro %} SELECT something, {{ my_macro("mycol") }}, something_else FROM mytable test_fix_untaken_positive_4433: # https://github.com/sqlfluff/sqlfluff/issues/4433 fail_str: | CREATE TABLE mytable AS (SELECT id, user_id FROM another_table WHERE TRUE ) ; fix_str: | CREATE TABLE mytable AS ( SELECT id, user_id FROM another_table WHERE TRUE ) ; test_implicit_case_4542: # https://github.com/sqlfluff/sqlfluff/issues/4542 pass_str: | select a, case when b is null then 0 else 1 end as c from my_table; configs: indentation: allow_implicit_indents: true test_indented_joins_4484: # https://github.com/sqlfluff/sqlfluff/issues/4484 pass_str: | select * from table_1 inner join table_2 on table_1.key = table_2.key inner join table_3 on table_2.key = table_3.key configs: indentation: indented_joins: true test_tsql_where_implicit_4559: # https://github.com/sqlfluff/sqlfluff/issues/4559 pass_str: | SELECT t.col1 WHERE t.col2 = 'foo' AND t.col3 = 'bar' configs: core: dialect: tsql indentation: allow_implicit_indents: true test_jinja_nested_tracking: # This tests the caching features of BlockTracker # in the lexer. If that's not functioning properly # the indentation of the nested jinja blocks in this # query will likely fail. 
pass_str: | SELECT * FROM {% for action in ['a', 'b'] %} {% if loop.first %} {{action}}_var {% else %} JOIN {{action}}_var USING (c, d, e) {% endif %} {% endfor %} test_configure_no_indent_before_then_4589: # THEN can be configured to not be indented pass_str: | SELECT a, CASE WHEN b >= 42 THEN 1 ELSE 0 END AS c FROM some_table configs: core: dialect: ansi indentation: indented_then: false test_bigquery_insert_statement_values_clause: pass_str: | INSERT dataset.inventory (product, quantity) VALUES("top load washer", 10); configs: core: dialect: bigquery test_bigquery_merge_statement_values_clause: fail_str: | MERGE dataset.detailedinventory AS t USING dataset.inventory AS s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, FALSE); fix_str: | MERGE dataset.detailedinventory AS t USING dataset.inventory AS s ON t.product = s.product WHEN NOT MATCHED AND quantity < 20 THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, TRUE) WHEN NOT MATCHED THEN INSERT (product, quantity, supply_constrained) VALUES (product, quantity, FALSE); configs: core: dialect: bigquery test_fail_issue_4680: # NOTE: It doesn't reindent the second clause, but the important # thing is that we don't get an exception. fail_str: | SELECT col1 FROM table WHERE {% if true %} col1 > 1 {% else %} col1 > 0 {% endif %} fix_str: | SELECT col1 FROM table WHERE {% if true %} col1 > 1 {% else %} col1 > 0 {% endif %} test_implicit_indent_when: fail_str: | SELECT col1, CASE WHEN col2 = 1 THEN col2 + 1 END AS col2 FROM table1 fix_str: | SELECT col1, CASE WHEN col2 = 1 THEN col2 + 1 END AS col2 FROM table1 configs: indentation: allow_implicit_indents: true indented_then: false test_implicit_indent_nested_when: fail_str: | SELECT col1, CASE WHEN col2 = 1 THEN CASE WHEN col2 = 2 THEN col2 + 1 END END AS col2 FROM table1 fix_str: | SELECT col1, CASE WHEN col2 = 1 THEN CASE WHEN col2 = 2 THEN col2 + 1 END END AS col2 FROM table1 configs: indentation: allow_implicit_indents: true indented_then: false indented_then_contents: false test_fail_issue_4745: fail_str: | with {% for a in [1, 2, 3] %}{% for b in ['C'] %} {{ b }}_fill_{{ a }} as ( select * from data ), {% endfor %}{% endfor %} select 1 fix_str: | with {% for a in [1, 2, 3] %}{% for b in ['C'] %} {{ b }}_fill_{{ a }} as ( select * from data ), {% endfor %}{% endfor %} select 1 test_pass_trailing_comment_1: # NOTE: This checks that we allow the alternative placement of comments pass_str: | select bar -- comment from foo test_pass_trailing_comment_2: # NOTE: This checks that we allow the alternative placement of comments pass_str: | select bar /* comment with more lines */ from foo test_pass_issue_4582: # https://github.com/sqlfluff/sqlfluff/issues/4582 pass_str: | select a.col /* Multi line comment 1 */ from a /* Multi line comment 2 */ inner join b on a.id = b.id; select a.col /* Single line comment 1 */ from a /* Single line comment 2 */ inner join b on a.id = b.id test_pass_issue_4540: # https://github.com/sqlfluff/sqlfluff/issues/4540 pass_str: | with cte as ( select a from b qualify row_number() over ( partition by a ) = 1 ) select a from cte qualify row_number() over ( partition by a ) = 1; configs: core: dialect: snowflake test_pass_closed_bracketed_implicit: pass_str: | select * from a where (b = a) and (c = d) configs: indentation: allow_implicit_indents: true 
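# (Descriptive sketch of the contrast between the case above and the case
# below: when every bracket pair opens and closes on the same line, no
# implicit indent is ever taken and the query passes as-is; when a bracket
# stays open across lines, the fix explodes its contents onto their own
# indented lines.)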
test_fix_unclosed_bracketed_implicit: fail_str: | select * from a where (b = a and c = d) fix_str: | select * from a where ( b = a and c = d ) configs: indentation: allow_implicit_indents: true test_pass_implicit_where: pass_str: | SELECT a FROM b WHERE c = d AND e = f ; configs: indentation: allow_implicit_indents: true test_pass_templated_join: # See: https://github.com/sqlfluff/sqlfluff/issues/5290 pass_str: | select * from a {% if True %} left join b using(x) {% endif %} test_whitespace_control_issue_5277: # https://github.com/sqlfluff/sqlfluff/issues/5277 fail_str: | WITH a AS ( SELECT * FROM tbl WHERE TRUE {% if True -%} AND b > (SELECT 1 FROM {{ this }}) {%- endif %} ) select * from a fix_str: | WITH a AS ( SELECT * FROM tbl WHERE TRUE {% if True -%} AND b > (SELECT 1 FROM {{ this }}) {%- endif %} ) select * from a test_inconsistent_indent: # In specific circumstances the indentation algorithm can behave # unexpectedly, this is a test case to catch one, where the # fix was unexpected. # https://github.com/sqlfluff/sqlfluff/issues/5277 fail_str: | WITH x AS ( SELECT o.p AS p {% for action in ["a", "b"] %} , n.campaign_count_{{ action }} {% endfor %} FROM o ) SELECT * FROM x fix_str: | WITH x AS ( SELECT o.p AS p {% for action in ["a", "b"] %} , n.campaign_count_{{ action }} {% endfor %} FROM o ) SELECT * FROM x sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT02-tab-space.yml000066400000000000000000000030431451700765000253720ustar00rootroot00000000000000rule: LT02 spaces_pass_default: pass_str: "SELECT\n 1" spaces_fail: fail_str: "SELECT\n 1" fix_str: "SELECT\n\t1" configs: rules: indent_unit: tab spaces_fail_custom_tab_space_size: fail_str: "SELECT\n MAX(\n a\n )" fix_str: "SELECT\n\tMAX(\n\t\ta\n\t)" configs: rules: indent_unit: tab tab_space_size: 2 tabs_fail_default: fail_str: "SELECT\n\t\t1\n" fix_str: "SELECT\n 1\n" tabs_fail_default_set_tab_space_size: fail_str: "SELECT\n\t\t1\n" fix_str: "SELECT\n 1\n" configs: rules: tab_space_size: 2 tabs_pass: pass_str: "SELECT\n\t1" configs: rules: indent_unit: tab indented_comments: pass_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam indented_comments_default_config: fail_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam # The rule will only fix the indent before the select targets. # Here tab indent is replaced with spaces. fix_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam indented_comments_tab_config: fail_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam # The rule will only fix the indent before the select targets. # Here spaces indent is replaced with tab. fix_str: | SELECT a, -- Some comment longer_col -- A lined up comment FROM spam configs: rules: indent_unit: tab sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT03.yml000066400000000000000000000107001451700765000235340ustar00rootroot00000000000000rule: LT03 passes_on_before_default: pass_str: | select a + b from foo fails_on_after_default: fail_str: | select a + b from foo fix_str: | select a + b from foo fails_on_after_default_on_and: fail_str: | select a AND b from foo fix_str: | select a AND b from foo fails_on_after_with_comment_order_preserved: fail_str: | select a AND -- comment1! -- comment2! b from foo fix_str: | select a -- comment1! -- comment2! 
AND b from foo passes_on_before_explicit: pass_str: | select a + b from foo configs: &operator_after layout: type: binary_operator: line_position: leading comparison_operator: line_position: leading fails_on_after_explicit: fail_str: | select a + b from foo fix_str: | select a + b from foo configs: *operator_after fails_on_after_multi_explicit: fail_str: | SELECT * FROM foo WHERE (g > z) AND ( (a = 'a' AND b = 'b') OR (a = 'a' AND b = 'e') OR (a = 'c' AND b = 'g') OR (a = 'c' AND b = 'e') OR 1 = 1 ); fix_str: | SELECT * FROM foo WHERE (g > z) AND ( (a = 'a' AND b = 'b') OR (a = 'a' AND b = 'e') OR (a = 'c' AND b = 'g') OR (a = 'c' AND b = 'e') OR 1 = 1 ); configs: *operator_after fails_on_before_override: fail_str: | select a + b from foo fix_str: | select a + b from foo configs: &operator_before layout: type: binary_operator: line_position: trailing comparison_operator: line_position: trailing passes_on_after_override: pass_str: | select a + b from foo configs: *operator_before fails_on_before_override_with_comment_order: fail_str: | select a -- comment1! -- comment2! -- comment3! + b from foo fix_str: | select a + -- comment1! -- comment2! -- comment3! b from foo configs: *operator_before fails_on_after_override_with_comment_order: fail_str: | select a + -- comment1! -- comment2! -- comment3! b from foo fix_str: | select a -- comment1! -- comment2! -- comment3! + b from foo configs: *operator_after # Fix the different variations of problematic comments in a leading case. fails_leading_with_comments: fail_str: | SELECT 1 + /* foo */ 2, 1 + -- foo 2, 1 /* foo */ + 2, 1 /* foo */ + -- foo /* foo */ 2 fix_str: | SELECT 1 /* foo */ + 2, 1 -- foo + 2, 1 /* foo */ + 2, 1 /* foo */ -- foo /* foo */ + 2 configs: *operator_after # Fix the different variations of problematic comments in a trailing case. fails_trailing_with_comments: fail_str: | SELECT 1 + /* foo */ 2, 1 -- foo + 2, 1 /* foo */ + 2, 1 -- foo /* foo */ + /* foo */ 2 fix_str: | SELECT 1 + /* foo */ 2, 1 + -- foo 2, 1 + /* foo */ 2, 1 + -- foo /* foo */ /* foo */ 2 configs: *operator_before passes_templated_newline: pass_str: | {% macro binary_literal(expression) %} X'{{ expression }}' {% endmacro %} select * from my_table where a = {{ binary_literal("0000") }} fails_templated_code_non_templated_newline: fail_str: | {% macro binary_literal(expression) %} X'{{ expression }}' {% endmacro %} select * from my_table where a = {{ binary_literal("0000") }} passes_operator_alone_on_line: # Special case: An operator on a line by itself is always okay. pass_str: | SELECT 'asdf' || 'jklm' fixes_tuple_error_issue: # https://github.com/sqlfluff/sqlfluff/issues/4184 # NB: This one isn't fixable. fail_str: | select * from foo where c is not null and -- comment {% if true -%}a >= b and -- comment. 
{% endif %} true configs: indentation: template_blocks_indent: false sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT04.yml000066400000000000000000000174251451700765000235500ustar00rootroot00000000000000rule: LT04 leading_comma_violations: fail_str: | SELECT a , b FROM c fix_str: | SELECT a, b FROM c leading_comma_violation_with_inline_comment: fail_str: | SELECT a , b -- inline comment , c /* non inline comment */ , d FROM e fix_str: | SELECT a, b, -- inline comment c, /* non inline comment */ d FROM e leading_commas_allowed: pass_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading leading_commas_allowed_with_header: pass_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading leading_comma_violations_in_with_statement: fail_str: | WITH cte_1 as ( SELECT * FROM table_1 ) , cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 fix_str: | WITH cte_1 as ( SELECT * FROM table_1 ), cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 leading_commas_allowed_in_with_statement: pass_str: | WITH cte_1 as ( SELECT * FROM table_1 ) , cte_2 as ( SELECT * FROM table_2 ) SELECT * FROM table_3 configs: layout: type: comma: line_position: leading trailing_comma_violations: fail_str: | SELECT a, b FROM c fix_str: | SELECT a , b FROM c configs: layout: type: comma: line_position: leading trailing_commas_allowed: pass_str: | SELECT a, b FROM c trailing_comma_fixing_removes_extra_whitespace: fail_str: | SELECT field_1 , field_2 ,field_3, field_4, field_5 FROM a fix_str: | SELECT field_1, field_2, field_3, field_4, field_5 FROM a leading_comma_fixing_flows_around_comments: fail_str: | SELECT a.foo -- another comment , a.baz, -- another comment a.bar, -- This is an important comment with awkward line spacing a.foobar /* Which might also be followed by a multiline one */ , a.barfoo FROM a WHERE a.field4 in ( '1', '2', '3' ,'4' ) fix_str: | SELECT a.foo -- another comment , a.baz -- another comment , a.bar -- This is an important comment with awkward line spacing , a.foobar /* Which might also be followed by a multiline one */ , a.barfoo FROM a WHERE a.field4 in ( '1' , '2' , '3' ,'4' ) configs: layout: type: comma: line_position: leading trailing_comma_move_past_several_comment_lines: fail_str: | SELECT COUNT(1) AS campaign_count, state_user_v_peer_open ,business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. ,SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states fix_str: | SELECT COUNT(1) AS campaign_count, state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. 
SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states configs: core: # Set runaway_limit=2 to verify the fix only requires one pass. In an # earlier version, the comma before "SAFE_DIVIDE()" was being moved one # line per pass. Too lazy! runaway_limit: 2 leading_comma_move_past_several_comment_lines: fail_str: | SELECT COUNT(1) AS campaign_count ,state_user_v_peer_open, business_type, -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states fix_str: | SELECT COUNT(1) AS campaign_count ,state_user_v_peer_open , business_type -- The following is the slope of the regression line. Note that CORR (which is the Pearson's correlation -- coefficient is symmetric in its arguments, but since STDDEV_POP(open_rate_su) appears in the -- numerator this is the slope of the regression line considering STDDEV_POP(open_rate_su) to be -- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. , SAFE_DIVIDE(SAFE_MULTIPLY(CORR(open_rate_su, uses_small_subject_line), STDDEV_POP(open_rate_su)), STDDEV_POP(uses_small_subject_line)) FROM global_actions_states configs: core: # Set runaway_limit=2 to verify the fix only requires one pass. In an # earlier version for the trailing comma case, commas were being moved # "through" comment blocks one line per pass. Too lazy! runaway_limit: 2 layout: type: comma: line_position: leading leading_comma_with_templated_column_1: fail_str: | SELECT c1, {{ "c2" }} AS days_since FROM logs fix_str: | SELECT c1 , {{ "c2" }} AS days_since FROM logs configs: layout: type: comma: line_position: leading leading_comma_with_templated_column_2: pass_str: | SELECT c1 , {{ " c2" }} AS days_since FROM logs configs: layout: type: comma: line_position: leading trailing_comma_with_templated_column_1: fail_str: | SELECT {{ "c1" }} , c2 AS days_since FROM logs fix_str: | SELECT {{ "c1" }}, c2 AS days_since FROM logs trailing_comma_with_templated_column_2: pass_str: | SELECT {{ "c1 " }}, c2 AS days_since FROM logs leading_comma_fix_mixed_indent: # See: https://github.com/sqlfluff/sqlfluff/issues/4255 # NOTE: Undisturbed mixed indent. fail_str: | select B ,C from A fix_str: | select B, C from A configs: layout: type: comma: line_position: trailing trailing_comma_fix_mixed_indent: # See: https://github.com/sqlfluff/sqlfluff/issues/4255 # NOTE: Undisturbed mixed indent. 
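# (Sketch of what "undisturbed" means here, inferred from this fixture pair:
# the rule only relocates the comma, and the pre-existing mixed tab/space
# indentation on the neighbouring line is deliberately left as-is for the
# indentation rule, LT02, to repair separately.)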
fail_str: | select B, C from A fix_str: | select B , C from A configs: layout: type: comma: line_position: leading sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT05.yml000066400000000000000000000566011451700765000235500ustar00rootroot00000000000000rule: LT05 test_pass_line_too_long_config_override: # Long lines (with config override) pass_str: "SELECT COUNT(*) FROM tbl\n" configs: core: max_line_length: 30 test_fail_line_too_long_with_comments_1: # Check we move comments correctly fail_str: "SELECT 1 -- Some Comment\n" fix_str: "-- Some Comment\nSELECT 1\n" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_1_after: # Check we move comments correctly fail_str: "SELECT 1 -- Some Comment\n" fix_str: "SELECT 1\n-- Some Comment\n" configs: core: max_line_length: 18 indentation: trailing_comments: after test_fail_line_too_long_with_comments_1_no_newline: # Check we move comments correctly, and that it # still works when there isn't a trailing newline. # https://github.com/sqlfluff/sqlfluff/issues/4386 fail_str: "SELECT 1 -- Some Comment" fix_str: "-- Some Comment\nSELECT 1" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_2: # Check we can add newlines after dedents (with an indent). # NOTE: That for LT05, we don't repair the initial indent # but that the following lines will be fixed as though it # has been corrected. Ideally LT02 would have been run _first_ # on this file. fail_str: " SELECT COUNT(*) FROM tbl\n" fix_str: " SELECT COUNT(*)\nFROM tbl\n" configs: core: max_line_length: 20 test_fail_line_too_long_with_comments_3: # Check priority of fixes fail_str: "SELECT COUNT(*) FROM tbl -- Some Comment\n" fix_str: "-- Some Comment\nSELECT COUNT(*)\nFROM tbl\n" configs: core: max_line_length: 18 test_fail_line_too_long_with_comments_4: # In this case, the inline comment is NOT on a line by itself (note the # leading comma), but even if we move it onto a line by itself, it's still # too long. In this case, the rule should do nothing, otherwise it triggers # an endless cycle of "fixes" that simply keeps adding blank lines. fail_str: | SELECT c1 ,-- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. c2 configs: core: max_line_length: 80 test_pass_line_too_long_with_comments_ignore_comment_lines: # Same case as above, but should pass as ignore_comment_lines is set to true pass_str: | SELECT c1 ,-- the "y variable" and uses_small_subject_line to be the "x variable" in terms of the regression line. 
c2 configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: true test_fail_line_too_long_only_comments: # Check long lines that are only comments are linted correctly fail_str: "-- Some really long comments on their own line\n\nSELECT 1" configs: core: max_line_length: 18 test_fail_line_too_long_handling_indents: # Check we handle indents nicely fail_str: "SELECT 12345\n" fix_str: "SELECT\n 12345\n" configs: core: max_line_length: 10 test_pass_line_too_long_ignore_comments_true: # Check we can ignore comments if we want pass_str: "SELECT 1\n-- Some long comment over 10 characters\n" configs: core: max_line_length: 10 rules: layout.long_lines: ignore_comment_lines: true test_pass_line_too_long_ignore_comments_false: # Check we still pick up long comments if we don't want to ignore fail_str: "SELECT 1\n-- Some long comment over 10 characters\n" configs: core: max_line_length: 10 rules: layout.long_lines: ignore_comment_lines: false test_compute_line_length_before_template_expansion_1: # Line 3 is fine before expansion. Too long after expansion is NOT considered # a violation. pass_str: | SELECT user_id FROM `{{bi_ecommerce_orders}}` {{table_at_job_start}} configs: core: dialect: bigquery templater: jinja: context: table_at_job_start: FOR SYSTEM_TIME AS OF CAST('2021-03-02T01:22:59+00:00' AS TIMESTAMP) bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders test_compute_line_length_before_template_expansion_2: # Line 3 is too long before expansion. It's fine after expansion, but the rule # does not look at that. fail_str: | SELECT user_id FROM `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}} fix_str: | SELECT user_id FROM `{{bi_ecommerce_orders_bi_ecommerce_orders}}` AS {{table_alias_table_alias_table_alias_table_alias_table_alias_table_alias}} violations_after_fix: # Even after fixing, the final line is still too long. - description: Line is too long (86 > 80). line_no: 4 line_pos: 9 name: layout.long_lines configs: core: dialect: bigquery templater: jinja: context: bi_ecommerce_orders_bi_ecommerce_orders: bq-business-intelligence.user.ecommerce_orders table_alias_table_alias_table_alias_table_alias_table_alias_table_alias: t test_long_jinja_comment: fail_str: | SELECT * {# comment #} {# ........................................................................... #} FROM table configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: false test_long_jinja_comment_ignore: # A Jinja comment is a comment. pass_str: | SELECT * {# comment #} {# ........................................................................... 
#} FROM table configs: core: max_line_length: 80 rules: layout.long_lines: ignore_comment_lines: true test_for_loop: # A Jinja for loop pass_str: | {% for elem in 'foo' %} SELECT '{{ elem }}' FROM table1; SELECT '{{ elem }}' FROM table2; {% endfor %} test_for_loop_repeating_elements_starts_with_literal: # A Jinja for loop with repeating elements (that are difficult to match) # but starting with a literal that can be used to match pass_str: | {% set elements = 'foo' %} SELECT CASE {% for elem in elements %} WHEN '{{ elem }}' = '' THEN 1 WHEN '{{ elem }}' = '' THEN 1 {% endfor %} END test_for_loop_starting_with_templated_piece: # A Jinja for loop starting with non-literals # But unique parts can be used to match pass_str: | {% set elements = 'foo' %} {% set when = 'WHEN' %} SELECT CASE {% for elem in elements %} {{ when }} '{{ elem }}' = '' THEN 1 {{ when }} '{{ elem }}' = '' THEN 2 {% endfor %} END test_for_loop_fail_complex_match: # A Jinja for loop starting with non-literals # But non-unique parts which therefore cannot # be used to match pass_str: | {% set elements = 'foo' %} {% set when = 'WHEN' %} SELECT CASE {% for elem in elements %} {{ when }} '{{ elem }}' = '' THEN 1 {{ when }} '{{ elem }}' = '' THEN 1 {% endfor %} END test_for_loop_fail_simple_match: # If for loop only contains literals it should still pass pass_str: | {% set elements = 'foo' %} SELECT CASE {% for elem in elements %} WHEN 'f' THEN a {% endfor %} END test_set_statement: # A Jinja set statement pass_str: | {% set statement = "SELECT 1 from table1;" %} {{ statement }}{{ statement }} configs: core: max_line_length: 80 test_issue_1666_line_too_long_unfixable_jinja: # Note the trailing space at the end of line 1. This is a necessary part of # the test, because the space (which is passed through to the output) was # "tricking" LT05 into trying to split the line, then encountering an internal # error. 
fail_str: "{{ config (schema='bronze', materialized='view', sort =['id','number'], dist = 'all', tags =['longlonglonglonglong']) }} \n\nselect 1\n" test_fail_ignore_comment_clauses_1: # Too long, comment clause not ignored fail_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) test_fail_ignore_comment_clauses_2: # Too long even after ignoring comment clause fail_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', colaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccddddddddddddddddeeeeeeeeeeeeeee2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) configs: rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses: pass_str: | CREATE OR REPLACE TABLE mytable ( col1 NUMBER COMMENT 'col1 comment', col2 BOOLEAN COMMENT 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length', col3 NUMBER COMMENT 'col3 comment' ) configs: rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_teradata: pass_str: | comment on table sandbox_db.Org_Descendant is 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'; configs: core: dialect: teradata rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_exasol: pass_str: | CREATE TABLE IF NOT EXISTS SCHEM.TAB ( ID DECIMAL(18, 0) IDENTITY CONSTRAINT PRIMARY KEY DISABLE COMMENT IS 'without constraint name' ) COMMENT IS 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length'; configs: core: dialect: exasol rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_snowflake: pass_str: | CREATE TABLE foo_table (bar INTEGER) COMMENT = 'very long comment line, exceeding max_line_lengthvery long comment line, exceeding max_line_length very long comment line, exceeding max_line_length very long comment line, exceeding max_line_length' configs: core: dialect: snowflake rules: layout.long_lines: ignore_comment_clauses: true test_pass_ignore_comment_clauses_postgres: pass_str: | CREATE TABLE IF NOT EXISTS foo ( id UUID DEFAULT uuid_generate_v4() PRIMARY KEY, name TEXT NOT NULL ); COMMENT ON TABLE foo IS 'Windows Phone 8, however, was never able to overcome a long string of disappointments for Microsoft. '; configs: core: dialect: postgres rules: layout.long_lines: ignore_comment_clauses: true test_fail_templated_comment_line: fail_str: | SELECT * {# ........................................................................... #} FROM table configs: templater: jinja: context: {} test_pass_ignore_templated_comment_lines: # NOTE: This is potentially a behaviour change in 2.0.0. 
# This was erroneously using the `ignore_comment_clauses` # config when this query contains no comment clauses. pass_str: | SELECT * {# ........................................................................... #} FROM table configs: rules: layout.long_lines: ignore_comment_lines: true templater: jinja: context: {} test_fail_operator_precedence_1: # Make sure we split at the + operator. fail_str: | select ISNULL(count, '0') * 10000 + ISNULL(planned, 100) from blah fix_str: | select ISNULL(count, '0') * 10000 + ISNULL(planned, 100) from blah configs: core: max_line_length: 30 test_fail_operator_precedence_2: # Make sure we split at the AND operator. fail_str: | select recommendation_list[ORDINAL(1)] = 'uses_small_subject_line' AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64) from blah fix_str: | select recommendation_list[ORDINAL(1)] = 'uses_small_subject_line' AND uses_small_subject_line != CAST(effect_size_list[ORDINAL(1)] AS FLOAT64) from blah configs: core: max_line_length: 120 test_fail_operator_precedence_3: # Stretching cases for operators and comma fail_str: | select a, b + c, long_name + long_name * long_name - long_name as foo, long_name AND long_name OR long_name OR long_name as bar from blah fix_str: | select a, b + c, long_name + long_name * long_name - long_name as foo, long_name AND long_name OR long_name OR long_name as bar from blah configs: core: max_line_length: 30 test_pass_long_multiline_jinja: # None of the lines are longer than 30 # but the whole tag is. It shouldn't # cause issues. pass_str: | select {{ 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 }} from blah configs: core: max_line_length: 30 test_fail_long_inline_statement: # Tests that breaks happen between clauses properly fail_str: | select distinct a + b from c join d using (e) where f = g and h = i order by j fix_str: | select distinct a + b from c join d using (e) where f = g and h = i order by j configs: core: max_line_length: 50 test_pass_check_off_1: # Tests that we can disable the check (using 0). pass_str: | select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl configs: core: max_line_length: 0 test_pass_check_off_2: # Tests that we can disable the check (using -1). pass_str: | select my_really_really_really_really_really_really_really_really_really_really_really_long_var from tbl configs: core: max_line_length: -1 test_comment_move_mid_query: fail_str: | select my_long_long_line as foo -- with some comment from foo fix_str: | select -- with some comment my_long_long_line as foo from foo configs: core: max_line_length: 40 test_fix_implicit_indent: # Test for ImplicitIndent. # The theoretical indent between WHERE and "a" is implicit. 
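# (Illustrative summary, assuming only what the paired fixtures below show:
# with `allow_implicit_indents: true` the first part of an expression may
# stay on the same line as its clause keyword, e.g.
#   WHERE a_really_long_field
#       AND a_nother_really_long_field
# while with the option off, the whole expression is pushed onto its own
# indented line below the keyword.)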
fail_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field fix_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field configs: core: max_line_length: 45 indentation: allow_implicit_indents: true test_fix_no_implicit_indent: # Test explicitly preventing implicit indents. fail_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field fix_str: | SELECT CASE WHEN longer_and_longer AND much_much_much_longer THEN longer_and_longer AND much_much_much_longer ELSE longer_and_longer AND much_much_much_longer END as foobar, CASE WHEN a THEN b END as bar FROM foo WHERE a_really_long_field AND a_nother_really_long_field HAVING a_really_long_field AND a_nother_really_long_field configs: core: max_line_length: 45 indentation: allow_implicit_indents: false test_fix_window_function: # https://github.com/sqlfluff/sqlfluff/issues/4292 fail_str: | select * from t qualify a = coalesce( first_value(iff(b = 'none', null, a)) ignore nulls over (partition by c order by d desc), first_value(a) respect nulls over (partition by c order by d desc) ) fix_str: | select * from t qualify a = coalesce( first_value( iff(b = 'none', null, a) ) ignore nulls over (partition by c order by d desc), first_value(a) respect nulls over (partition by c order by d desc) ) configs: core: max_line_length: 50 dialect: snowflake test_fail_do_not_fix_noqa: # https://github.com/sqlfluff/sqlfluff/issues/4248 # NOTE: No fix_str, because this should be unfixable. fail_str: | SELECT col1, col2, col3 FROM really_really_really_really_really_really_long_schema_name.TABLE1 -- noqa: L014 test_block_comment_single_line_noqa: # Ignore long line in block comment pass_str: | /* A very long single-line block comment which is ignored --noqa: LT05 */ /* Another single-line block comment which is also ignored. I'm too lazy to specify which rule to ignore though. -- noqa */ /* A very long multi-line block comment which is ignored -- noqa: LT05 */ /* noqa: disable=all */ -- A noqa by itself in a block comment is followed /* noqa: enable=all */ /* -- noqa: disable=all */ -- A noqa in a block comment preceded by a single line comment marker (--) is followed /* noqa: enable=all */ /* Ignore warnings for a bit! -- noqa: disable=all */ -- If the noqa is accompanied by preceding text in a block comment it -- must be preceded by a single-line comment marker, and it must be -- the last part of the line. 
/* noqa: enable=all */ select 1 configs: core: max_line_length: 30 test_fail_block_comment_single_line_noqa: # Don't ignore long line in single-line block comment fail_str: | /* A very long single-line block comment which is not ignored */ configs: core: max_line_length: 30 test_fail_block_comment_multi_line_noqa: # Don't ignore long line in multi-line block comment fail_str: | /* A very long multi-line block comment which is not ignored */ configs: core: max_line_length: 30 test_fail_block_comment_noqa_following_preceding_text_noqa: # Don't follow noqa if it follows preceding text and is not # immediately preceded by a single line comment marker (--) fail_str: | /* This won't work: noqa: disable=all */ -- A noqa will NOT be followed if it follows preceding text, and is -- not immediately preceded by a single-line comment marker (--). /* noqa: enable=all */ configs: core: max_line_length: 30 test_fail_block_comment_noqa_not_at_end_of_line_noqa: # Don't follow noqa if it is not at the end of the line fail_str: | /* -- noqa: disable=all Invalid noqa declaration */ -- A noqa will NOT be followed if it is not the last part of the line. /* noqa: enable=all */ select 1 configs: core: max_line_length: 55 test_operator_precedence: fail_str: | SELECT * FROM foo left join abcdef_abcd_details on foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1 fix_str: | SELECT * FROM foo left join abcdef_abcd_details on foo.abcdefgh_id = abcdef_abcd_details.abcdefgh_id and abcdef_abcd_details.abcdef_abcdef_abcdef_abcdef = 1 configs: core: max_line_length: 100 dialect: snowflake test_long_functions_and_aliases: # https://github.com/sqlfluff/sqlfluff/issues/4033 fail_str: | SELECT my_function(col1 + col2, arg2, arg3) over (partition by col3, col4 order by col5 rows between unbounded preceding and current row) as my_relatively_long_alias, my_other_function(col6, col7 + col8, arg4) as my_other_relatively_long_alias, my_expression_function(col6, col7 + col8, arg4) = col9 + col10 as another_relatively_long_alias FROM my_table fix_str: | SELECT my_function(col1 + col2, arg2, arg3) over ( partition by col3, col4 order by col5 rows between unbounded preceding and current row ) as my_relatively_long_alias, my_other_function(col6, col7 + col8, arg4) as my_other_relatively_long_alias, my_expression_function(col6, col7 + col8, arg4) = col9 + col10 as another_relatively_long_alias FROM my_table test_order_by_rebreak_span: # This tests that we can correctly rebreak an "order by" expressions. fail_str: | select * from ( select tbl1.*, row_number() over ( partition by tbl1.the_name, {{ ['a', 'b', 'c', 'd'] | join(', ') }} order by created_at desc ) rnk from foo inner join tbl2 on tbl1.the_name = tbl2.the_name ) fix_str: | select * from ( select tbl1.*, row_number() over ( partition by tbl1.the_name, {{ ['a', 'b', 'c', 'd'] | join(', ') }} order by created_at desc ) rnk from foo inner join tbl2 on tbl1.the_name = tbl2.the_name ) test_trailing_semicolon_moves: # The checks that we don't move the semicolon or the comma. fail_str: | SELECT my_very_long_field, FROM foo ORDER BY my_very_long_field; fix_str: | SELECT my_very_long_field, FROM foo ORDER BY my_very_long_field; configs: core: dialect: bigquery max_line_length: 20 # After fixing there are still issues, but we're still keeping # the comma and semicolon where they are. violations_after_fix: - description: Line is too long (23 > 20). line_no: 2 line_pos: 5 name: layout.long_lines - description: Line is too long (23 > 20). 
line_no: 5 line_pos: 5 name: layout.long_lines test_pass_window_function: # Test that we don't flag too eagerly on window functions. pass_str: | select col, rank() over ( partition by a, b, c order by d desc ) as rnk from foo test_fail_no_fix_long_templated: # Test we fail but don't try and fix a long templated line fail_str: | select '{{ "', '".join(["foo", "bar", "whatever", "whatever", "whatever", "whatever"]) }}' sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT06.yml000066400000000000000000000005261451700765000235440ustar00rootroot00000000000000rule: LT06 passing_example: pass_str: SELECT SUM(1) passing_example_window_function: pass_str: SELECT AVG(c) OVER (PARTITION BY a) simple_fail: fail_str: SELECT SUM (1) fix_str: SELECT SUM(1) complex_fail_1: fail_str: SELECT SUM /* SOMETHING */ (1) complex_fail_2: fail_str: | SELECT SUM -- COMMENT (1) sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT07.yml000066400000000000000000000036261451700765000235510ustar00rootroot00000000000000rule: LT07 test_pass_with_clause_closing_aligned: # with statement indentation pass_str: | with cte as ( select 1 ) select * from cte test_pass_with_clause_closing_oneline: # with statement oneline pass_str: with cte as (select 1) select * from cte test_pass_with_clause_closing_misaligned_indentation: # Fix with statement indentation pass_str: | with cte as ( select 1 ) select * from cte test_pass_with_clause_closing_misaligned_negative_indentation: # Fix with statement that has negative indentation pass_str: |2 with cte as ( select 1 ) select * from cte test_move_parenthesis_to_next_line: fail_str: | with cte_1 as ( select foo from tbl_1) -- Foobar select cte_1.foo from cte_1 fix_str: | with cte_1 as ( select foo from tbl_1 ) -- Foobar select cte_1.foo from cte_1 test_pass_cte_with_column_list: # Issue 2851: Ignore the CTE column list, only check the query. pass_str: | with search_path (node_ids, total_time) as ( select 1 ) select * from search_path test_pass_with_clause_closing_misaligned_indentation_in_templated_block: pass_str: | with {% if true %} cte as ( select 1 ) {% else %} cte as ( select 2 ) {% endif %} select * from cte test_move_parenthesis_to_next_line_in_templated_block: fail_str: | with {% if true %} cte as ( select 1) {% endif %} select * from cte fix_str: | with {% if true %} cte as ( select 1 ) {% endif %} select * from cte test_pass_templated_clauses: pass_str: | with {% for tbl in ['a', 'b'] %} {{ tbl }} as ( SELECT 1 ), {% endfor %} final as ( SELECT 1 ) select * from final join a using (x) join b using (x) sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT08.yml000066400000000000000000000105751451700765000235530ustar00rootroot00000000000000rule: LT08 test_pass_blank_line_after_cte_trailing_comma: # Test cases for LT08, both leading and trailing commas. 
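# (Brief illustration of the two comma styles exercised in this file - a
# sketch, not an extra fixture. Trailing:
#   ),
#   other_cte as (
# Leading:
#   )
#   , other_cte as (
# In both styles the rule asks for a blank line after each CTE's closing
# bracket.)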
pass_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_pass_blank_line_after_cte_leading_comma: pass_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_each_cte: fail_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_cte_before_comment: fail_str: | with my_cte as ( select 1 ), -- Comment other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), -- Comment other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_cte_and_comment: # Issue #2136 fail_str: | WITH mycte AS ( SELECT col FROM my_table ) /* cte comment */ SELECT col FROM mycte fix_str: | WITH mycte AS ( SELECT col FROM my_table ) /* cte comment */ SELECT col FROM mycte test_fail_no_blank_line_after_last_cte_trailing_comma: fail_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ), other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_no_blank_line_after_last_cte_leading_comma: fail_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte fix_str: | with my_cte as ( select 1 ) , other_cte as ( select 1 ) select * from my_cte cross join other_cte test_fail_oneline_cte_leading_comma: # Fixes oneline cte with leading comma style fail_str: | with my_cte as (select 1), other_cte as (select 1) select * from my_cte cross join other_cte fix_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte # NOTE: we're using the global comma position config configs: layout: type: comma: line_position: leading test_fail_cte_floating_comma: # Fixes cte with a floating comma fail_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte fix_str: | with my_cte as (select 1) , other_cte as (select 1) select * from my_cte cross join other_cte test_pass_column_name_definition: # Issue #2136 pass_str: | with recursive t(n) as ( select 1 union all select n + 1 from t ) select n from t limit 100; test_pass_column_name_definition_multiple: # Issue #3474 pass_str: | WITH cte_1 AS ( SELECT 1 AS var ), cte_2 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; test_fail_column_name_definition_newline: fail_str: | WITH cte_1 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; fix_str: | WITH cte_1 (var) AS ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; test_fail_column_name_definition_comment: fail_str: | WITH cte_1 (var) AS /* random comment */ ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; fix_str: | WITH cte_1 (var) AS /* random comment */ ( SELECT 2 ) SELECT cte_1.var, cte_2.var FROM cte_1, cte_2; test_pass_recursive_with_argument_list: pass_str: | WITH RECURSIVE my_cte (n) AS ( select 1 ) select * from my_cte test_pass_recursive_with_argument_list_postgres: pass_str: | WITH RECURSIVE my_cte (n) AS ( select 1 ) select * from my_cte configs: core: dialect: postgres sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT09.yml000066400000000000000000000171051451700765000235500ustar00rootroot00000000000000rule: LT09 
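# (Descriptive note summarising what the fixtures below demonstrate, not the
# rule's full specification: LT09 governs whether select targets share a line
# with SELECT. Under the default configuration a lone wildcard may stay on
# the SELECT line, while setting
#   rules:
#     layout.select_targets:
#       wildcard_policy: multiple
# treats a wildcard like any other target and moves it onto its own line.)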
test_single_select_target_and_no_newline_between_select_and_select_target: pass_str: select a from x test_single_wildcard_select_target_and_no_newline_between_select_and_select_target_1: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_no_newline_between_select_and_select_target_2: pass_str: | select * from x test_single_select_target_and_newline_after_select_target_1: pass_str: | select * from x test_single_select_target_and_newline_after_select_target_2: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_select_target_and_newline_before_select_target: fail_str: | select a from x fix_str: | select a from x test_multiple_select_targets_on_newlines_and_newline_after_select: pass_str: | select a, b, c from x test_single_wildcard_select_target_and_newline_before_select_target_1: pass_str: | select * from x test_single_wildcard_select_target_and_newline_before_select_target_2: pass_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_1: fail_str: | select * from x fix_str: | select * from x configs: rules: layout.select_targets: wildcard_policy: multiple test_single_wildcard_select_target_and_newline_before_select_target_plus_from_on_same_line_2: fail_str: | select * from x fix_str: | select * from x test_multiple_select_targets_all_on_the_same_line: fail_str: | select a, b, c from x fix_str: "select\na,\nb,\nc\nfrom x\n" test_multiple_select_targets_all_on_the_same_line_plus_from_clause: fail_str: | select a, b, c from x fix_str: "select\na,\nb,\nc\nfrom x\n" test_multiple_select_targets_including_wildcard_all_on_the_same_line_plus_from_clause: fail_str: | select *, b, c from x fix_str: "select\n*,\nb,\nc\nfrom x\n" test_multiple_select_target_plus_from_clause_on_the_same_line: fail_str: | select a, b, c from x fix_str: | select a, b, c from x test_multiple_select_targets_trailing_whitespace_after_select: # TRICKY: Use explicit newlines to preserve the trailing space after "SELECT". pass_str: "SELECT \n a,\n b\nFROM t\n" test_single_select_with_comment_after_select: # Currently not autofixed because dealing with the comment is tricky. # Could be supported later. fail_str: | SELECT --some comment a test_comment_between_select_and_single_select_target: fail_str: | SELECT -- This is the user's ID. user_id FROM safe_user fix_str: | SELECT user_id -- This is the user's ID. FROM safe_user test_multiple_select_targets_some_newlines_missing_1: fail_str: | select a, b, c, d, e, f, g, h from x # The spaces before a, d, and h look odd, but these are places where the # select targets were already on a separate line, and the rule made no # changes. fix_str: | select a, b, c, d, e, f, g, h from x test_multiple_select_targets_some_newlines_missing_2: fail_str: | select a, b, c, d, e, f, g, h from x # The spaces before d, and h look odd, but these are places where the # select targets were already on a separate line, and the rule made no # changes. 
fix_str: | select a, b, c, d, e, f, g, h from x test_cte: fail_str: | WITH cte1 AS ( SELECT c1 AS c FROM t ) SELECT 1 FROM cte1 fix_str: | WITH cte1 AS ( SELECT c1 AS c FROM t ) SELECT 1 FROM cte1 test_single_newline_no_from: fail_str: | SELECT id fix_str: | SELECT id test_single_distinct_no_from: fail_str: | SELECT DISTINCT id fix_str: | SELECT DISTINCT id test_distinct_many: fail_str: | SELECT distinct a, b, c FROM my_table fix_str: | SELECT distinct a, b, c FROM my_table test_distinct_single_pass: pass_str: | SELECT distinct a FROM my_table test_distinct_single_fail_a: fail_str: | SELECT distinct a FROM my_table fix_str: | SELECT distinct a FROM my_table test_distinct_single_fail_b: fail_str: | SELECT distinct a FROM my_table fix_str: | SELECT distinct a FROM my_table test_single_select_with_no_from: fail_str: "SELECT\n 10000000\n" fix_str: "SELECT 10000000\n" test_single_select_with_no_from_previous_comment: fail_str: "SELECT\n /* test */ 10000000\n" fix_str: "SELECT 10000000 /* test */\n" test_single_select_with_comment_after_column: fail_str: | SELECT 1 -- this is a comment FROM my_table fix_str: | SELECT 1 -- this is a comment FROM my_table test_single_select_with_comment_after_column_no_space: fail_str: | SELECT 1-- this is a comment FROM my_table fix_str: | SELECT 1 -- this is a comment FROM my_table test_single_select_with_multiple_mixed_comments: fail_str: | SELECT -- previous comment 1 -- this is a comment FROM my_table fix_str: | SELECT 1 -- previous comment -- this is a comment FROM my_table test_single_select_with_comment_before: fail_str: | SELECT /* comment before */ 1 FROM my_table fix_str: | SELECT 1 /* comment before */ FROM my_table test_create_view: fail_str: | CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); fix_str: | CREATE VIEW a AS SELECT c FROM table1 INNER JOIN table2 ON (table1.id = table2.id); test_multiline_single: # https://github.com/sqlfluff/sqlfluff/issues/4516 pass_str: | SELECT SUM( 1 + SUM( 2 + 3 ) ) AS col FROM test_table test_multiline_expressions: # NOTE: LT09 doesn't fix the indentation, so that may still look strange here, # but we should make sure we're inserting new line breaks in the right places. # https://github.com/sqlfluff/sqlfluff/issues/5258 fail_str: | SELECT a, b1, b2, COUNT(DISTINCT id) AS c1, COUNT(DISTINCT name) AS c2, COUNT(DISTINCT city) AS c3, COUNT( DISTINCT id) AS d1, COUNT(DISTINCT name) AS d2, COUNT(DISTINCT city) AS d3, COUNT(DISTINCT id) AS e1, COUNT(DISTINCT name) AS e2, COUNT(DISTINCT city) AS e3 FROM some_table; fix_str: | SELECT a, b1, b2, COUNT(DISTINCT id) AS c1, COUNT(DISTINCT name) AS c2, COUNT(DISTINCT city) AS c3, COUNT( DISTINCT id) AS d1, COUNT(DISTINCT name) AS d2, COUNT(DISTINCT city) AS d3, COUNT(DISTINCT id) AS e1, COUNT(DISTINCT name) AS e2, COUNT(DISTINCT city) AS e3 FROM some_table; test_pass_leading_commas: # https://github.com/sqlfluff/sqlfluff/issues/5329 # NOTE: We shouldn't even need to configure the leading commas here. # Commas shouldn't be accounted for in whether elements are on the same line. pass_str: | select a , b , c sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT10.yml000066400000000000000000000022161451700765000235350ustar00rootroot00000000000000rule: LT10 test_fail_distinct_on_next_line_1: fail_str: | SELECT DISTINCT user_id, list_id FROM safe_user fix_str: | SELECT DISTINCT user_id, list_id FROM safe_user test_fail_distinct_on_next_line_2: fail_str: | SELECT -- The table contains duplicates, so we use DISTINCT. 
DISTINCT user_id FROM safe_user fix_str: | SELECT DISTINCT -- The table contains duplicates, so we use DISTINCT. user_id FROM safe_user test_fail_distinct_on_next_line_3: fail_str: | select distinct abc, def from a; fix_str: | select distinct abc, def from a; test_fail_distinct_on_next_line_4: fail_str: | CREATE OR REPLACE TABLE myschema.mytable AS ( SELECT DISTINCT cola , colb FROM myschema.mytable ); fix_str: | CREATE OR REPLACE TABLE myschema.mytable AS ( SELECT DISTINCT cola , colb FROM myschema.mytable ); test_pass_distinct_on_same_line_with_select: pass_str: SELECT DISTINCT user_id FROM safe_user sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT11.yml000066400000000000000000000044301451700765000235360ustar00rootroot00000000000000rule: LT11 test_fail_simple_fix_union_all_before: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_fail_simple_fix_union_all_after: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_fail_simple_fix_union_all_before_and_after: fail_str: | SELECT 'a' UNION ALL SELECT 'b' fix_str: | SELECT 'a' UNION ALL SELECT 'b' test_pass_multiple_newlines_are_allowed: pass_str: | SELECT 'a' UNION ALL SELECT 'b' # The autofix of LT11 doesn't respect indentation of the surrounding query. # Hence, the fix result of only LT11 looks ugly. But LT02 will fix the indentation # in a second step. # See the test blow. test_fail_fix_works_in_subqueries: fail_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) fix_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) # Test autofix after LT02 passes LT11 test_pass_fix_works_in_subqueries_after_LT02_fix: pass_str: | SELECT * FROM ( SELECT 'g' UNION ALL SELECT 'h' UNION ALL SELECT 'j' ) test_fail_simple_fix_union_before_and_after: fail_str: | SELECT 'a' UNION SELECT 'b' fix_str: | SELECT 'a' UNION SELECT 'b' test_fail_simple_fix_intersect_before_and_after: fail_str: | SELECT 'a' INTERSECT SELECT 'b' fix_str: | SELECT 'a' INTERSECT SELECT 'b' test_fail_simple_fix_except_before_and_after: fail_str: | SELECT 'a' EXCEPT SELECT 'b' fix_str: | SELECT 'a' EXCEPT SELECT 'b' test_fail_simple_fix_minus_before_and_after: fail_str: | SELECT 'a' EXCEPT SELECT 'b' fix_str: | SELECT 'a' EXCEPT SELECT 'b' test_fail_simple_fix_bigquery_intersect_distinct_before_and_after: fail_str: | SELECT 'a' INTERSECT DISTINCT SELECT 'b' fix_str: | SELECT 'a' INTERSECT DISTINCT SELECT 'b' configs: core: dialect: bigquery # NOTE: We used to exclude TSQL from fixing these queries, but # the reflow logic now enables this. 
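# As an illustration only (a hypothetical case, not part of the original
# suite), a TSQL query would now be expected to fix in the same way as the
# ANSI cases above:
#
# test_fail_simple_fix_union_all_tsql:
#   fail_str: |
#     SELECT 'a' UNION ALL SELECT 'b'
#   fix_str: |
#     SELECT 'a'
#     UNION ALL
#     SELECT 'b'
#   configs:
#     core:
#       dialect: tsql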
sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT12.yml

rule: LT12

test_pass_single_final_newline:
  pass_str: "SELECT foo FROM bar\n"

test_fail_no_final_newline:
  fail_str: "SELECT foo FROM bar"
  fix_str: "SELECT foo FROM bar\n"

test_fail_multiple_final_newlines:
  fail_str: "SELECT foo FROM bar\n\n"
  fix_str: "SELECT foo FROM bar\n"

test_pass_templated_plus_raw_newlines:
  pass_str: "{{ '\n\n' }}\n"

test_fail_templated_plus_raw_newlines:
  fail_str: "{{ '\n\n' }}"
  fix_str: "{{ '\n\n' }}\n"

test_fail_templated_plus_raw_newlines_extra_newline:
  fail_str: "{{ '\n\n' }}\n\n"
  fix_str: "{{ '\n\n' }}\n"

test_pass_templated_macro_newlines:
  # Tricky because the rendered code ends with two newlines:
  # - Literal newline inserted by the macro
  # - Literal newline at the end of the file
  # The slicing algorithm should treat the first newline as "templated" because
  # it was inserted when *expanding* the templated macro call.
  pass_str: |
    {% macro get_keyed_nulls(columns) %}
    {{ columns }}
    {% endmacro %}
    SELECT {{ get_keyed_nulls("other_id") }}

test_fail_templated_no_newline:
  # Tricky because there's no newline at the end of the file (following the
  # templated code).
  fail_str: "{% if true %}\nSELECT 1 + 1\n{%- endif %}"
  fix_str: "{% if true %}\nSELECT 1 + 1\n{%- endif %}\n"

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/LT13.yml

rule: LT13

test_pass_leading_whitespace_statement:
  pass_str: "SELECT foo FROM bar\n"

test_pass_leading_whitespace_comment:
  pass_str: "/*I am a comment*/\nSELECT foo FROM bar\n"

test_pass_leading_whitespace_inline_comment:
  pass_str: "--I am a comment\nSELECT foo FROM bar\n"

test_pass_leading_whitespace_inline_comment_hash:
  pass_str: "# I am a comment\nSELECT foo FROM bar\n"
  configs:
    core:
      dialect: bigquery

test_pass_leading_whitespace_jinja_comment:
  pass_str: "{# I am a comment #}\nSELECT foo FROM bar\n"

test_pass_leading_whitespace_jinja_if:
  pass_str: "{% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n"

test_pass_leading_whitespace_jinja_for:
  pass_str: "{% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n"

test_fail_leading_whitespace_statement:
  fail_str: "\n SELECT foo FROM bar\n"
  fix_str: "SELECT foo FROM bar\n"

test_fail_leading_whitespace_comment:
  fail_str: "\n /*I am a comment*/\nSELECT foo FROM bar\n"
  fix_str: "/*I am a comment*/\nSELECT foo FROM bar\n"

test_fail_leading_whitespace_inline_comment:
  fail_str: "\n --I am a comment\nSELECT foo FROM bar\n"
  fix_str: "--I am a comment\nSELECT foo FROM bar\n"

test_fail_leading_whitespace_jinja_comment:
  fail_str: "\n {# I am a comment #}\nSELECT foo FROM bar\n"
  fix_str: "{# I am a comment #}\nSELECT foo FROM bar\n"

test_fail_leading_whitespace_jinja_if:
  fail_str: "\n {% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n"
  fix_str: "{% if True %}\nSELECT foo\nFROM bar;\n{% endif %}\n"

test_fail_leading_whitespace_jinja_for:
  fail_str: "\n {% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n"
  fix_str: "{% for item in range(10) %}\nSELECT foo_{{ item }}\nFROM bar;\n{% endfor %}\n"

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/README.md

# Rule Tests

All the individual rule unit tests are defined in yml files.

Note that all the enumerated test names (test_1 etc.) were copied over from a
previous format with an automated script. All new tests should be named
descriptively, with comments for more context if needed.
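For example, a new test might be declared like this (an illustrative sketch,
not an actual case from the suite — it mirrors the LT10 fixtures, which
require `DISTINCT` to sit on the same line as `SELECT`):

```
test_fail_distinct_not_on_same_line_as_select:
  # DISTINCT should sit on the same line as SELECT (see rule LT10).
  fail_str: "SELECT\nDISTINCT user_id\nFROM safe_user\n"
  fix_str: "SELECT DISTINCT\nuser_id\nFROM safe_user\n"
```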
## Making a test case

### Writing a test for a query that should pass linting

A test for a passing query is declared like this:

```
descriptive_test_name:
  pass_str: select * from x
```

### Writing a test for a query that should fail linting

A test for a failing query is declared like this:

```
descriptive_test_name:
  fail_str: select * FROM x
```

and can optionally include a test for the fixed query that the rule returns:

```
descriptive_test_name:
  fail_str: select * FROM x
  fix_str: select * from x
```

### Rule Configuration

If your test needs additional rule configuration, this can be supplied through
a `configs` key, such as:

```
test_keyword_as_identifier:
  fail_str: SELECT parameter
  configs:
    rules:
      references.keywords:
        only_aliases: false
```

## Yaml Syntax

Using yaml makes it really easy to flexibly create test cases. You can create
single-line test cases with explicit newlines and tabs using `\n` and `\t`
when it makes sense, or create multi-line test cases which are much easier to
read for longer queries.

A good reference on multiline yaml syntax can be found
[here](https://yaml-multiline.info/).
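For example, the following two declarations are equivalent — the first uses
explicit `\n` escapes in a double-quoted string, while the second uses a
literal block scalar (`|`). The test names here are illustrative only:

```
test_single_line_form:
  pass_str: "SELECT foo\nFROM bar\n"

test_multi_line_form:
  pass_str: |
    SELECT foo
    FROM bar
```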
sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/RF01.yml

rule: RF01

test_pass_object_referenced_1:
  # References in quotes in bigquery
  pass_str: SELECT bar.user_id FROM `foo.far.bar`
  configs:
    core:
      dialect: bigquery
    rules:
      references.from:
        force_enable: true

test_fail_object_not_referenced_1:
  desc: Name foo is not referenced in the FROM clause. It would need to be at
    the end of the identifier in ticks, or an alias.
  fail_str: SELECT foo.user_id FROM `foo.far.bar`
  configs:
    core:
      dialect: bigquery
    rules:
      references.from:
        force_enable: true

test_fail_object_not_referenced_2:
  # References in WHERE clause
  fail_str: SELECT * FROM my_tbl WHERE foo.bar > 0

test_pass_object_referenced_2:
  pass_str: |
    SELECT * FROM db.sc.tbl2
    WHERE a NOT IN (SELECT a FROM db.sc.tbl1)

test_pass_object_referenced_3:
  pass_str: |
    SELECT * FROM db.sc.tbl2
    WHERE a NOT IN (SELECT tbl2.a FROM db.sc.tbl1)

test_pass_object_referenced_4:
  # Test ambiguous column reference caused by use of BigQuery structure fields.
  # Here, 'et2' could either be a schema name or a table name.
  # https://github.com/sqlfluff/sqlfluff/issues/1079
  pass_str: |
    SELECT et2.txn.amount
    FROM `example_dataset2.example_table2` AS et2
  configs:
    core:
      dialect: bigquery
    rules:
      references.from:
        force_enable: true

test_pass_object_referenced_5a:
  # Test ambiguous column reference caused by use of BigQuery structure fields.
  # Here, column.field should not trigger the rule as by default this rule is
  # disabled for bigquery
  # https://github.com/sqlfluff/sqlfluff/issues/1503
  pass_str: |
    SELECT col1.field, col
    FROM `example_dataset2.example_table2`
  configs:
    core:
      dialect: bigquery

test_pass_object_referenced_5b:
  # Same test as above but default (ANSI) should trigger
  fail_str: |
    SELECT col1.field
    FROM table1

test_pass_object_referenced_5c:
  # Same test as above but for BigQuery but force is
  # enabled so should fail
  fail_str: |
    SELECT col1.field
    FROM `example_dataset2.example_table2`
  configs:
    core:
      dialect: bigquery
    rules:
      references.from:
        force_enable: true

test_pass_object_referenced_5d:
  # Test for extra dialect (hive) compatibility
  pass_str: SELECT col1.field, col2 FROM example_table
  configs:
    core:
      dialect: hive

test_pass_object_referenced_5e:
  # Test for extra dialect (redshift) compatibility
  pass_str: SELECT col1.field, col2 FROM example_table
  configs:
    core:
      dialect: redshift

test_pass_object_referenced_6:
  # Test references in subqueries (see issue #1939)
  pass_str: |
    select cc.c1
    from (
        select table1.c1
        from table1
        inner join table2 on table1.x_id = table2.x_id
        inner join table3 on table2.y_id = table3.y_id
    ) as cc

test_pass_object_referenced_7:
  pass_str: |
    UPDATE my_table
    SET row_sum = (
        SELECT COUNT(*) AS row_sum
        FROM another_table
        WHERE another_table.id = my_table.id
    )

test_fail_object_referenced_7:
  fail_str: |
    UPDATE my_table
    SET row_sum = (
        SELECT COUNT(*) AS row_sum
        FROM another_table
        WHERE another_table.id = my_tableeee.id
    )

test_pass_object_referenced_8:
  pass_str: |
    DELETE FROM agent1
    WHERE EXISTS(
        SELECT customer.cust_id FROM customer
        WHERE agent1.agent_code <> customer.agent_code);

test_pass_two_part_reference_8:
  pass_str: |
    delete from public.agent1
    where exists(
        select customer.cust_id from customer
        where agent1.agent_code <> customer.agent_code)

test_pass_two_part_reference_9:
  pass_str: |
    delete from public.agent1
    where exists(
        select customer.cust_id from customer
        where public.agent1.agent_code <> customer.agent_code)

test_fail_two_part_reference_10:
  fail_str: |
    select *
    from schema1.agent1
    where schema2.agent1.agent_code <> 'abc'

test_fail_two_part_reference_11:
  fail_str: |
    delete from schema1.agent1
    where exists(
        select customer.cust_id from customer
        where schema2.agent1.agent_code <> customer.agent_code)

test_pass_two_part_reference_11:
  pass_str: |
    select *
    from agent1
    where public.agent1.agent_code <> '3'

test_pass_bigquery_dash:
  # References in quotes in bigquery
  pass_str: SELECT bar.user_id FROM foo-far.bar
  configs:
    core:
      dialect: bigquery
    rules:
      references.from:
        force_enable: true

test_pass_exasol_select_into:
  pass_str: |
    select *
    into table schm.tab
    from schm.tab2
  configs:
    core:
      dialect: exasol

test_pass_simple_delete:
  pass_str: |
    delete from table1
    where 1 = 1

test_exasol_invalid_foreign_key_from:
  pass_str: |
    SELECT * WITH INVALID FOREIGN KEY (nr) FROM T1
    REFERENCING T2 (id)
  configs:
    core:
      dialect: exasol

test_tsql_select_system_as_identifier:
  pass_str: |
    SELECT @@IDENTITY AS 'Identity'
  configs:
    core:
      dialect: tsql

test_mysql_select_no_from_should_not_except:
  pass_str: |
    SELECT DATE_SUB('1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND);
  configs:
    core:
      dialect: mysql

test_nested_join_clause_does_not_flag:
  pass_str: |
    SELECT 1 AS RegionCode
    FROM BA
    LEFT OUTER JOIN I
        LEFT OUTER JOIN P ON I.Pcd = P.Iid
        ON BA.Iid = I.Bcd
  configs:
    core:
      dialect: tsql

test_nested_join_clauses_do_not_flag:
  pass_str: |
    SELECT 1 AS RegionCode
    FROM BA
    LEFT OUTER JOIN I
        LEFT OUTER JOIN P ON I.Pcd = P.Iid
        LEFT OUTER JOIN C ON I.Pcd = C.Iid
        ON BA.Iid = I.Bcd
  configs:
    core:
      dialect: tsql

test_parenthesized_join_clauses_do_not_flag:
  pass_str: |
    SELECT 1 AS RegionCode
    FROM BA
    LEFT OUTER JOIN (
        I
        LEFT OUTER JOIN P ON I.Pcd = P.Iid
    ) ON BA.Iid = I.Bcd
  configs:
    core:
      dialect: tsql

test_soql_ignore_rule:
  pass_str: |
    SELECT Account.Name
    FROM Contact
  configs:
    core:
      dialect: soql

test_postgres_value_table_alias:
  pass_str: |
    select
        sc.col1 as colx
        , pn.col1 as coly
    from sch1.tbl1 as sc
    cross join unnest(array[111, 222]) as pn(col1)
  configs:
    core:
      dialect: postgres

test_pass_update_with_alias:
  pass_str: |
    UPDATE tbl AS dest
    SET t.title = 'TEST'
    WHERE t.id = 101 AND EXISTS (
        SELECT 1
        FROM foobar AS tmp
        WHERE tmp.idx = dest.idx)

test_pass_postgres_merge_with_alias:
  pass_str: |
    merge dw.sch.tbl dest
    using land.sch.tbl src
        on src.idx = dest.idx
        and src.name = dest.name
        and src.idy = dest.idy
    when not matched by source
        and exists (
            select 1 as tmp
            from land.sch.tag as ld
            where ld.idx = dest.idx
            and ld.name = dest.name
        )
    then update set dest.ac = 'N'
  configs:
    core:
      dialect: tsql

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/RF02.yml

rule: RF02

test_pass_qualified_references_multi_table_statements:
  pass_str: |
    SELECT foo.a, vee.b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a

test_fail_unqualified_references_multi_table_statements:
  fail_str: |
    SELECT a, b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a

test_pass_qualified_references_multi_table_statements_subquery:
  pass_str: |
    SELECT a
    FROM (
        SELECT foo.a, vee.b
        FROM foo
        LEFT JOIN vee ON vee.a = foo.a
    )

test_fail_unqualified_references_multi_table_statements_subquery:
  fail_str: |
    SELECT a
    FROM (
        SELECT a, b
        FROM foo
        LEFT JOIN vee ON vee.a = foo.a
    )

test_pass_qualified_references_multi_table_statements_subquery_mix:
  pass_str: |
    SELECT foo.a, vee.b
    FROM (
        SELECT c
        FROM bar
    ) AS foo
    LEFT JOIN vee ON vee.a = foo.a

test_allow_date_parts_as_function_parameter_bigquery:
  # Allow use of BigQuery date parts (which are not quoted and were previously
  # mistaken for column references and flagged by this rule).
  pass_str: |
    SELECT timestamp_trunc(a.ts, month) AS t
    FROM a
    JOIN b ON a.id = b.id
  configs:
    core:
      dialect: bigquery

test_allow_date_parts_as_function_parameter_snowflake:
  # Allow use of Snowflake date parts (which are not quoted and were previously
  # mistaken for column references and flagged by this rule).
  pass_str: |
    SELECT datediff(year, a.column1, b.column2)
    FROM a
    JOIN b ON a.id = b.id
  configs:
    core:
      dialect: snowflake

test_ignore_value_table_functions_when_counting_tables:
  # Allow use of unnested value tables from bigquery without counting as a
  # table reference. This test passes despite the unqualified reference
  # because we "only select from one table".
  pass_str: |
    select
        unqualified_reference_from_table_a,
        _t_start
    from a
    left join unnest(generate_timestamp_array(
        '2020-01-01', '2020-01-30', interval 1 day)) as _t_start
        on true
  configs:
    core:
      dialect: bigquery

test_ignore_value_table_functions_when_counting_unqualified_aliases:
  # Allow use of unnested value tables from bigquery without qualification.
  # The function `unnest` returns a table which is only one unnamed column.
  # This is impossible to qualify further, and as such the rule allows it.
  pass_str: |
    select
        a.*,
        b.*,
        _t_start
    from a
    left join b on true
    left join unnest(generate_timestamp_array(
        '2020-01-01', '2020-01-30', interval 1 day)) as _t_start
        on true
  configs:
    core:
      dialect: bigquery

test_allow_unqualified_references_in_sparksql_lambdas:
  pass_str: |
    SELECT transform(array(1, 2, 3), x -> x + 1);
  configs:
    core:
      dialect: sparksql

test_allow_unqualified_references_in_athena_lambdas:
  pass_str: |
    select
        t1.id,
        filter(array[t1.col1, t1.col2, t2.col3], x -> x is not null) as flt
    from t1
    inner join t2 on t1.id = t2.id
  configs:
    core:
      dialect: athena

test_allow_unqualified_references_in_athena_lambdas_with_several_arguments:
  pass_str: |
    select
        t1.id,
        filter(array[(t1.col1, t1.col2)], (x, y) -> x + y) as flt
    from t1
    inner join t2 on t1.id = t2.id
  configs:
    core:
      dialect: athena

test_disallow_unqualified_references_in_malformed_lambdas:
  fail_str: |
    select
        t1.id,
        filter(array[(t1.col1, t1.col2)], (x, y), z -> x + y) as flt
    from t1
    inner join t2 on t1.id = t2.id
  configs:
    core:
      dialect: athena

test_fail_column_and_alias_same_name:
  # See issue #2169
  fail_str: |
    SELECT
        foo AS foo,
        bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id

test_pass_column_and_alias_same_name_1:
  pass_str: |
    SELECT
        a.foo AS foo,
        b.bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id

test_pass_column_and_alias_same_name_2:
  # Possible for unqualified columns if
  # it is actually an alias of another column.
  pass_str: |
    SELECT
        a.bar AS baz,
        baz
    FROM a
    LEFT JOIN b ON a.id = b.id

test_pass_qualified_references_multi_table_statements_mysql:
  pass_str: |
    SELECT foo.a, vee.b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a
  configs:
    core:
      dialect: mysql

test_fail_unqualified_references_multi_table_statements_mysql:
  fail_str: |
    SELECT a, b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a
  configs:
    core:
      dialect: mysql

test_fail_column_and_alias_same_name_mysql:
  # See issue #2169
  fail_str: |
    SELECT
        foo AS foo,
        bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: mysql

test_pass_column_and_alias_same_name_1_mysql:
  pass_str: |
    SELECT
        a.foo AS foo,
        b.bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: mysql

test_pass_column_and_alias_same_name_2_mysql:
  # Possible for unqualified columns if
  # it is actually an alias of another column.
  pass_str: |
    SELECT
        a.bar AS baz,
        baz
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: mysql

test_pass_qualified_references_multi_table_statements_tsql:
  pass_str: |
    SELECT foo.a, vee.b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a
  configs:
    core:
      dialect: tsql

test_fail_unqualified_references_multi_table_statements_tsql:
  fail_str: |
    SELECT a, b
    FROM foo
    LEFT JOIN vee ON vee.a = foo.a
  configs:
    core:
      dialect: tsql

test_fail_column_and_alias_same_name_tsql:
  # See issue #2169
  fail_str: |
    SELECT
        foo AS foo,
        bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: tsql

test_pass_column_and_alias_same_name_1_tsql:
  pass_str: |
    SELECT
        a.foo AS foo,
        b.bar AS bar
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: tsql

test_pass_column_and_alias_same_name_2_tsql:
  # Possible for unqualified columns if
  # it is actually an alias of another column.
  pass_str: |
    SELECT
        a.bar AS baz,
        baz
    FROM a
    LEFT JOIN b ON a.id = b.id
  configs:
    core:
      dialect: tsql

test_pass_rowtype_with_join:
  # Check we don't wrongly interpret rowtype attributes
  # as field alias when more than one tables in join
  pass_str: |
    select cast(row(t1.attr, t2.attr) as row(fld1 double, fld2 double)) as flds
    from sch.tab1 as t1
    join sch.tab2 as t2 on t2.id = t1.id
  configs:
    core:
      dialect: hive

test_fail_table_plus_flatten_snowflake_1:
  # FLATTEN() returns a table, thus there are two tables, thus lint failure.
  fail_str: |
    SELECT
        r.rec:foo::string AS foo,
        value:bar::string AS bar
    FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) AS x
  configs:
    core:
      dialect: snowflake

test_fail_table_plus_flatten_snowflake_2:
  # FLATTEN() returns a table, thus there are two tables, thus lint failure,
  # even though there's no alias provided for FLATTEN().
  fail_str: |
    SELECT
        r.rec:foo::string AS foo,
        value:bar::string AS bar
    FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result)
  configs:
    core:
      dialect: snowflake

test_pass_table_plus_flatten_snowflake_1:
  # FLATTEN() returns a table, thus there are two tables. This one passes,
  # unlike the above, because both aliases are used.
  pass_str: |
    SELECT
        r.rec:foo::string AS foo,
        x.value:bar::string AS bar
    FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result) AS x
  configs:
    core:
      dialect: snowflake

test_pass_ignore_words_column_name:
  pass_str: |
    SELECT test1, test2
    FROM t_table1
    LEFT JOIN t_table_2 ON TRUE
  configs:
    rules:
      references.qualification:
        ignore_words: test1,test2

test_pass_ignore_words_regex_column_name:
  pass_str: |
    SELECT _test1, _test2
    FROM t_table1
    LEFT JOIN t_table_2 ON TRUE
  configs:
    rules:
      references.qualification:
        ignore_words_regex: ^_

test_pass_ignore_words_regex_bigquery_declare_example:
  pass_str:
    DECLARE _test INT64 DEFAULT 42;
    SELECT _test
    FROM t_table1
    LEFT JOIN t_table_2 ON TRUE
  configs:
    core:
      dialect: bigquery
    rules:
      references.qualification:
        ignore_words_regex: ^_

test_pass_redshift:
  # This was failing in issue 3380.
  pass_str:
    SELECT account.id
    FROM salesforce_sd.account
    INNER JOIN salesforce_sd."user" ON salesforce_sd."user".id = account.ownerid
  configs:
    core:
      dialect: redshift

test_pass_tsql:
  # This was failing in issue 3342.
  pass_str:
    select
        psc.col1
    from tbl1 as psc
    where
        exists
        (
            select 1 as data
            from tbl2 as pr
            join tbl2 as c on c.cid = pr.cid
            where
                c.col1 = 'x'
                and pr.col2 <= convert(date, getdate())
                and pr.pid = psc.pid
        )
  configs:
    core:
      dialect: tsql

test_pass_ansi:
  # This was failing in issue 3055.
  pass_str: |
    SELECT my_col
    FROM my_table
    WHERE EXISTS (
        SELECT 1
        FROM other_table
        INNER JOIN mapping_table ON (mapping_table.other_fk = other_table.id_pk)
        WHERE mapping_table.kind = my_table.kind
    )

test_pass_redshift_convert:
  # This was failing in issue 3651.
  pass_str: |
    SELECT
        sellers.name,
        CONVERT(integer, sales.pricepaid) AS price
    FROM sales
    LEFT JOIN sellers ON sales.sellerid = sellers.sellerid
    WHERE sales.salesid = 100
  configs:
    core:
      dialect: redshift

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/RF03.yml

rule: RF03

# Mixed qualification of references.
test_fail_single_table_mixed_qualification_of_references:
  fail_str: SELECT my_tbl.bar, baz FROM my_tbl
  fix_str: SELECT my_tbl.bar, my_tbl.baz FROM my_tbl

test_pass_single_table_consistent_references_1:
  pass_str: SELECT bar FROM my_tbl

test_pass_single_table_consistent_references_2:
  pass_str: SELECT my_tbl.bar FROM my_tbl

test_pass_on_tableless_table:
  # tests particular code branch for coverage
  pass_str: SELECT (SELECT MAX(bar) FROM tbl) + 1 AS col

test_fail_single_table_mixed_qualification_of_references_subquery:
  # NOTE: Even though there's a subquery here, we can still fix it
  # because there is no ambiguity about which table we're referencing.
  fail_str: SELECT * FROM (SELECT my_tbl.bar, baz FROM my_tbl)
  fix_str: SELECT * FROM (SELECT my_tbl.bar, my_tbl.baz FROM my_tbl)

test_pass_lateral_table_ref:
  pass_str: |
    SELECT
        tbl.a,
        tbl.b,
        tbl.a + tbl.b AS col_created_right_here,
        col_created_right_here + 1 AS sub_self_ref
    FROM tbl

test_pass_single_table_consistent_references_1_subquery:
  pass_str: SELECT * FROM (SELECT bar FROM my_tbl)

test_pass_single_table_consistent_references_2_subquery:
  pass_str: SELECT * FROM (SELECT my_tbl.bar FROM my_tbl)

test_fail_single_table_reference_when_unqualified_config:
  fail_str: SELECT my_tbl.bar FROM my_tbl
  fix_str: SELECT bar FROM my_tbl
  configs:
    rules:
      references.consistent:
        single_table_references: unqualified

test_fail_single_table_reference_when_qualified_config:
  fail_str: SELECT bar FROM my_tbl WHERE foo
  fix_str: SELECT my_tbl.bar FROM my_tbl WHERE my_tbl.foo
  configs:
    rules:
      references.consistent:
        single_table_references: qualified

test_pass_single_table_reference_in_subquery:
  # Catch issues with subqueries properly
  pass_str: |
    SELECT * FROM db.sc.tbl2
    WHERE a NOT IN (SELECT a FROM db.sc.tbl1)

test_value_table_functions_do_not_require_qualification:
  pass_str: |
    select
        a.*,
        _t_start
    from a
    left join unnest(generate_timestamp_array(
        '2020-01-01', '2020-01-30', interval 1 day)) as _t_start
        on true
  configs:
    core:
      dialect: bigquery
    rules:
      references.consistent:
        force_enable: true

test_object_references_1a:
  # This should fail as "a" is an unreferenced object.
  # We don't try to be smart.
  fail_str: SELECT a.bar, b FROM my_tbl
  fix_str: SELECT a.bar, my_tbl.b FROM my_tbl

test_object_references_1b:
  # This should not-fail as "a" is potentially a STRUCT
  pass_str: SELECT a.bar, b FROM my_tbl
  configs:
    core:
      dialect: bigquery

test_object_references_1c:
  # This should fail as even though "a" is potentially a STRUCT
  # the config has been set to enforce the rule
  fail_str: SELECT a.bar, b FROM my_tbl AS c
  fix_str: SELECT c.a.bar, c.b FROM my_tbl AS c
  configs:
    core:
      dialect: bigquery
    rules:
      references.consistent:
        force_enable: true
        single_table_references: qualified

test_object_references_1d:
  # "a" is not a named table and therefore is a STRUCT
  pass_str: SELECT a.bar, b FROM my_tbl
  configs:
    core:
      dialect: bigquery
    rules:
      references.consistent:
        force_enable: true

test_object_references_1e:
  pass_str: SELECT my_tbl.a.bar, my_tbl.b FROM my_tbl
  configs:
    core:
      dialect: bigquery
    rules:
      references.consistent:
        force_enable: true

test_object_references_struct_inconsistent_fix_a:
  fail_str: SELECT a.bar, my_tbl.b FROM my_tbl
  fix_str: SELECT my_tbl.a.bar, my_tbl.b FROM my_tbl
  configs:
    core:
      dialect: bigquery
    rules:
      references.consistent:
        force_enable: true

test_object_references_1f:
  # This should not-fail as "a" is potentially a STRUCT
  pass_str: SELECT a.bar, b FROM my_tbl
  configs:
    core:
      dialect: hive

test_object_references_1g:
  # This should not-fail as "a" is potentially a STRUCT
  pass_str: SELECT a.bar, b FROM my_tbl
  configs:
    core:
      dialect: redshift

test_tsql_pivot_are_excluded:
  # This should pass as tsql PIVOT columns do not need to be
  # qualified
  pass_str: |
    select t1._id
        , [1] as lvl_1
        , [2] as lvl_2
        , [3] as lvl_3
    from table1 t1
    pivot (max(value) for rn in([1], [2], [3])) as pvt
  configs:
    core:
      dialect: tsql

test_date_functions_are_excluded:
  # This should pass as date keywords columns do not need to be
  # qualified
  pass_str: |
    SELECT
        a.[hello],
        DATEDIFF(day, a.[mydate], GETDATE()) AS [test]
    FROM mytable AS a
  configs:
    core:
      dialect: tsql

test_select_alias_in_where_clause_1:
  # This should pass for certain dialects
  pass_str: |
    select
        t.col0,
        t.col1 + 1 as alias_col1
    from table1 as t
    where alias_col1 > 5
  configs:
    core:
      dialect: redshift

test_select_alias_in_where_clause_2:
  # This should pass for certain dialects
  pass_str: |
    select
        t.col0,
        t.col1 + 1 as alias_col1
    from table1 as t
    where alias_col1 > 5
  configs:
    core:
      dialect: snowflake

test_pass_group_by_alias:
  pass_str: |
    select
        t.col1 + 1 as alias_col1,
        count(1)
    from table1 as t
    group by alias_col1

test_pass_order_by_alias:
  pass_str: |
    select
        t.col0,
        t.col1 + 1 as alias_col1
    from table1 as t
    order by alias_col1

test_pass_having:
  pass_str: |
    select
        t.col0,
        t.col1 + 1 as alias_col1
    from table1 as t
    having alias_col1 > 5

test_fail_select_alias_in_where_clause_5:
  # This should fail for ansi (and be fixable)
  fail_str: |
    select
        t.col0,
        t.col1 + 1 as alias_col1
    from table1 as t
    where alias_col1 > 5
  fix_str: |
    select
        col0,
        col1 + 1 as alias_col1
    from table1 as t
    where alias_col1 > 5
  configs:
    rules:
      references.consistent:
        single_table_references: unqualified

test_pass_tsql_parameter:
  # This should pass for certain dialects
  pass_str: |
    DECLARE @id_date int;
    SET @id_date = 20211108;

    SELECT
        sometable.some_column
    FROM sometable
    WHERE 1 = 1
        AND sometable.column_with_date = @id_date
  configs:
    core:
      dialect: tsql

test_pass_tsql_pivot:
  # This should pass for certain dialects
  pass_str: |
    SELECT 1
    FROM
    (
        SELECT DaysToManufacture, StandardCost
        FROM Production.Product
    ) AS SourceTable
    PIVOT
    (
        AVG(StandardCost)
        FOR DaysToManufacture IN ([0], [1], [2], [3], [4])
    ) AS PivotTable;
  configs:
    core:
      dialect: tsql

test_unfixable_ambiguous_reference_subquery:
  # `field_2` could be from the outer query or the inner
  # query (i.e. from `other_table` or `my_alias`) and because
  # it's ambiguous we shouldn't provide a fix.
  fail_str: |
    SELECT (
        SELECT other_table.other_table_field_1
        FROM other_table
        WHERE other_table.id = field_2
    )
    FROM (SELECT * FROM some_table) AS my_alias

test_pass_snowflake_flatten_function:
  # Tests a fix for issue 3178. This query passes because the second column
  # could refer to either 'r' or the table returned by FLATTEN().
  pass_str: |
    SELECT
        r.rec:foo::string,
        value:bar::string
    FROM foo.bar AS r, LATERAL FLATTEN(input => rec:result)
  configs:
    core:
      dialect: snowflake

passes_tql_table_variable:
  # Issue 3243
  pass_str: select a, b from @tablevar
  configs:
    core:
      dialect: tsql
    rules:
      references.consistent:
        single_table_references: qualified

fail_but_dont_fix_templated_table_name_consistent:
  fail_str: |
    SELECT
        a,
        {{ "foo" }}.b
    FROM {{ "foo" }}

fail_but_dont_fix_templated_table_name_qualified:
  fail_str: |
    SELECT
        a,
        {{ "foo" }}.b
    FROM {{ "foo" }}
  configs:
    rules:
      references.consistent:
        single_table_references: qualified

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/RF04.yml

rule: RF04

test_pass_valid_identifier:
  pass_str: CREATE TABLE artist(artist_name TEXT)

test_fail_keyword_as_identifier_column:
  fail_str: CREATE TABLE artist(create TEXT)

test_fail_keyword_as_identifier_column_alias:
  fail_str: SELECT 1 as parameter

test_fail_keyword_as_identifier_table_alias:
  fail_str: SELECT x FROM tbl AS parameter

test_pass_valid_identifier_not_alias:
  # should pass on default config as not alias
  pass_str: SELECT parameter

test_fail_keyword_as_identifier_not_alias_all:
  fail_str: SELECT parameter
  configs:
    rules:
      references.keywords:
        unquoted_identifiers_policy: all

test_pass_valid_identifier_table_alias_column_alias_config:
  pass_str: SELECT x FROM tbl AS parameter
  configs:
    rules:
      references.keywords:
        unquoted_identifiers_policy: column_aliases

test_fail_keyword_as_identifier_column_alias_config:
  fail_str: SELECT x AS date FROM tbl AS parameter
  configs:
    rules:
      references.keywords:
        unquoted_identifiers_policy: column_aliases

test_pass_valid_quoted_identifier:
  pass_str: CREATE TABLE [artist]([artist_name] TEXT)
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: aliases
    core:
      dialect: tsql

test_fail_keyword_as_quoted_identifier_column:
  fail_str: CREATE TABLE "artist"("create" TEXT)
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: aliases

test_pass_keyword_as_quoted_identifier_column_none_policy:
  pass_str: CREATE TABLE "artist"("create" TEXT)
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: none

test_fail_keyword_as_quoted_identifier_column_alias:
  fail_str: SELECT 1 as [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: aliases
    core:
      dialect: tsql

test_fail_keyword_as_quoted_identifier_table_alias:
  fail_str: SELECT [x] FROM [tbl] AS [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: aliases
    core:
      dialect: tsql

test_pass_valid_quoted_identifier_not_alias:
  # should pass on default config as not alias
  pass_str: SELECT [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: aliases
    core:
      dialect: tsql

test_fail_keyword_as_quoted_identifier_not_alias_all:
  fail_str: SELECT [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: all
    core:
      dialect: tsql

test_pass_valid_quoted_identifier_table_alias_column_alias_config:
  pass_str: SELECT [x] FROM [tbl] AS [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: column_aliases
    core:
      dialect: tsql

test_fail_keyword_as_quoted_identifier_column_alias_config:
  fail_str: SELECT [x] AS [date] FROM [tbl] AS [parameter]
  configs:
    rules:
      references.keywords:
        quoted_identifiers_policy: column_aliases
    core:
      dialect: tsql

test_pass_ignore_word1:
  pass_str: CREATE TABLE artist(create TEXT)
  configs:
    rules:
      references.keywords:
        ignore_words: create

test_pass_ignore_word2:
  pass_str: SELECT col1 AS date FROM table1
  configs:
    rules:
      references.keywords:
        ignore_words: date

test_pass_ignore_words_regex1:
  pass_str: CREATE TABLE artist(create TEXT)
  configs:
    rules:
      references.keywords:
        ignore_words_regex: ^cr

test_pass_ignore_words_regex2:
  pass_str: SELECT col1 AS date FROM table1
  configs:
    rules:
      references.keywords:
        ignore_words_regex: ^da

test_pass_one_character_identifier:
  pass_str: SELECT d.col1 FROM table1 d
  configs:
    core:
      dialect: snowflake

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/RF05.yml

rule: RF05

test_fail_special_chars_create_table_space:
  fail_str: |
    CREATE TABLE DBO.ColumnNames
    (
        "Internal Space" INT
    )

test_fail_special_chars_create_table_gt:
  fail_str: |
    CREATE TABLE DBO.ColumnNames
    (
        "Greater>Than" INT
    )

test_fail_special_chars_create_table_lt:
  fail_str: |
    CREATE TABLE DBO.ColumnNames
    (
        "Less<Than" INT
    )

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST02.yml

rule: ST02

test_pass_case_cannot_be_reduced_1:
  pass_str: select fab > 0 as is_fab from fancy_table

test_pass_case_cannot_be_reduced_2:
  pass_str: |
    select
        case
            when fab > 0 then true
        end as is_fab
    from fancy_table

test_pass_case_cannot_be_reduced_3:
  pass_str: |
    select
        case
            when fab is not null then false
        end as is_fab
    from fancy_table

test_pass_case_cannot_be_reduced_4:
  pass_str: |
    select
        case
            when fab > 0 then true
            else true
        end as is_fab
    from fancy_table

test_pass_case_cannot_be_reduced_5:
  pass_str: |
    select
        case
            when fab <> 0 then 'just a string'
        end as fab_category
    from fancy_table

test_pass_case_cannot_be_reduced_6:
  pass_str: |
    select
        case
            when fab <> 0 then true
            when fab < 0 then 'not a bool'
        end as fab_category
    from fancy_table

test_pass_case_cannot_be_reduced_7:
  pass_str: |
    select
        foo,
        case
            when bar is null then bar
            else '123'
        end as test
    from baz;

test_pass_case_cannot_be_reduced_8:
  pass_str: |
    select
        foo,
        case
            when bar is not null then '123'
            else bar
        end as test
    from baz;

test_pass_case_cannot_be_reduced_9:
  pass_str: |
    select
        foo,
        case
            when bar is not null then '123'
            when foo is not null then '456'
            else bar
        end as test
    from baz;

test_pass_case_cannot_be_reduced_10:
  pass_str: |
    select
        foo,
        case
            when bar is not null and abs(foo) > 0 then '123'
            else bar
        end as test
    from baz;

test_pass_case_cannot_be_reduced_11:
  pass_str: |
    SELECT
        dv_runid,
        CASE
            WHEN LEAD(dv_startdateutc) OVER (
                PARTITION BY rowid ORDER BY dv_startdateutc
            ) IS NULL THEN 1
            ELSE 0
        END AS loadstate
    FROM d;

test_pass_case_cannot_be_reduced_12:
  pass_str: |
    select
        field_1,
        field_2,
        field_3,
        case
            when coalesce(field_2, field_3) is null then 1
            else 0
        end as field_4
    from my_table;

test_pass_case_cannot_be_reduced_13:
  pass_str: |
    SELECT
        CASE
            WHEN item.submitted_timestamp IS NOT NULL THEN item.sitting_id
        END
  configs:
    core:
      dialect: postgres

test_fail_unnecessary_case_1:
  fail_str: |
    select
        case
            when fab > 0 then true
            else false
        end as is_fab
    from fancy_table
  fix_str: |
    select
        coalesce(fab > 0, false) as is_fab
    from fancy_table

test_fail_unnecessary_case_2:
  fail_str: |
    select
        case
            when fab > 0 then false
            else true
        end as is_fab
    from fancy_table
  fix_str: |
    select
        not coalesce(fab > 0, false) as is_fab
    from fancy_table

test_fail_unnecessary_case_3:
  fail_str: |
    select
        case
            when fab > 0 and tot > 0 then true
            else false
        end as is_fab
    from fancy_table
  fix_str: |
    select
        coalesce(fab > 0 and tot > 0, false) as is_fab
    from fancy_table

test_fail_unnecessary_case_4:
  fail_str: |
    select
        case
            when fab > 0 and tot > 0 then false
            else true
        end as is_fab
    from fancy_table
  fix_str: |
    select
        not coalesce(fab > 0 and tot > 0, false) as is_fab
    from fancy_table

test_fail_unnecessary_case_5:
  fail_str: |
    select
        case
            when not fab > 0 or tot > 0 then false
            else true
        end as is_fab
    from fancy_table
  fix_str: |
    select
        not coalesce(not fab > 0 or tot > 0, false) as is_fab
    from fancy_table

test_fail_unnecessary_case_6:
  fail_str: |
    select
        subscriptions_xf.metadata_migrated,
        case -- BEFORE ST02 FIX
            when perks.perk is null then false
            else true
        end as perk_redeemed,
        perks.received_at as perk_received_at
    from subscriptions_xf
  fix_str: |
    select
        subscriptions_xf.metadata_migrated,
        not coalesce(perks.perk is null, false) as perk_redeemed,
        perks.received_at as perk_received_at
    from subscriptions_xf

test_fail_unnecessary_case_7:
  fail_str: |
    select
        foo,
        case
            when bar is null then '123'
            else bar
        end as test
    from baz;
  fix_str: |
    select
        foo,
        coalesce(bar, '123') as test
    from baz;

test_fail_unnecessary_case_8:
  fail_str: |
    select
        foo,
        case
            when bar is not null then bar
            else '123'
        end as test
    from baz;
  fix_str: |
    select
        foo,
        coalesce(bar, '123') as test
    from baz;

test_fail_unnecessary_case_9:
  fail_str: |
    select
        foo,
        case
            when bar is null then null
            else bar
        end as test
    from baz;
  fix_str: |
    select
        foo,
        bar as test
    from baz;

test_fail_unnecessary_case_10:
  fail_str: |
    select
        foo,
        case
            when bar is not null then bar
            else null
        end as test
    from baz;
  fix_str: |
    select
        foo,
        bar as test
    from baz;

test_fail_unnecessary_case_11:
  fail_str: |
    select
        foo,
        case
            when bar is not null then bar
        end as test
    from baz;
  fix_str: |
    select
        foo,
        bar as test
    from baz;

test_fail_no_copy_code_out_of_template:
  # The rule wants to replace the case statement with coalesce(), but
  # LintFix.has_template_conflicts() correctly prevents it copying code out
  # of the templated region. Hence, the query is not modified.
  fail_str: |
    select
        foo,
        case
            when bar is null then {{ result }}
            else bar
        end as test
    from baz;
  configs:
    core:
      ignore_templated_areas: false
    templater:
      jinja:
        context:
          result: "'123'"
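# For context (an illustrative comment only, not part of the original file):
# had the templated expression not been involved, the fix would presumably
# mirror the earlier cases and look roughly like
#     coalesce(bar, {{ result }}) as test
# but copying the templated `{{ result }}` out of its template slice is
# exactly what has_template_conflicts() blocks.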
sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST03.yml

rule: ST03

test_pass_no_cte_defined_1:
  pass_str: select * from t

test_pass_cte_defined_and_used_1:
  pass_str: |
    with cte as (
        select a, b
        from t
    )
    select * from cte

test_pass_cte_defined_and_used_2:
  pass_str: |
    WITH cte1 AS (
        SELECT a
        FROM t
    ),
    cte2 AS (
        SELECT b
        FROM u
    )
    SELECT * FROM cte1 JOIN cte2

test_pass_cte_defined_and_used_case_insensitive:
  pass_str: |
    WITH cte1 AS (
        SELECT a
        FROM t
    ),
    cte2 AS (
        SELECT b
        FROM u
    )
    SELECT * FROM cte1 JOIN Cte2

test_fail_cte_defined_but_unused_1:
  desc: Two CTEs defined but only one used in final query.
  fail_str: |
    WITH cte1 AS (
        SELECT a
        FROM t
    ),
    cte2 AS (
        SELECT b
        FROM u
    )
    SELECT * FROM cte1

test_fail_cte_defined_but_unused_2:
  desc: CTE defined but unused in final query even though table alias mimics CTE's name.
  fail_str: |
    WITH cte_orders AS (
        SELECT customer_id, total
        FROM orders
    )
    SELECT *
    FROM orders AS cte_orders

test_pass_cte_defined_and_used_3:
  pass_str: |
    WITH cte1 AS (
        SELECT a
        FROM t
    ),
    cte2 AS (
        SELECT b
        FROM cte1
    )
    SELECT * FROM cte2

test_fail_cte_defined_but_unused_3:
  desc: Two CTEs are defined. CTE2 references CTE1, but in the final query only CTE1 is used.
  fail_str: |
    WITH cte1 AS (
        SELECT a
        FROM t
    ),
    cte2 AS (
        SELECT b
        FROM cte1
    )
    SELECT * FROM cte1

test_pass_no_cte_defined_2:
  # Issue 915: Crash on statements that don't have a SELECT
  pass_str: CREATE TABLE my_table (id INTEGER)

test_pass_cte_defined_and_used_4:
  # Issue 944: Detecting use of CTE in nested SELECT
  pass_str: |
    WITH max_date_cte AS (
        SELECT MAX(row_updated_date) AS max_date
        FROM warehouse.loaded_monthly
    )
    SELECT stuff
    FROM warehouse.updated_weekly
    WHERE row_updated_date <= (SELECT max_date FROM max_date_cte)

test_pass_cte_defined_and_used_5:
  # Variant on test_9, the WHERE with a nested SELECT is in a CTE
  pass_str: |
    WITH max_date_cte AS (
        SELECT MAX(row_updated_date) AS max_date
        FROM warehouse.loaded_monthly
    ),
    uses_max_date_cte AS (
        SELECT stuff
        FROM warehouse.updated_weekly
        WHERE row_updated_date <= (SELECT max_date FROM max_date_cte)
    )
    SELECT stuff
    FROM uses_max_date_cte

test_pass_cte_defined_and_used_6:
  # Issue 963: Infinite recursion when a CTE references itself
  pass_str: |
    with pages_xf as (
        select pages.received_at
        from pages
        where pages.received_at > (select max(received_at) from pages_xf)
    ),
    final as (
        select pages_xf.received_at
        from pages_xf
    )
    select * from final

test_fail_cte_defined_but_unused_4:
  # Variant on test_11 where there *is* an unused CTE
  fail_str: |
    with pages_xf as (
        select pages.received_at
        from pages
        where pages.received_at > (select max(received_at) from pages_xf)
    ),
    final as (
        select pages_xf.received_at
        from pages_xf
    ),
    unused as (
        select pages.received_at
        from pages
    )
    select * from final

test_pass_cte_defined_and_used_7:
  # Variant on test_11 where the CTE references itself indirectly
  pass_str: |
    with pages_xf as (
        select pages.received_at
        from pages
        where pages.received_at > (select max(received_at) from final)
    ),
    final as (
        select pages_xf.received_at
        from pages_xf
    )
    select * from final

test_snowflake_delete_cte:
  fail_str: |
    DELETE FROM MYTABLE1
        USING (
            WITH MYCTE AS (SELECT COLUMN2 FROM MYTABLE3)
            SELECT COLUMN3 FROM MYTABLE3
        ) X
    WHERE COLUMN1 = X.COLUMN3
  configs:
    core:
      dialect: snowflake

test_pass_exasol_values_clause_cte_1:
  pass_str: |
    WITH txt AS (
        VALUES (1)
        AS t (id)
    )
    SELECT *
    FROM txt
  configs:
    core:
      dialect: exasol

test_pass_exasol_values_clause_cte_2:
  pass_str: |
    WITH txt AS (
        VALUES (1, 'foo')
        AS t (id, name)
    )
    SELECT *
    FROM txt
  configs:
    core:
      dialect: exasol

test_pass_sparksql_values_clause_cte_1:
  pass_str: |
    WITH txt AS (
        VALUES (1)
        AS t (id)
    )
    SELECT *
    FROM txt
  configs:
    core:
      dialect: sparksql

test_pass_sparksql_values_clause_cte_2:
  pass_str: |
    WITH txt AS (
        VALUES (1, 'foo')
        AS t (id, name)
    )
    SELECT *
    FROM txt
  configs:
    core:
      dialect: sparksql

test_fail_query_uses_templating:
  fail_str: |
    WITH random_gibberish AS (
        SELECT 1
    )
    SELECT var_bar
    FROM {{ ref('issue_2235') }}

test_pass_update_cte:
  pass_str: |
    WITH cte AS (
        SELECT id, name, description
        FROM table1
    )
    UPDATE table2
    SET
        name = cte.name,
        description = cte.description
    FROM cte
    WHERE table2.id = cte.id;
  configs:
    core:
      dialect: postgres

test_fail_update_cte:
  fail_str: |
    WITH cte AS (
        SELECT id, name, description
        FROM table1
    )
    UPDATE table2
    SET
        name = 1,
        description = 2
  configs:
    core:
      dialect: postgres

test_fail_nested_cte:
  fail_str: |
    with a as (
        with b as (
            select 1 from foo
        )
        select 1
    )
    select * from a

test_pass_nested_query:
  pass_str: |
    WITH foo AS (
        SELECT * FROM zipcode
    ),
    bar AS (
        SELECT * FROM county
    ),
    stage AS (
        (SELECT * FROM foo)
        UNION ALL
        (SELECT * FROM bar)
    )
    SELECT *
    FROM stage

test_fail_nested_query:
  fail_str: |
    WITH foo AS (
        SELECT * FROM zipcode
    ),
    bar AS (
        SELECT * FROM county
    ),
    stage AS (
        (SELECT * FROM foo)
        UNION ALL
        (SELECT * FROM foo)
    )
    SELECT *
    FROM stage

test_pass_nested_query_in_from_clause:
  pass_str: |
    WITH foo AS (
        SELECT * FROM zipcode
    ),
    stage AS (
        SELECT *
        FROM (
            SELECT * FROM foo
        )
    )
    SELECT *
    FROM stage

test_fail_nested_query_in_from_clause:
  fail_str: |
    WITH foo AS (
        SELECT * FROM zipcode
    ),
    stage AS (
        SELECT *
        FROM (
            SELECT * FROM foofoo
        )
    )
    SELECT *
    FROM stage

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST04.yml

rule: ST04

test_pass_1:
  # The nested CASE is under a "WHEN", not an "ELSE".
  pass_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN
                CASE
                    WHEN colour = 'Black' THEN 'Growl'
                    WHEN colour = 'Grey' THEN 'Squeak'
                END
        END AS sound
    FROM mytable

test_pass_2:
  # Issue 3110. The nested CASE is part of a larger expression. Cannot flatten.
  pass_str: |
    SELECT
        CASE 'b'
            WHEN 'a' THEN TRUE
            ELSE '2022-01-01'::date > CURRENT_DATE + CASE 'b'
                WHEN 'b' THEN 8
                WHEN 'c' THEN 9
            END
            AND (c > 10)
        END AS test

test_fail_1:
  # Simple case.
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            WHEN species = 'Dog' THEN 'Woof'
        END AS sound
    FROM mytable

test_fail_2:
  # The nested "CASE" has two "WHEN" clauses. Getting
  # reasonable indentation is tricky.
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    WHEN species = 'Mouse' THEN 'Squeak'
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            WHEN species = 'Dog' THEN 'Woof'
            WHEN species = 'Mouse' THEN 'Squeak'
        END AS sound
    FROM mytable

test_fail_3:
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    WHEN species = 'Mouse' THEN 'Squeak'
                    ELSE "Whaa"
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            WHEN species = 'Dog' THEN 'Woof'
            WHEN species = 'Mouse' THEN 'Squeak'
            ELSE "Whaa"
        END AS sound
    FROM mytable

test_fail_4:
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE
                CASE
                    ELSE "Whaa"
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE "Whaa"
        END AS sound
    FROM mytable

test_fail_5:
  # The nested "CASE" is a one-liner.
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE CASE WHEN species = 'Dog' THEN 'Woof' END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            WHEN species = 'Dog' THEN 'Woof'
        END AS sound
    FROM mytable

test_double_nesting_1:
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    ELSE
                        CASE
                            WHEN species = 'Bird' THEN 'tweet'
                        END
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    WHEN species = 'Bird' THEN 'tweet'
                END
        END AS sound
    FROM mytable

test_double_nesting_2:
  # NOTE: This could be simplified more, but the current version of the rule
  # only unnests "ELSE" statements. To do this safely, it'd have to verify
  # that the various "WHEN" clauses are mutually exclusive.
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    ELSE
                        CASE
                            WHEN species = 'Bird' THEN 'tweet'
                        END
                END
            ELSE
                CASE
                    WHEN species = 'Hyena' THEN 'Cackle'
                END
        END AS sound
    FROM mytable
  fix_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN
                CASE
                    WHEN species = 'Dog' THEN 'Woof'
                    WHEN species = 'Bird' THEN 'tweet'
                END
            WHEN species = 'Hyena' THEN 'Cackle'
        END AS sound
    FROM mytable

test_fail_no_copy_code_out_of_template:
  # The rule wants to flatten the nested CASE statement, but
  # LintFix.has_template_conflicts() correctly prevents it copying code out
  # of the templated region. Hence, the query is not modified.
  fail_str: |
    SELECT
        c1,
        CASE
            WHEN species = 'Rat' THEN 'Squeak'
            ELSE
                CASE
                    {{ inner_when }}
                END
        END AS sound
    FROM mytable
  configs:
    core:
      ignore_templated_areas: false
    templater:
      jinja:
        context:
          inner_when: "WHEN species = 'Dog' THEN 'Woof'"
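# For context (an illustrative comment only, not part of the original file):
# the flattened result the rule is prevented from producing here would
# presumably look roughly like
#     CASE
#         WHEN species = 'Rat' THEN 'Squeak'
#         {{ inner_when }}
#     END AS sound
# but lifting the templated {{ inner_when }} out of its template slice is
# exactly what LintFix.has_template_conflicts() blocks.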
sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST05.yml

rule: ST05

select_fail:
  fail_str: |
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from b
    ) as b on (a.x = b.x)
  fix_str: |
    with b as (
        select x, z from b
    )
    select
        a.x, a.y, b.z
    from a
    join b on (a.x = b.x)

cte_select_fail:
  fail_str: |
    with prep as (
        select 1 as x, 2 as z
    )
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from b
    ) as b on (a.x = b.x)
  fix_str: |
    with prep as (
        select 1 as x, 2 as z
    ),
    b as (
        select x, z from b
    )
    select
        a.x, a.y, b.z
    from a
    join b on (a.x = b.x)

cte_with_clashing_name:
  fail_str: |
    with prep_1 as (
        select 1 as x, 2 as z
    )
    select
        a.x, a.y, z
    from a
    join (
        select x, z from b
    ) on a.x = z
  fix_str: |
    with prep_1 as (
        select 1 as x, 2 as z
    ),
    prep_2 as (
        select x, z from b
    )
    select
        a.x, a.y, z
    from a
    join prep_2 on a.x = z

double_nested_fail:
  fail_str: |
    with p_cte as (
        select 1 as x, 2 as z
        UNION ALL
        select 1 as x, 2 as z
    )
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from (
            select x, z from p_cte
        ) as c
    ) as b on (a.x = b.x)
  fix_str: |
    with p_cte as (
        select 1 as x, 2 as z
        UNION ALL
        select 1 as x, 2 as z
    ),
    c as (
        select x, z from p_cte
    ),
    b as (
        select x, z from c
    )
    select
        a.x, a.y, b.z
    from a
    join b on (a.x = b.x)
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

double_nested_fail_2:
  fail_str: |
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from (
            select x, z from p_cte
        ) as b
    ) as b on (a.x = b.x)
  fix_str: |
    with b as (
        select x, z from (
            select x, z from p_cte
        ) as b
    )
    select
        a.x, a.y, b.z
    from a
    join b on (a.x = b.x)
  violations_after_fix:
    - description: select_statement clauses should not contain subqueries.
        Use CTEs instead
      line_no: 2
      line_pos: 20
      name: structure.subquery
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

unfixable_cte_clash:
  fail_str: |
    with "b" as (
        select x, z from p_cte
    )
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from (
            select 1
        ) as b
    ) as c on (a.x = b.x)
  fix_str: |
    with "b" as (
        select x, z from p_cte
    ),
    c as (
        select x, z from (
            select 1
        ) as b
    )
    select
        a.x, a.y, b.z
    from a
    join c on (a.x = b.x)
  violations_after_fix:
    - description: select_statement clauses should not contain subqueries.
        Use CTEs instead
      line_no: 5
      line_pos: 20
      name: structure.subquery
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

with_recursive_fail_no_fix:
  fail_str: |
    with recursive p_cte as (
        select 1 from tbl_foo
    )
    select
        a.x, a.y, b.z
    from a
    join (
        select x, z from p_cte
    ) as b on a.x = b.x

select_multijoin_fail:
  fail_str: |
    select
        a.x, d.x as foo, a.y, b.z
    from (select a, x from foo) a
    join d using(x)
    join (
        select x, z from b
    ) as b using (x)
  fix_str: |
    with a as (select a, x from foo),
    b as (
        select x, z from b
    )
    select
        a.x, d.x as foo, a.y, b.z
    from a
    join d using(x)
    join b using (x)
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

with_fail:
  fail_str: |
    select
        a.x, a.y, b.z
    from a
    join (
        with d as (
            select x, z from b
        )
        select * from d
    ) using (x)
  fix_str: |
    with prep_1 as (
        with d as (
            select x, z from b
        )
        select * from d
    )
    select
        a.x, a.y, b.z
    from a
    join prep_1 using (x)

set_fail:
  fail_str: |
    SELECT
        a.x, a.y, b.z
    FROM a
    JOIN (
        select x, z from b
        union
        select x, z from d
    ) USING (x)
  fix_str: |
    WITH prep_1 AS (
        select x, z from b
        union
        select x, z from d
    )
    SELECT
        a.x, a.y, b.z
    FROM a
    JOIN prep_1 USING (x)

simple_pass:
  pass_str: |
    with c as (
        select x, z from b
    )
    select
        a.x, a.y, c.z
    from a
    join c on (a.x = c.x)

from_clause_pass:
  pass_str: |
    select
        a.x, a.y
    from (
        select * from b
    ) as a
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: join

from_clause_fail:
  fail_str: |
    select
        a.x, a.y
    from (
        select * from b
    ) as a
  fix_str: |
    with a as (
        select * from b
    )
    select
        a.x, a.y
    from a
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: from

both_clause_fail:
  fail_str: |
    select
        a.x, a.y
    from (
        select * from b
    ) as a
  fix_str: |
    with a as (
        select * from b
    )
    select
        a.x, a.y
    from a
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

no_inner_from_pass:
  pass_str: |
    select a
    from (select 1 as a)

uses_templating:
  fail_str: |
    SELECT
        a_table.id,
        b_table.id
    FROM a_table
    INNER JOIN (
        SELECT
            id,
            {{"mrgn"}} AS margin
        FROM b_tbl
    ) AS b_table ON a_table.some_column = b_table.some_column

issue_2898_redshift_attribute_error:
  fail_str: |
    INSERT INTO target_table (target_column)
    SELECT table1.column1
    FROM table1
    INNER JOIN (
        SELECT table2.join_column
        FROM table2
    ) AS temp3
    ON table1.join_column = temp3.join_column
  fix_str: |
    INSERT INTO target_table (target_column)
    WITH temp3 AS (
        SELECT table2.join_column
        FROM table2
    )
    SELECT table1.column1
    FROM table1
    INNER JOIN temp3
    ON table1.join_column = temp3.join_column
  configs:
    core:
      dialect: postgres

issue_3623_internal_error_multiple_templated_files:
  fail_str: |
    CREATE TABLE #procs
    WITH (DISTRIBUTION = HASH([eid]))
    AS
    WITH proc_icd AS (
        SELECT * FROM fbp
    )

    SELECT *
    FROM (
        SELECT * FROM proc_icd
    ) sub
    ;
    CREATE TABLE #tem
    WITH (DISTRIBUTION = HASH([eid]))
    AS
    SELECT *
    FROM (
        SELECT * FROM a
    ) b
    ;
  fix_str: |
    CREATE TABLE #procs
    WITH (DISTRIBUTION = HASH([eid]))
    AS
    WITH proc_icd AS (
        SELECT * FROM fbp
    ),
    sub AS (
        SELECT * FROM proc_icd
    )

    SELECT *
    FROM sub
    ;
    CREATE TABLE #tem
    WITH (DISTRIBUTION = HASH([eid]))
    AS
    WITH b AS (
        SELECT * FROM a
    )
    SELECT *
    FROM b
    ;
  configs:
    core:
      dialect: tsql
    rules:
      structure.subquery:
        forbid_subquery_in: both

issue_3622_no_space_after_from:
  fail_str: |
    CREATE TABLE t AS
    SELECT col1
    FROM(
        SELECT 'x' AS col1
    ) x
  fix_str: |
    CREATE TABLE t AS
    WITH x AS (
        SELECT 'x' AS col1
    )
    SELECT col1
    FROM x
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

issue_3617_parentheses_around_ctas_select:
  fail_str: |
    CREATE TABLE t AS
    (SELECT Col1
        FROM (
            SELECT 'x' AS COl1
        ) x
    )
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

issue_3572_correlated_subquery_1:
  pass_str: |
    select pd.*
    from person_dates
    join (select * from events where events.name = person_dates.name)

issue_3572_correlated_subquery_2:
  pass_str: |
    select pd.*
    from person_dates as pd
    join (select * from events as ce where ce.name = pd.name)

issue_3572_correlated_subquery_3:
  pass_str: |
    select pd.*
    from person_dates as pd
    join (select * from events as ce where ce.name = person_dates.name)

issue_3598_avoid_looping_1:
  fail_str: |
    WITH cte1 AS (
        SELECT a
        FROM (SELECT a)
    )
    SELECT a FROM cte1
  fix_str: |
    WITH prep_1 AS (SELECT a),
    cte1 AS (
        SELECT a
        FROM prep_1
    )
    SELECT a FROM cte1
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

issue_3598_avoid_looping_2:
  fail_str: |
    WITH cte1 AS (
        SELECT *
        FROM (SELECT * FROM mongo.temp)
    )
    SELECT * FROM cte1
  fix_str: |
    WITH prep_1 AS (SELECT * FROM mongo.temp),
    cte1 AS (
        SELECT *
        FROM prep_1
    )
    SELECT * FROM cte1
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

test_fail_subquery_in_cte:
  fail_str: |
    with b as (
        select x, z from (
            select x, z from p_cte
        )
    )
    select b.z from b
  fix_str: |
    with prep_1 as (
        select x, z from p_cte
    ),
    b as (
        select x, z from prep_1
    )
    select b.z from b
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

test_fail_subquery_in_cte_2:
  fail_str: |
    with b as (
        select x, y from (select x, y, z from a)
    )
    select x, y from a
    union all
    select x, y from b
  fix_str: |
    with prep_1 as (select x, y, z from a),
    b as (
        select x, y from prep_1
    )
    select x, y from a
    union all
    select x, y from b
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

test_fail_subquery_in_cte_3:
  fail_str: |
    with b as (
        select x, y from(select x, y, z from a)
    )
    select x, y from b
  fix_str: |
    with prep_1 as (select x, y, z from a),
    b as (
        select x, y from prep_1
    )
    select x, y from b
  configs:
    rules:
      structure.subquery:
        forbid_subquery_in: both

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST06.yml

rule: ST06

test_pass_select_statement_order:
  pass_str: |
    select
        a,
        cast(b as int) as b,
        c
    from x

test_fail_select_statement_order_1:
  fail_str: |
    select
        a,
        row_number() over (partition by id order by date) as y,
        b
    from x
  line_numbers: [1]
  fix_str: |
    select
        a,
        b,
        row_number() over (partition by id order by date) as y
    from x

test_fail_select_statement_order_2:
  fail_str: |
    select
        row_number() over (partition by id order by date) as y,
        *,
        cast(b as int) as b_int
    from x
  line_numbers: [1]
  fix_str: |
    select
        *,
        cast(b as int) as b_int,
        row_number() over (partition by id order by date) as y
    from x

test_fail_select_statement_order_3:
  fail_str: |
    select
        row_number() over (partition by id order by date) as y,
        cast(b as int) as b_int,
        *
    from x
  line_numbers: [1]
  fix_str: |
    select
        *,
        cast(b as int) as b_int,
        row_number() over (partition by id order by date) as y
    from x

test_fail_select_statement_order_4:
  fail_str: |
    select
        row_number() over (partition by id order by date) as y,
        b::int,
        *
    from x
  line_numbers: [1]
  fix_str: |
    select
        *,
        b::int,
        row_number() over (partition by id order by date) as y
    from x

test_fail_select_statement_order_5:
  fail_str: |
    select
        row_number() over (partition by id order by date) as y,
        *,
        2::int + 4 as sum,
        cast(b) as c
    from x
  line_numbers: [1]
  fix_str: |
    select
        *,
        cast(b) as c,
        row_number() over (partition by id order by date) as y,
        2::int + 4 as sum
    from x

test_union_statements_ignored:
  pass_str: |
    select
        a + b as c,
        d
    from table_a
    union all
    select
        c,
        d
    from table_b

test_insert_statements_ignored:
  pass_str: |
    INSERT INTO example_schema.example_table
    (id, example_column, rank_asc, rank_desc)
    SELECT
        id,
        CASE WHEN col_a IN('a', 'b', 'c') THEN col_a END AS example_column,
        rank_asc,
        rank_desc
    FROM another_schema.another_table

test_insert_statement_with_cte_ignored:
  pass_str: |
    INSERT INTO my_table
    WITH my_cte AS (SELECT * FROM t1)
    SELECT MAX(field1), field2
    FROM t1

test_merge_statements_ignored:
  pass_str: |
    MERGE INTO t
    USING (
        SELECT
            DATE_TRUNC('DAY', end_time) AS time_day,
            b
        FROM u
    ) AS u
        ON (a = b)
    WHEN MATCHED THEN
        UPDATE SET a = b
    WHEN NOT MATCHED THEN
        INSERT (b)
        VALUES (c)

test_merge_statement_with_cte_ignored:
  pass_str: |
    MERGE INTO t
    USING (
        WITH my_cte AS (SELECT * FROM t1)
        SELECT MAX(field1), field2
        FROM t1
    ) AS u
        ON (a = b)
    WHEN MATCHED THEN
        UPDATE SET a = b
    WHEN NOT MATCHED THEN
        INSERT (b)
        VALUES (c)

test_create_table_as_select_statements_ignored:
  pass_str: |
    CREATE TABLE new_table AS (
        SELECT
            id,
            CASE WHEN col_a IN('a', 'b', 'c') THEN col_a END AS example_column,
            rank_asc,
            rank_desc
        FROM another_schema.another_table
    )

test_create_table_as_select_with_cte_ignored:
  pass_str: |
    CREATE TABLE new_table AS (
        WITH my_cte AS (SELECT * FROM t1)
        SELECT MAX(field1), field2
        FROM t1
    )

test_fail_fix_explicit_column_references_1:
  fail_str: |
    SELECT
        DATE_TRUNC('DAY', end_time) AS time_day,
        b_field
    FROM table_name
    GROUP BY time_day, b_field
  fix_str: |
    SELECT
        b_field,
        DATE_TRUNC('DAY', end_time) AS time_day
    FROM table_name
    GROUP BY time_day, b_field

test_fail_fix_explicit_column_references_2:
  fail_str: |
    SELECT
        SUM(a_field) OVER (ORDER BY 1) AS a_field_window_sum,
        b_field
    FROM table_name
    GROUP BY a_field_window_sum, b_field
  fix_str: |
    SELECT
        b_field,
        SUM(a_field) OVER (ORDER BY 1) AS a_field_window_sum
    FROM table_name
    GROUP BY a_field_window_sum, b_field

test_fail_no_fix_implicit_column_references:
  fail_str: |
    SELECT
        DATE_TRUNC('DAY', end_time) AS time_day,
        b_field
    FROM table_name
    GROUP BY 1, 2

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST07.yml

rule: ST07

test_pass_specify_join_keys:
  pass_str: select x.a from x inner join y on x.id = y.id

test_fail_specify_join_keys_1:
  fail_str: select x.a from x inner join y using (id)
  fix_str: select x.a from x inner join y ON x.id = y.id

test_fail_specify_join_keys_1_with_alias:
  fail_str: select x.a from foo_table AS x inner join y using (id)
  fix_str: select x.a from foo_table AS x inner join y ON x.id = y.id

test_fail_specify_join_keys_1_with_subquery:
  fail_str: select x.a from (SELECT 1 AS a) AS x inner join y using (id)
  fix_str: select x.a from (SELECT 1 AS a) AS x inner join y ON x.id = y.id

test_fail_specify_join_keys_1_with_multi_using:
  fail_str: select x.a from x inner join y using (id, a)
  fix_str: select x.a from x inner join y ON x.id = y.id AND x.a = y.a

test_fail_specify_join_keys_2:
  desc: Keys were specified for the first join but not the second one.
  fail_str: select x.a from x inner join y on x.id = y.id inner join z using (id)

test_partial_fixed_up_to_2nd_join:
  fail_str: |
    select x.a
    from x
    inner join y using(id, foo)
    inner join z using(id)
  fix_str: |
    select x.a
    from x
    inner join y ON x.id = y.id AND x.foo = y.foo
    inner join z using(id)
  violations_after_fix:
    - description: Found USING statement. Expected only ON statements.
      line_no: 4
      line_pos: 14
      name: structure.using

select_using_fail:
  fail_str: |
    SELECT *
    FROM A_TABLE
    INNER JOIN (
        SELECT margin
        FROM B_TABLE
    ) USING (SOME_COLUMN)

test_fail_parent_child_positioning:
  # Check for issue from https://github.com/sqlfluff/sqlfluff/issues/3656
  fail_str: |
    select *
    from c1
    join c2 using (ID)
    join (select * from c3 join c4 using (ID)) as c5 on c1.ID = c5.ID
  fix_str: |
    select *
    from c1
    join c2 ON c1.ID = c2.ID
    join (select * from c3 join c4 ON c3.ID = c4.ID) as c5 on c1.ID = c5.ID

fail_but_dont_fix_templated_table_names:
  fail_str: |
    SELECT
        {{ "table_a" }}.field_1,
        table_b.field_2
    FROM {{ "table_a" }}
    INNER JOIN table_b USING (id)

test_pass_clickhouse:
  pass_str: SELECT * FROM test1 as t1 LEFT SEMI JOIN test2 USING ty1,ty2;
  configs:
    core:
      dialect: clickhouse

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST08.yml

rule: ST08

test_fail_distinct_with_parenthesis_1:
  # Check we get fails for using DISTINCT apparently incorrectly
  fail_str: SELECT DISTINCT(a)
  fix_str: SELECT DISTINCT a

test_fail_distinct_with_parenthesis_2:
  fail_str: SELECT DISTINCT(a + b) * c
  fix_str: SELECT DISTINCT (a + b) * c

test_fail_distinct_with_parenthesis_3:
  fail_str: SELECT DISTINCT (a)
  fix_str: SELECT DISTINCT a

test_fail_distinct_with_parenthesis_4:
  pass_str: SELECT DISTINCT (a + b) * c

test_fail_distinct_with_parenthesis_5:
  fail_str: |
    SELECT DISTINCT(field_1)
    FROM my_table
  fix_str: |
    SELECT DISTINCT field_1
    FROM my_table

test_fail_distinct_with_parenthesis_6:
  fail_str: |
    SELECT DISTINCT(a), b
  fix_str: |
    SELECT DISTINCT a, b

test_fail_distinct_with_parenthesis_7:
  pass_str: |
    SELECT DISTINCT ON(bcolor) bcolor, fcolor
    FROM distinct_demo
  configs:
    core:
      dialect: postgres

test_pass_no_distinct:
  pass_str: |
    SELECT a, b

test_fail_distinct_column_inside_count:
  fail_str: |
    SELECT COUNT(DISTINCT(unique_key))
  fix_str: |
    SELECT COUNT(DISTINCT unique_key)

test_fail_distinct_concat_inside_count:
  fail_str: |
    SELECT COUNT(DISTINCT(CONCAT(col1, '-', col2, '-', col3)))
  fix_str: |
    SELECT COUNT(DISTINCT CONCAT(col1, '-', col2, '-', col3))

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/ST09.yml

rule: ST09

test_pass_no_join_clauses:
  pass_str: select * from foo

test_pass_no_join_on_conditions:
  pass_str: select foo.a, bar.b from foo left join bar using (a)

test_pass_ignored_subconditions:
  pass_str: select foo.a, bar.b from foo left join bar on bar.a between foo.a and foo.b

test_pass_unqualified_column_reference:
  pass_str: select foo.a, bar.b from foo left join bar on bar.b = a

test_pass_earlier_table_first:
  pass_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on foo.a = bar.a

test_pass_later_table_first:
  pass_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on bar.a = foo.a
  configs:
    rules:
      structure.join_condition_order:
        preferred_first_table_in_join_clause: later

test_fail_earlier_table_first:
  fail_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on foo.a = bar.a
  fix_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on bar.a = foo.a
  configs:
    rules:
      structure.join_condition_order:
        preferred_first_table_in_join_clause: later

test_fail_later_table_first:
  fail_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on foo.a = bar.a

test_fail_later_table_first_left_outer:
  fail_str: |
    select foo.a, bar.b
    from foo
    left outer join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    left outer join bar
        on foo.a = bar.a

test_fail_later_table_first_inner:
  fail_str: |
    select foo.a, bar.b
    from foo
    inner join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    inner join bar
        on foo.a = bar.a

test_fail_later_table_first_right:
  fail_str: |
    select foo.a, bar.b
    from foo
    right join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    right join bar
        on foo.a = bar.a

test_fail_later_table_first_right_outer:
  fail_str: |
    select foo.a, bar.b
    from foo
    right outer join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    right outer join bar
        on foo.a = bar.a

test_fail_later_table_first_full_outer:
  fail_str: |
    select foo.a, bar.b
    from foo
    full outer join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from foo
    full outer join bar
        on foo.a = bar.a

test_pass_later_table_first_cross:
  pass_str: |
    select foo.a, bar.b
    from foo
    cross join bar
    where bar.a = foo.a

test_fail_later_table_first_multiple_subconditions:
  fail_str: |
    select foo.a, foo.b, bar.c
    from foo
    left join bar
        on bar.a = foo.a
        and bar.b = foo.b
  fix_str: |
    select foo.a, foo.b, bar.c
    from foo
    left join bar
        on foo.a = bar.a
        and foo.b = bar.b

test_fail_later_table_first_multiple_comparison_operators:
  fail_str: |
    select foo.a, bar.b, baz.c
    from foo
    left join bar
        on bar.a != foo.a
        and bar.b > foo.b
        and bar.c <= foo.c
    left join baz
        on baz.a <> foo.a
        and baz.b >= foo.b
        and baz.c < foo.c
  fix_str: |
    select foo.a, bar.b, baz.c
    from foo
    left join bar
        on foo.a != bar.a
        and foo.b < bar.b
        and foo.c >= bar.c
    left join baz
        on foo.a <> baz.a
        and foo.b <= baz.b
        and foo.c > baz.c

test_fail_later_table_first_subquery:
  fail_str: |
    select foo.a, bar.b
    from (
        select baz.a, qux.b
        from baz
        left join qux
            on qux.a = baz.a
    ) foo
    left join bar
        on bar.a = foo.a
  fix_str: |
    select foo.a, bar.b
    from (
        select baz.a, qux.b
        from baz
        left join qux
            on baz.a = qux.a
    ) foo
    left join bar
        on foo.a = bar.a

test_fail_later_table_first_cte:
  fail_str: |
    with foo as (
        select baz.a, qux.b
        from baz
        left join qux
            on qux.a = baz.a
    )
    select foo.a, bar.b
    from foo
    left join bar
        on bar.a = foo.a
  fix_str: |
    with foo as (
        select baz.a, qux.b
        from baz
        left join qux
            on baz.a = qux.a
    )
    select foo.a, bar.b
    from foo
    left join bar
        on foo.a = bar.a

test_fail_later_table_no_join_clause_in_cte:
  fail_str: |
    with foo as (
        select * from bar
    )
    select foo.a, baz.b
    from foo
    left join baz
        on baz.a = foo.a
  fix_str: |
    with foo as (
        select * from bar
    )
    select foo.a, baz.b
    from foo
    left join baz
        on foo.a = baz.a

test_fail_later_table_no_join_clause_in_main_query:
  fail_str: |
    with foo as (
        select bar.b
        from bar
        left join baz
            on baz.a = bar.a
    )
    select b from foo
  fix_str: |
    with foo as (
        select bar.b
        from bar
        left join baz
            on bar.a = baz.a
    )
    select b from foo

test_fail_later_table_first_brackets_after_on:
  fail_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on (bar.a = foo.a)
  fix_str: |
    select foo.a, bar.b
    from foo
    left join bar
        on (foo.a = bar.a)

test_fail_later_table_first_brackets_after_from:
  fail_str: |
    select foo.a, bar.b
    from (
        foo
        left join bar
            on bar.a = foo.a
    )
  fix_str: |
    select foo.a, bar.b
    from (
        foo
        left join bar
            on foo.a = bar.a
    )

test_fail_later_table_first_quoted_table_and_column:
  fail_str: |
    select "foo"."a", "bar"."b"
    from "foo"
    left join "bar"
        on "bar"."a" = "foo"."a"
  fix_str: |
    select "foo"."a", "bar"."b"
    from "foo"
    left join "bar"
        on "foo"."a" = "bar"."a"

sqlfluff-2.3.5/test/fixtures/rules/std_rule_cases/TQ01.yml

rule: TQ01

test_fail_sp_prefix_1:
  fail_str: |
    CREATE PROCEDURE
dbo.sp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_fail_sp_prefix_2: fail_str: | CREATE PROCEDURE dbo.[sp_pull_data] AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_fail_sp_prefix_3: fail_str: | CREATE PROCEDURE dbo."sp_pull_data" AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_1: pass_str: | CREATE PROCEDURE dbo.pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_2: pass_str: | CREATE PROCEDURE dbo.usp_pull_data AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_3: pass_str: | CREATE PROCEDURE dbo.[usp_pull_data] AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql test_pass_non_sp_prefix_4: pass_str: | CREATE PROCEDURE dbo."usp_pull_data" AS SELECT ID, DataDate, CaseOutput FROM table1 configs: core: dialect: tsql sqlfluff-2.3.5/test/fixtures/templater/000077500000000000000000000000001451700765000201755ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_a/000077500000000000000000000000001451700765000215705ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_a/.sqlfluff000066400000000000000000000001121451700765000234050ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] testing_schema=sch1 testing_table=tbl2 sqlfluff-2.3.5/test/fixtures/templater/jinja_a/jinja.sql000066400000000000000000000000701451700765000234010ustar00rootroot00000000000000SELECT 56 FROM {{ testing_schema }}.{{ testing_table }} sqlfluff-2.3.5/test/fixtures/templater/jinja_a/jinja.yml000066400000000000000000000006421451700765000234100ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: 56 from_clause: - keyword: FROM - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: "." 
- naked_identifier: tbl2 sqlfluff-2.3.5/test/fixtures/templater/jinja_b/000077500000000000000000000000001451700765000215715ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_b/.sqlfluff000066400000000000000000000001771451700765000234210ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] some_macro = {% macro some_func(something) %}{{something}} + {{something * 2}}{% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_b/jinja.sql000066400000000000000000000002311451700765000234010ustar00rootroot00000000000000SELECT {% for key, value in (("a", 3), ("b", 7)) %}{{ some_func(value) }} as {{ key }}{% if not loop.last %},{% endif %}{% endfor %} FROM some_table sqlfluff-2.3.5/test/fixtures/templater/jinja_b/jinja.yml000066400000000000000000000025301451700765000234070ustar00rootroot00000000000000# Testing that templating works as expected with macros file: statement: select_statement: - select_clause: - keyword: SELECT - newline: "\n" - whitespace: " " - select_clause_element: - expression: - numeric_literal: 3 - whitespace: " " - binary_operator: + - whitespace: " " - numeric_literal: 6 - whitespace: " " - alias_expression: - keyword: as - whitespace: " " - naked_identifier: a - comma: "," - select_clause_element: - expression: - numeric_literal: 7 - whitespace: " " - binary_operator: + - whitespace: " " - numeric_literal: 14 - whitespace: " " - alias_expression: - keyword: as - whitespace: " " - naked_identifier: b - newline: "\n" - from_clause: - keyword: FROM - whitespace: " " - from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: some_table newline: "\n" sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/000077500000000000000000000000001451700765000224235ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_config.sql000066400000000000000000000000431451700765000271500ustar00rootroot00000000000000{{ config(blah=60) }} SELECT TRUE sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_config.yml000066400000000000000000000002271451700765000271560ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'TRUE' sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_is_incremental.sql000066400000000000000000000001351451700765000307010ustar00rootroot00000000000000SELECT {{ is_incremental() }} FROM t_table1 {% if is_incremental() %} WHERE TRUE {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_is_incremental.yml000066400000000000000000000007051451700765000307060ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: boolean_literal: 'True' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table1 where_clause: - keyword: WHERE - expression: boolean_literal: 'TRUE' sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_ref.sql000066400000000000000000000000471451700765000264630ustar00rootroot00000000000000SELECT col1 FROM {{ ref('my_table') }} sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_ref.yml000066400000000000000000000006011451700765000264610ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: 
table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_source.sql000066400000000000000000000000611451700765000272030ustar00rootroot00000000000000SELECT col1 FROM {{ source('source', 'table') }} sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_source.yml000066400000000000000000000006051451700765000272110ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: source_table sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.sql000066400000000000000000000004011451700765000266600ustar00rootroot00000000000000{% test my_cool_test(model, column_name, kwarg1=none, kwarg2=none) %} SELECT {{ column_name }} FROM {{ model }} WHERE thing = 1 {% if kwarg1 %} AND otherthing = 2 {% endif %} {% if kwarg2 %} AND anotherthing = 3 {% endif %} {% endtest %} -- no sql produced sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_test.yml000066400000000000000000000000111451700765000266570ustar00rootroot00000000000000file: [] sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this.sql000066400000000000000000000000341451700765000266520ustar00rootroot00000000000000SELECT col1 FROM {{ this }} sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_this.yml000066400000000000000000000006031451700765000266560ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_model sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_var_default.sql000066400000000000000000000000341451700765000301770ustar00rootroot00000000000000SELECT {{ var('foo', 42) }} sqlfluff-2.3.5/test/fixtures/templater/jinja_c_dbt/dbt_builtins_var_default.yml000066400000000000000000000002641451700765000302060ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: item sqlfluff-2.3.5/test/fixtures/templater/jinja_d_roundtrip/000077500000000000000000000000001451700765000237015ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_d_roundtrip/.sqlfluff000066400000000000000000000001311451700765000255170ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] some_field=nothing_interesting my_table=another_table sqlfluff-2.3.5/test/fixtures/templater/jinja_d_roundtrip/test.sql000066400000000000000000000003441451700765000254020ustar00rootroot00000000000000select {{some_field}}, (1+2 ) AS kev, "wrongly indented field" as something_else, trailing_whitespace , 4678.9 from {{my_table}} where indentation = "wrong" AND NotSpacedProperly AND 4+6 > 9 sqlfluff-2.3.5/test/fixtures/templater/jinja_e/000077500000000000000000000000001451700765000215745ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_e/jinja.sql000066400000000000000000000004531451700765000234120ustar00rootroot00000000000000{%- set evens = [] -%} {%- for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -%} {%- if x % 2 == 0 -%} {%- do evens.append(x) -%} {%- endif -%} {%- endfor -%} select {% for x in evens -%} {{ x }} as {{ 'col' ~ x }} {%- if not loop.last -%}, {% endif %} {% endfor -%} 
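For orientation, the jinja_e template above can be rendered with plain Jinja2 to see the SQL the parser ultimately receives. A minimal sketch, assuming only that the jinja2 package is installed (the template text is condensed from jinja_e/jinja.sql):

# Minimal sketch: render the jinja_e template with plain Jinja2. The "do"
# statement is an opt-in Jinja2 extension, without which the {%- do ... -%}
# tag in the fixture would raise a TemplateSyntaxError.
from jinja2 import Environment

TEMPLATE = (
    "{%- set evens = [] -%}"
    "{%- for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -%}"
    "{%- if x % 2 == 0 -%}{%- do evens.append(x) -%}{%- endif -%}"
    "{%- endfor -%}"
    "select {% for x in evens -%}{{ x }} as {{ 'col' ~ x }}"
    "{%- if not loop.last -%}, {% endif %}{% endfor -%}"
)

env = Environment(extensions=["jinja2.ext.do"])
print(env.from_string(TEMPLATE).render())
# -> select 2 as col2, 4 as col4, 6 as col6, 8 as col8, 10 as col10
#    (the statement recorded in jinja_e/jinja.yml below)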
sqlfluff-2.3.5/test/fixtures/templater/jinja_e/jinja.yml000066400000000000000000000016151451700765000234150ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '2' alias_expression: keyword: as naked_identifier: col2 - comma: ',' - select_clause_element: numeric_literal: '4' alias_expression: keyword: as naked_identifier: col4 - comma: ',' - select_clause_element: numeric_literal: '6' alias_expression: keyword: as naked_identifier: col6 - comma: ',' - select_clause_element: numeric_literal: '8' alias_expression: keyword: as naked_identifier: col8 - comma: ',' - select_clause_element: numeric_literal: '10' alias_expression: keyword: as naked_identifier: col10 sqlfluff-2.3.5/test/fixtures/templater/jinja_f/000077500000000000000000000000001451700765000215755ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_f/.sqlfluff000066400000000000000000000002001451700765000234100ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] top_words=['shop', 'products', 'code'] NUM_EMBEDDING_COMPONENTS=2 num_embedding_components=4 sqlfluff-2.3.5/test/fixtures/templater/jinja_f/jinja.sql000066400000000000000000000004541451700765000234140ustar00rootroot00000000000000SELECT job_id {% for var in top_words %} , MAX(CASE WHEN word = '{{var}}' THEN 1 ELSE 0 END) AS {{var}}_word {% endfor %} {% for position in range(NUM_EMBEDDING_COMPONENTS) %} , safe_cast(vector_array[ORDINAL({{position}})] AS FLOAT64) AS v{{position}} {% endfor %} FROM tbl LIMIT 1 sqlfluff-2.3.5/test/fixtures/templater/jinja_f/jinja.yml000066400000000000000000000125541451700765000234220ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: job_id - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'shop'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: keyword: AS naked_identifier: shop_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'products'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: keyword: AS naked_identifier: products_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: MAX bracketed: start_bracket: ( expression: case_expression: - keyword: CASE - when_clause: - keyword: WHEN - expression: column_reference: naked_identifier: word comparison_operator: raw_comparison_operator: '=' quoted_literal: "'code'" - keyword: THEN - expression: numeric_literal: "1" - else_clause: - keyword: ELSE - expression: numeric_literal: "0" - keyword: END end_bracket: ) alias_expression: keyword: AS naked_identifier: code_word - comma: "," - select_clause_element: function: function_name: function_name_identifier: safe_cast bracketed: 
start_bracket: ( expression: column_reference: naked_identifier: vector_array array_accessor: start_square_bracket: "[" expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: numeric_literal: "0" end_bracket: ) end_square_bracket: "]" keyword: AS data_type: data_type_identifier: FLOAT64 end_bracket: ) alias_expression: keyword: AS naked_identifier: v0 - comma: "," - select_clause_element: function: function_name: function_name_identifier: safe_cast bracketed: start_bracket: ( expression: column_reference: naked_identifier: vector_array array_accessor: start_square_bracket: "[" expression: function: function_name: function_name_identifier: ORDINAL bracketed: start_bracket: ( expression: numeric_literal: "1" end_bracket: ) end_square_bracket: "]" keyword: AS data_type: data_type_identifier: FLOAT64 end_bracket: ) alias_expression: keyword: AS naked_identifier: v1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: tbl limit_clause: keyword: LIMIT numeric_literal: "1" sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/000077500000000000000000000000001451700765000231425ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/.sqlfluff000066400000000000000000000003261451700765000247660ustar00rootroot00000000000000[sqlfluff] dialect=postgres [sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:context] top_words=['shop', 'products', 'code'] NUM_EMBEDDING_COMPONENTS=2 num_embedding_components=4 sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/jinja.sql000066400000000000000000000004251451700765000247570ustar00rootroot00000000000000{{ config( materialized = "incremental", unique_key = 'id' ) }} -- Test macro loading from folder. 
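-- sb_incremental() is defined in macros/macro_1.sql below; in incremental
-- mode it expands to a subselect that filters out ids already present in
-- the target table, otherwise it expands to the source relation unchanged.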
select distinct on (id) (json -> 'type' ->> 'id')::int as id, (json -> 'type' ->> 'name') as name from {{ sb_incremental(this, 'sb_route_events') }} as e sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/jinja.yml000066400000000000000000000073171451700765000247700ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_modifier: - keyword: distinct - keyword: 'on' - bracketed: - start_bracket: ( - expression: column_reference: naked_identifier: id - end_bracket: ) - select_clause_element: expression: cast_expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: json - binary_operator: -> - quoted_literal: "'type'" - binary_operator: ->> - quoted_literal: "'id'" end_bracket: ) casting_operator: '::' data_type: keyword: int alias_expression: keyword: as naked_identifier: id - comma: ',' - select_clause_element: expression: bracketed: start_bracket: ( expression: - column_reference: naked_identifier: json - binary_operator: -> - quoted_literal: "'type'" - binary_operator: ->> - quoted_literal: "'name'" end_bracket: ) alias_expression: keyword: as naked_identifier: name from_clause: keyword: from from_expression: from_expression_element: table_expression: bracketed: start_bracket: ( select_statement: select_clause: keyword: select select_clause_element: wildcard_expression: wildcard_identifier: star: '*' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: sb_route_events alias_expression: keyword: as naked_identifier: s where_clause: keyword: where bracketed: start_bracket: ( expression: - column_reference: - naked_identifier: s - dot: . - naked_identifier: match_id - keyword: not - keyword: in - bracketed: - start_bracket: ( - select_statement: select_clause: keyword: select select_clause_modifier: keyword: distinct select_clause_element: column_reference: naked_identifier: match_id from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: this_model - end_bracket: ) end_bracket: ) end_bracket: ) alias_expression: keyword: as naked_identifier: e sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/macros/000077500000000000000000000000001451700765000244265ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_g_macros/macros/macro_1.sql000066400000000000000000000005261451700765000264730ustar00rootroot00000000000000{% macro sb_incremental(tbl, source, tbl_id='match_id', source_id='match_id') %} {% if is_incremental() %} ( select * from {{ source }} as s where ( s.{{ source_id }} not in (select distinct {{ tbl_id }} from {{ tbl.name }}) ) ) {% else %} {{ source }} {% endif %} {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/000077500000000000000000000000001451700765000231435ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/.sqlfluff000066400000000000000000000000701451700765000247630ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/jinja.sql000066400000000000000000000001761451700765000247630ustar00rootroot00000000000000-- Spacing errors inside and outside of the macro. 
-- This test makes sure both are caught. select 1 + 2 + {{ bad_macro() }} + 999+101 from my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/jinja.yml000066400000000000000000000014511451700765000247620ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: expression: - numeric_literal: 1 - binary_operator: + - numeric_literal: 2 - binary_operator: + - numeric_literal: 5 - binary_operator: + - numeric_literal: 6 - binary_operator: + - numeric_literal: 7 - binary_operator: + - numeric_literal: 999 - binary_operator: + - numeric_literal: 101 from_clause: - keyword: from - from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/macros/000077500000000000000000000000001451700765000244275ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_h_macros/macros/bad_macro.sql000066400000000000000000000000531451700765000270550ustar00rootroot00000000000000{% macro bad_macro() %}5+6+7{% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_i_raw/000077500000000000000000000000001451700765000224515ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_i_raw/raw_tag.sql000066400000000000000000000002011451700765000246100ustar00rootroot00000000000000SELECT col1, {% raw %} col2, '{{ a_tag_which_should_be_treated_as_raw }}' as col3 {% endraw %} FROM my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_i_raw/raw_tag.yml000066400000000000000000000013041451700765000246160ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 - comma: ',' - select_clause_element: column_reference: naked_identifier: col2 - comma: ',' - select_clause_element: quoted_literal: "'{{ a_tag_which_should_be_treated_as_raw }}'" alias_expression: keyword: as naked_identifier: col3 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_i_raw/raw_tag_2.sql000066400000000000000000000004401451700765000250350ustar00rootroot00000000000000-- Example from https://github.com/sqlfluff/sqlfluff/pull/737 SELECT {% raw %} lower(note_text) NOT LIKE '%daycare: {%' AND lower(note_text) NOT LIKE '%grade/ school name: {%' AND lower(note_text) NOT LIKE '%social history: {%' {% endraw %} AS foo FROM my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_i_raw/raw_tag_2.yml000066400000000000000000000032501451700765000250410ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: expression: - function: function_name: function_name_identifier: lower bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%daycare: {%'" - binary_operator: AND - function: function_name: function_name_identifier: lower bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%grade/ school name: {%'" - binary_operator: AND - function: function_name: function_name_identifier: lower bracketed: start_bracket: ( expression: column_reference: naked_identifier: note_text end_bracket: ) - keyword: NOT - keyword: LIKE - quoted_literal: "'%social history: {%'" alias_expression: keyword: AS naked_identifier: foo from_clause: 
keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/000077500000000000000000000000001451700765000236355ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/.sqlfluff000066400000000000000000000000551451700765000254600ustar00rootroot00000000000000[sqlfluff:templater:jinja] library_path=libs sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/jinja.sql000066400000000000000000000001271451700765000254510ustar00rootroot00000000000000SELECT 56 FROM {{ foo.schema }}.{{ foo.table("xyz") }} WHERE {{ bar.equals("x", 23) }} sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/jinja.yml000066400000000000000000000012021451700765000254460ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: foo_xyz where_clause: keyword: WHERE expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '23' sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/libs/000077500000000000000000000000001451700765000245665ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/libs/bar.py000066400000000000000000000002331451700765000257020ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/libs/foo.py000066400000000000000000000002561451700765000257260ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-2.3.5/test/fixtures/templater/jinja_j_libraries/libs/not_python.txt000066400000000000000000000000261451700765000275260ustar00rootroot00000000000000I am just a text file sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/000077500000000000000000000000001451700765000274065ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/.sqlfluff000066400000000000000000000002751451700765000312350ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:macros] foo2_def={%- macro foo2() -%}202{%- endmacro -%} foo3_def={%- macro foo3() -%}203{%- endmacro -%} sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/jinja.sql000066400000000000000000000001121451700765000312140ustar00rootroot00000000000000select {{ foo1() }}, {{ foo2() }}, {{ foo3() }} from my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/jinja.yml000066400000000000000000000010111451700765000312150ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '202' - comma: ',' - select_clause_element: numeric_literal: '203' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table 
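The jinja_k fixtures pin down macro precedence: foo1 (101) comes from the macros folder, while the config-defined foo2 (202) shadows the foo2 (102) that the folder also defines. A minimal sketch of why, in plain Jinja2: macro definitions are render-time assignments, so a later definition of the same name wins (an illustration only, not sqlfluff's actual macro-loading code):

# Conceptual sketch of the override behaviour the expected YAML encodes.
from jinja2 import Environment

path_macros = (
    "{%- macro foo1() -%}101{%- endmacro -%}"
    "{%- macro foo2() -%}102{%- endmacro -%}"
)
config_macros = (
    "{%- macro foo2() -%}202{%- endmacro -%}"
    "{%- macro foo3() -%}203{%- endmacro -%}"
)
query = "select {{ foo1() }}, {{ foo2() }}, {{ foo3() }} from my_table"

# Path macros first, config macros second: the later foo2 definition wins.
print(Environment().from_string(path_macros + config_macros + query).render())
# -> select 101, 202, 203 from my_table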
sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/macros/000077500000000000000000000000001451700765000306725ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_k_config_override_path_macros/macros/foo.sql000066400000000000000000000001211451700765000321700ustar00rootroot00000000000000{%- macro foo1() -%}101{%- endmacro -%} {%- macro foo2() -%}102{%- endmacro -%} sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/000077500000000000000000000000001451700765000227745ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/.sqlfluff000066400000000000000000000001331451700765000246140ustar00rootroot00000000000000[sqlfluff:templater:jinja:context] actions=['a', 'b', 'c'] states="foo\n ,bar" metric=open sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/001.sql000066400000000000000000000003011451700765000240070ustar00rootroot00000000000000{% set some_condition %}TRUE{% endset %} WITH cust AS (SELECT SNAPSHOT_DATE FROM DATAHUB.SNAPSHOT_DAILY WHERE {{some_condition}} ) SELECT DISTINCT cust.SNAPSHOT_DATE FROM custsqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/001.yml000066400000000000000000000047171451700765000240300ustar00rootroot00000000000000file: - placeholder: '{% set some_condition %}' - indent: '' - placeholder: 'TRUE' - dedent: '' - placeholder: '{% endset %}' - newline: "\n" - newline: "\n" - statement: with_compound_statement: - keyword: WITH - whitespace: ' ' - common_table_expression: - naked_identifier: cust - whitespace: ' ' - keyword: AS - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - select_statement: - select_clause: - keyword: SELECT - indent: '' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: SNAPSHOT_DATE - dedent: '' - newline: "\n" - whitespace: ' ' - from_clause: - keyword: FROM - whitespace: ' ' - from_expression: indent: '' from_expression_element: table_expression: table_reference: - naked_identifier: DATAHUB - dot: . - naked_identifier: SNAPSHOT_DAILY dedent: '' - newline: "\n" - whitespace: ' ' - where_clause: keyword: WHERE indent: '' whitespace: ' ' expression: boolean_literal: 'TRUE' dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - end_bracket: ) - newline: "\n" - newline: "\n" - select_statement: select_clause: - keyword: SELECT - whitespace: ' ' - select_clause_modifier: keyword: DISTINCT - indent: '' - whitespace: ' ' - select_clause_element: column_reference: - naked_identifier: cust - dot: . 
- naked_identifier: SNAPSHOT_DATE - dedent: '' newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: '' from_expression_element: table_expression: table_reference: naked_identifier: cust dedent: '' - end_of_file: '' sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/002.sql000066400000000000000000000007111451700765000240150ustar00rootroot00000000000000SELECT {{ " c2\n" }} AS other_id, {{ states }} {% for action in actions %} , {{metric}}_{{action}} , campaign_count_{{action}} {% endfor %} FROM {% for action in actions %} {% if loop.first %} {{action}}_raw_effect_sizes {% else %} JOIN {{action}}_raw_effect_sizes USING ({{ states }}) {% endif %} {% endfor %} CROSS JOIN action_states sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/002.yml000066400000000000000000000150571451700765000240300ustar00rootroot00000000000000file: statement: select_statement: - select_clause: - keyword: SELECT - indent: '' - newline: "\n" # NB: We end up with double whitespace here # because one is literal and one is templated. - whitespace: ' ' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: c2 newline: "\n" whitespace: ' ' alias_expression: indent: '' keyword: AS whitespace: ' ' naked_identifier: other_id dedent: '' - comma: ',' - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - select_clause_element: column_reference: naked_identifier: bar - newline: "\n" - whitespace: ' ' - placeholder: '{% for action in actions %}' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_a - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_a - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_b - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_b - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: open_c - newline: "\n" - whitespace: ' ' - comma: ',' - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: campaign_count_c - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endfor %}' - newline: "\n" - from_clause: - keyword: FROM - newline: "\n" - whitespace: ' ' - placeholder: '{% for action in actions %}' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - newline: "\n" - whitespace: ' ' - from_expression: - indent: '' - from_expression_element: table_expression: table_reference: naked_identifier: a_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% else %}' - indent: '' - placeholder: '... [103 unused template characters] ...' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - placeholder: '... [49 unused template characters] ...' 
- dedent: '' - placeholder: '{% else %}' - indent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - join_clause: - keyword: JOIN - indent: '' - newline: "\n" - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: b_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - indent: '' - keyword: USING - indent: '' - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - naked_identifier: bar - dedent: '' - end_bracket: ) - dedent: '' - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - template_loop: '' - indent: '' - newline: "\n" - whitespace: ' ' - placeholder: '{% if loop.first %}' - indent: '' - placeholder: '... [49 unused template characters] ...' - dedent: '' - placeholder: '{% else %}' - indent: '' - newline: "\n" - whitespace: ' ' - join_clause: - keyword: JOIN - indent: '' - newline: "\n" - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: c_raw_effect_sizes - newline: "\n" - whitespace: ' ' - dedent: '' - indent: '' - keyword: USING - indent: '' - newline: "\n" - whitespace: ' ' - bracketed: - start_bracket: ( - indent: '' - naked_identifier: foo - newline: "\n" - whitespace: ' ' - comma: ',' - naked_identifier: bar - dedent: '' - end_bracket: ) - dedent: '' - dedent: '' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endif %}' - newline: "\n" - whitespace: ' ' - dedent: '' - placeholder: '{% endfor %}' - newline: "\n" - join_clause: - keyword: CROSS - whitespace: ' ' - keyword: JOIN - indent: '' - whitespace: ' ' - from_expression_element: table_expression: table_reference: naked_identifier: action_states - dedent: '' newline: "\n" end_of_file: '' sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/003.sql000066400000000000000000000000611451700765000240140ustar00rootroot00000000000000select 1 {% if false %} + 2 {% endif %}sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/003.yml000066400000000000000000000007221451700765000240220ustar00rootroot00000000000000file: - statement: select_statement: select_clause: keyword: select indent: "" newline: "\n" whitespace: ' ' select_clause_element: numeric_literal: '1' dedent: "" - newline: "\n" - whitespace: ' ' - placeholder: '{% if false %}' - indent: "" - placeholder: '... [11 unused template characters] ...' 
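# NB: The placeholder above stands in for the template-only source inside the
# dead "{% if false %}" branch; those characters render to nothing.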
- dedent: "" - placeholder: '{% endif %}' - end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/004.sql000066400000000000000000000000601451700765000240140ustar00rootroot00000000000000select 1 {% if true %} + 2 {% endif %}sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/004.yml000066400000000000000000000011541451700765000240230ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: select indent: "" newline: "\n" whitespace: ' ' select_clause_element: expression: - numeric_literal: '1' - newline: "\n" - whitespace: ' ' - placeholder: '{% if true %}' - indent: '' - newline: "\n" - whitespace: ' ' - binary_operator: + - whitespace: ' ' - numeric_literal: '2' dedent: "" newline: "\n" whitespace: ' ' dedent: '' placeholder: '{% endif %}' end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/005.sql000066400000000000000000000001001451700765000240100ustar00rootroot00000000000000select 0, {% for i in [1, 2, 3] %} i, {% endfor %} 4sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/005.yml000066400000000000000000000024161451700765000240260ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '0' - comma: ',' - newline: "\n" - whitespace: ' ' - placeholder: '{% for i in [1, 2, 3] %}' - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - template_loop: "" - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - template_loop: "" - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: column_reference: naked_identifier: i - comma: ',' - newline: "\n" - whitespace: ' ' - dedent: "" - placeholder: '{% endfor %}' - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '4' - dedent: "" end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/006.sql000066400000000000000000000000501451700765000240150ustar00rootroot00000000000000{% if true %} SELECT 1 + 1 {%- endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/006.yml000066400000000000000000000010531451700765000240230ustar00rootroot00000000000000file: - placeholder: '{% if true %}' - indent: "" - newline: "\n" - statement: select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: expression: - numeric_literal: '1' - whitespace: ' ' - binary_operator: + - whitespace: ' ' - numeric_literal: '1' dedent: "" - placeholder: "\n" - dedent: "" - placeholder: "{%- endif %}" - newline: "\n" - end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/007.sql000066400000000000000000000001131451700765000240160ustar00rootroot00000000000000SELECT 1 {{ " + 2" if false }} FROM {%+if true-%} {{ref('foo')}} {%-endif%}sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/007.yml000066400000000000000000000013221451700765000240230ustar00rootroot00000000000000file: - statement: select_statement: - select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: numeric_literal: '1' dedent: "" - whitespace: ' ' - placeholder: '{{ " + 2" if false }}' - whitespace: ' ' - from_clause: - keyword: FROM - whitespace: ' ' - placeholder: '{%+if true-%}' - indent: "" - placeholder: ' ' - from_expression: indent: "" from_expression_element: 
table_expression: table_reference: naked_identifier: foo dedent: "" - placeholder: ' ' - dedent: "" - placeholder: '{%-endif%}' - end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/008.sql000066400000000000000000000001441451700765000240230ustar00rootroot00000000000000{% for item in [1,2] -%} SELECT * FROM some_table {{ "UNION ALL\n" if not loop.last }} {%- endfor %}sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/008.yml000066400000000000000000000034021451700765000240250ustar00rootroot00000000000000file: - placeholder: "{% for item in [1,2] -%}" - indent: "" - placeholder: "\n" - statement: set_expression: - select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' dedent: "" newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: some_table dedent: "" - newline: "\n" - set_operator: - keyword: UNION - whitespace: ' ' - keyword: ALL - newline: "\n" - placeholder: "\n" - dedent: "" - template_loop: "" - indent: "" - placeholder: "\n" - select_statement: select_clause: keyword: SELECT indent: "" whitespace: ' ' select_clause_element: wildcard_expression: wildcard_identifier: star: '*' dedent: "" newline: "\n" from_clause: keyword: FROM whitespace: ' ' from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: some_table dedent: "" - newline: "\n" - placeholder: '{{ "UNION ALL\n" if not loop.last }}' - placeholder: "\n" - dedent: "" - placeholder: '{%- endfor %}' - end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/009.sql000066400000000000000000000000751451700765000240270ustar00rootroot00000000000000SELECT 1 {% if true %} ,2 FROM a {% endif %} LIMIT 1 sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/009.yml000066400000000000000000000017311451700765000240310ustar00rootroot00000000000000file: statement: select_statement: - select_clause: - keyword: SELECT - indent: "" - newline: "\n" - whitespace: ' ' - select_clause_element: numeric_literal: '1' - newline: "\n" - placeholder: '{% if true %}' - indent: "" - newline: "\n" - whitespace: ' ' - comma: ',' - select_clause_element: numeric_literal: '2' - dedent: "" - newline: "\n" - from_clause: - keyword: FROM - whitespace: ' ' - from_expression: indent: "" from_expression_element: table_expression: table_reference: naked_identifier: a dedent: "" - newline: "\n" - dedent: "" - placeholder: '{% endif %}' - newline: "\n" - limit_clause: keyword: LIMIT indent: "" whitespace: ' ' numeric_literal: '1' dedent: "" newline: "\n" end_of_file: "" sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/010.sql000066400000000000000000000001121451700765000240070ustar00rootroot00000000000000{% macro test_macro() %} SELECT 2; {% endmacro %} {{ test_macro() }} sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/010.yml000066400000000000000000000010261451700765000240160ustar00rootroot00000000000000file: - placeholder: '{% macro test_macro() %}' - indent: '' - placeholder: "\n SELECT 2;\n" - dedent: '' - placeholder: '{% endmacro %}' - newline: "\n" - newline: "\n" - newline: "\n" - whitespace: ' ' - statement: select_statement: select_clause: keyword: SELECT indent: '' whitespace: ' ' select_clause_element: numeric_literal: '2' dedent: '' - statement_terminator: ; - newline: "\n" - newline: "\n" - end_of_file: '' 
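Fixtures like these can be reproduced programmatically. A minimal sketch using sqlfluff's simple API (jinja is the default templater, so the dead branch below renders to nothing; the exact return shape of parse() varies between sqlfluff versions, so it is only printed for inspection):

# Minimal sketch: parse a template like 003.sql via sqlfluff's simple API.
# The dead {% if false %} branch renders to nothing, which is what the
# placeholder/indent/dedent segments in the YAML fixtures above record.
import sqlfluff

SRC = "select 1\n{% if false %}\n+ 2\n{% endif %}"
print(sqlfluff.parse(SRC))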
sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/011.sql000066400000000000000000000000651451700765000240170ustar00rootroot00000000000000{% macro test_macro() %} SELECT 2; {% endmacro %}sqlfluff-2.3.5/test/fixtures/templater/jinja_l_metas/011.yml000066400000000000000000000002531451700765000240200ustar00rootroot00000000000000file: - placeholder: '{% macro test_macro() %}' - indent: '' - placeholder: "\n SELECT 2;\n" - dedent: '' - placeholder: '{% endmacro %}' - end_of_file: '' sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/000077500000000000000000000000001451700765000253265ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/if_elif_else.sql000066400000000000000000000001201451700765000304450ustar00rootroot00000000000000{% if True %} SELECT 1 {% elif True %} SELECT 2 {% else %} SELECT 3 {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/if_elif_else_chain_scoring.sql000066400000000000000000000004121451700765000333370ustar00rootroot00000000000000{% if True %} SELECT 1 {% elif True %} SELECT 10 {% elif True %} SELECT 100 {% elif True %} SELECT 1000 {% elif True %} SELECT 10000 {% elif True %} SELECT 100000 {% elif True %} SELECT 1000000 {% elif True %} SELECT 10000000 {% else %} SELECT 100000000 {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/if_else_if_nested.sql000066400000000000000000000001451451700765000314750ustar00rootroot00000000000000{% if True %} SELECT 1 {% else %} {% if True %} SELECT 2 {% else %} SELECT 3 {% endif %} {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/if_true_elif_type_error_else.sql000066400000000000000000000001341451700765000337630ustar00rootroot00000000000000{% if True %} SELECT 1 {% elif True %} SELECT {{ 1 + "2" }} {% else %} SELECT 2 {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/simple_if_false.sql000066400000000000000000000000701451700765000311650ustar00rootroot00000000000000{% if False %} SELECT 1 {% else %} SELECT 2 {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_lint_unreached_code/simple_if_true.sql000066400000000000000000000000671451700765000310600ustar00rootroot00000000000000{% if True %} SELECT 1 {% else %} SELECT 2 {% endif %} sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/000077500000000000000000000000001451700765000252055ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/.sqlfluff000066400000000000000000000001531451700765000270270ustar00rootroot00000000000000[sqlfluff] # Test setting the library_path via the global setting (not via jinja config) library_path=libs sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/jinja.sql000066400000000000000000000001761451700765000270250ustar00rootroot00000000000000SELECT 56 FROM {{ foo.schema }}.{{ foo.table("xyz") }} WHERE {{ foo.bar.baz.equals("x", 23) }} and {{ root_equals("y", 42) }} sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/jinja.yml000066400000000000000000000015331451700765000270250ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . 
- naked_identifier: foo_xyz where_clause: keyword: WHERE expression: - column_reference: naked_identifier: x - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '23' - binary_operator: and - column_reference: naked_identifier: y - comparison_operator: raw_comparison_operator: '=' - numeric_literal: '42' sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/000077500000000000000000000000001451700765000261365ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/__init__.py000066400000000000000000000002711451700765000302470ustar00rootroot00000000000000"""Module used to test __init__.py within the jinja template.""" def root_equals(col: str, val: str) -> str: """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/foo/000077500000000000000000000000001451700765000267215ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/foo/__init__.py000066400000000000000000000002561451700765000310350ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/000077500000000000000000000000001451700765000274655ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/__init__.py000066400000000000000000000000561451700765000315770ustar00rootroot00000000000000"""Module used to create module hierarchy.""" sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/foo/bar/baz.py000066400000000000000000000002331451700765000306110ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/libs/not_python.txt000066400000000000000000000000261451700765000310760ustar00rootroot00000000000000I am just a text file sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/other/000077500000000000000000000000001451700765000263265ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_m_libraries_module/other/__init__.py000066400000000000000000000001331451700765000304340ustar00rootroot00000000000000"""Module that should not be loaded.""" raise Exception("this file should not be loaded") sqlfluff-2.3.5/test/fixtures/templater/jinja_macro_path_does_not_exist/000077500000000000000000000000001451700765000265735ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_macro_path_does_not_exist/.sqlfluff000066400000000000000000000001151451700765000304130ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=nonexistent_macro_directory sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/000077500000000000000000000000001451700765000245135ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/.sqlfluff000066400000000000000000000002751451700765000263420ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros [sqlfluff:templater:jinja:macros] foo2_def={%- macro foo2() -%}202{%- endmacro -%} foo3_def={%- macro foo3() -%}203{%- endmacro -%} 
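The jinja.sql fixture that follows exercises nested macros: school_year_start_date.sql starts with {% from 'week_start_date.sql' import week_start_date %}, which has to resolve against the configured macros folder. A minimal sketch of that cross-file import in plain Jinja2, assuming it is run from the jinja_n_nested_macros directory (sqlfluff provides the equivalent lookup through load_macros_from_path):

# Minimal sketch of the cross-file macro import used by the fixture below.
# Assumes the working directory is jinja_n_nested_macros, so that "macros"
# is the folder holding week_start_date.sql.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("macros"))
template = env.from_string(
    "{% from 'week_start_date.sql' import week_start_date %}"
    "select {{ week_start_date(\"'2021-05-01'\") }}"
)
print(template.render())
# -> select date_trunc('week', CONVERT_TIMEZONE('UTC', 'America/New_York',
#    '2021-05-01'))  (modulo whitespace)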
sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/jinja.sql000066400000000000000000000000621451700765000263250ustar00rootroot00000000000000select {{ school_year_start_date('2021-05-01') }} sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/jinja.yml000066400000000000000000000230341451700765000263330ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: select select_clause_element: function: function_name: function_name_identifier: date_trunc bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: TO_DATE bracketed: - start_bracket: ( - expression: quoted_literal: "'01 July'" binary_operator: - pipe: '|' - pipe: '|' case_expression: - keyword: case - when_clause: - keyword: when - expression: - function: function_name: function_name_identifier: TO_DATE bracketed: - start_bracket: ( - expression: quoted_literal: "'01 July'" binary_operator: - pipe: '|' - pipe: '|' function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: CONVERT_TIMEZONE bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) - comma: ',' - expression: quoted_literal: "'DD Mon YYYY'" - end_bracket: ) - comparison_operator: - raw_comparison_operator: < - raw_comparison_operator: '=' - function: function_name: function_name_identifier: CONVERT_TIMEZONE bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - keyword: then - expression: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: CONVERT_TIMEZONE bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) - else_clause: keyword: else expression: function: function_name: function_name_identifier: extract bracketed: start_bracket: ( date_part: year keyword: from expression: function: function_name: function_name_identifier: date_trunc bracketed: - start_bracket: ( - expression: quoted_literal: "'week'" - comma: ',' - expression: function: function_name: function_name_identifier: CONVERT_TIMEZONE bracketed: - start_bracket: ( - expression: quoted_literal: "'UTC'" - comma: ',' - expression: quoted_literal: "'America/New_York'" - comma: ',' - expression: - numeric_literal: '2021' - binary_operator: '-' - 
numeric_literal: '05' - binary_operator: '-' - numeric_literal: '01' - end_bracket: ) - end_bracket: ) end_bracket: ) binary_operator: '-' numeric_literal: '1' - keyword: end - comma: ',' - expression: quoted_literal: "'DD Mon YYYY'" - end_bracket: ) - end_bracket: ) sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/macros/000077500000000000000000000000001451700765000257775ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/macros/school_year_start_date.sql000066400000000000000000000013751451700765000332470ustar00rootroot00000000000000{% from 'week_start_date.sql' import week_start_date %} {% macro school_year_start_date( date ) %} -- Each new school year starts at the beginning of the week July 1 falls in: date_trunc( 'week', TO_DATE( '01 July' || -- If date is on or after this calendar year's school year start, -- then date is in the school year that started this calendar year case when TO_DATE( '01 July' || extract(year from {{ week_start_date( date ) }}) , 'DD Mon YYYY' ) <= CONVERT_TIMEZONE( 'UTC', 'America/New_York', {{date}} ) then extract(year from {{ week_start_date( date ) }} ) -- Otherwise, school year started in previous calendar year else extract(year from {{ week_start_date( date ) }} ) - 1 end , 'DD Mon YYYY' ) ) {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_n_nested_macros/macros/week_start_date.sql000066400000000000000000000002531451700765000316650ustar00rootroot00000000000000-- Start of the week the date belongs to {% macro week_start_date(date) -%} date_trunc('week', CONVERT_TIMEZONE( 'UTC', 'America/New_York', {{date}} ) ) {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_o_config_override_dbt_builtins/000077500000000000000000000000001451700765000275745ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_o_config_override_dbt_builtins/.sqlfluff000066400000000000000000000001471451700765000314210ustar00rootroot00000000000000[sqlfluff:templater:jinja:macros] dbt_is_incremental = {% macro is_incremental() %}False{% endmacro %} override_dbt_builtins.sql000066400000000000000000000002061451700765000346150ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_o_config_override_dbt_builtinsSELECT col1 FROM t_table1 {% if is_incremental() is true %} --This should not be part of the rendered SQL!
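-- Explanatory note (an inference from the fixtures above, not part of the original file): the .sqlfluff overrides the dbt is_incremental() builtin with a macro rendering the literal string "False", so "is_incremental() is true" evaluates false and this branch is dropped, which is why the .yml fixture below contains no WHERE clause.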
WHERE FALSE {% endif %} override_dbt_builtins.yml000066400000000000000000000006011451700765000346160ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_o_config_override_dbt_builtinsfile: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: column_reference: naked_identifier: col1 from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: naked_identifier: t_table1 sqlfluff-2.3.5/test/fixtures/templater/jinja_p_disable_dbt_builtins/000077500000000000000000000000001451700765000260345ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_p_disable_dbt_builtins/.sqlfluff000066400000000000000000000001431451700765000276550ustar00rootroot00000000000000[sqlfluff:templater:jinja] apply_dbt_builtins = False [sqlfluff:templater:jinja:context] var = 30 sqlfluff-2.3.5/test/fixtures/templater/jinja_p_disable_dbt_builtins/disable_dbt_builtins.sql000066400000000000000000000001761451700765000327260ustar00rootroot00000000000000-- To test if dbt builtins have been disabled we try to call -- `var` as a variable instead of as a function SELECT {{ var }} sqlfluff-2.3.5/test/fixtures/templater/jinja_p_disable_dbt_builtins/disable_dbt_builtins.yml000066400000000000000000000002231451700765000327210ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: SELECT - select_clause_element: numeric_literal: 30 sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/000077500000000000000000000000001451700765000261035ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/.sqlfluff000066400000000000000000000001241451700765000277230ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros,more_macros,even_more_macrossqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros/000077500000000000000000000000001451700765000314265ustar00rootroot00000000000000ultimate_foo.sql000066400000000000000000000000471451700765000345600ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/even_more_macros{%- macro foo3() -%}103{%- endmacro -%}sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/jinja.sql000066400000000000000000000001121451700765000277110ustar00rootroot00000000000000select {{ foo1() }}, {{ foo2() }}, {{ foo3() }} from my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/jinja.yml000066400000000000000000000010111451700765000277120ustar00rootroot00000000000000file: statement: select_statement: select_clause: - keyword: select - select_clause_element: numeric_literal: '101' - comma: ',' - select_clause_element: numeric_literal: '102' - comma: ',' - select_clause_element: numeric_literal: '103' from_clause: keyword: from from_expression: from_expression_element: table_expression: table_reference: naked_identifier: my_table sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/macros/000077500000000000000000000000001451700765000273675ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/macros/foo.sql000066400000000000000000000000471451700765000306740ustar00rootroot00000000000000{%- macro foo1() -%}101{%- endmacro 
-%}sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/more_macros/000077500000000000000000000000001451700765000304115ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_q_multiple_path_macros/more_macros/other_foo.sql000066400000000000000000000000471451700765000331170ustar00rootroot00000000000000{%- macro foo2() -%}102{%- endmacro -%}sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/000077500000000000000000000000001451700765000252045ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/jinja.sql000066400000000000000000000000311451700765000270120ustar00rootroot00000000000000{{ query_proxy('xyz') }} sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/jinja.yml000066400000000000000000000012021451700765000270160ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: SELECT select_clause_element: numeric_literal: '56' from_clause: keyword: FROM from_expression: from_expression_element: table_expression: table_reference: - naked_identifier: sch1 - dot: . - naked_identifier: foo_xyz where_clause: keyword: WHERE expression: column_reference: naked_identifier: x comparison_operator: raw_comparison_operator: '=' numeric_literal: '23' sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/libs/000077500000000000000000000000001451700765000261355ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/libs/bar.py000066400000000000000000000002331451700765000272510ustar00rootroot00000000000000"""Module used to test bar within the jinja template.""" def equals(col, val): """Return a string that has col = val.""" return f"{col} = {val}" sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/libs/foo.py000066400000000000000000000002561451700765000272750ustar00rootroot00000000000000"""Module used to test foo within the jinja template.""" schema = "sch1" def table(name): """Return the parameter with foo_ in front of it.""" return f"foo_{name}" sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/libs/not_python.txt000066400000000000000000000000261451700765000310750ustar00rootroot00000000000000I am just a text file sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/macros/000077500000000000000000000000001451700765000264705ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_r_library_in_macro/macros/query_proxy.sql000066400000000000000000000002001451700765000316070ustar00rootroot00000000000000{% macro query_proxy(tbl) %}SELECT 56 FROM {{ foo.schema }}.{{ foo.table("xyz") }} WHERE {{ bar.equals("x", 23) }} {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/000077500000000000000000000000001451700765000255545ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/.sqlfluff000066400000000000000000000000571451700765000274010ustar00rootroot00000000000000[sqlfluff:templater:jinja] library_path = libs sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/jinja.sql000066400000000000000000000000311451700765000273620ustar00rootroot00000000000000SELECT "{{ now | ds }}"; sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/jinja.yml000066400000000000000000000004211451700765000273700ustar00rootroot00000000000000file: statement: select_statement: select_clause: keyword: 'SELECT' select_clause_element: column_reference: quoted_identifier: '"2006-01-02"' statement_terminator: ';'
sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/libs/000077500000000000000000000000001451700765000265055ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_s_filters_in_library/libs/__init__.py000066400000000000000000000010071451700765000306140ustar00rootroot00000000000000"""Module used to test filters within the jinja template.""" from __future__ import annotations import datetime # https://github.com/apache/airflow/blob/main/airflow/templates.py#L50 def ds_filter(value: datetime.date | datetime.time | None) -> str | None: """Date filter.""" if value is None: return None return value.strftime("%Y-%m-%d") SQLFLUFF_JINJA_FILTERS = {"ds": ds_filter} now = datetime.datetime( 2006, 1, 2, 3, 4, 5, 0, tzinfo=datetime.timezone(-datetime.timedelta(hours=7)) ) sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/000077500000000000000000000000001451700765000257065ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/.sqlfluff000066400000000000000000000001041451700765000295240ustar00rootroot00000000000000[sqlfluff:templater:jinja] load_macros_from_path=macros,more_macros sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/macros/000077500000000000000000000000001451700765000271725ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/macros/echo.sql000066400000000000000000000000571451700765000306330ustar00rootroot00000000000000{% macro echo(text) %} {{text}} {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/macros/subdir/000077500000000000000000000000001451700765000304625ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/macros/subdir/include_comment.sql000066400000000000000000000000221451700765000343420ustar00rootroot00000000000000-- Just a comment sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/more_macros/000077500000000000000000000000001451700765000302145ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/jinja_slice_template_macros/more_macros/echoecho.sql000066400000000000000000000000741451700765000325130ustar00rootroot00000000000000{% macro echoecho(text) %} {{text}} {{text}} {% endmacro %} sqlfluff-2.3.5/test/fixtures/templater/placeholder_flyway_var/000077500000000000000000000000001451700765000247225ustar00rootroot00000000000000sqlfluff-2.3.5/test/fixtures/templater/placeholder_flyway_var/placeholder_flyway_var_a.sql000066400000000000000000000003061451700765000324700ustar00rootroot00000000000000USE ${flyway:database}.test_schema; CREATE OR REPLACE STAGE stg_data_export_${env_name} URL = 's3://${s3_data_lake_bucket}/${env_name}/exports/stg_data_export' STORAGE_INTEGRATION = s3_integ_main; sqlfluff-2.3.5/test/generate_parse_fixture_yml.py000066400000000000000000000156771451700765000223300ustar00rootroot00000000000000"""Utility to generate yml files for all the parsing examples.""" import fnmatch import multiprocessing import os import re import sys import time from collections import defaultdict from typing import Callable, Dict, List, Optional, Tuple, TypeVar import click import yaml from conftest import ( ParseExample, compute_parse_tree_hash, get_parse_fixtures, parse_example_file, ) from sqlfluff.core.errors import SQLParseError S = TypeVar("S", bound="ParseExample") def distribute_work(work_items: List[S], work_fn: Callable[[S], None]) -> None: """Distribute work and keep track of progress.""" # Build up a dict of sets, where the key
is the dialect and the set # contains all the expected cases. As cases return we'll check them # off. success_map = {} expected_cases = defaultdict(set) for case in work_items: expected_cases[case.dialect].add(case) errors = [] with multiprocessing.Pool(multiprocessing.cpu_count()) as pool: for example, result in pool.imap_unordered(work_fn, work_items): if result is not None: errors.append(result) success_map[example] = False else: success_map[example] = True expected_cases[example.dialect].remove(example) # Check to see whether a dialect is complete if not expected_cases[example.dialect]: # It's done. Report success rate. local_success_map = { k: v for k, v in success_map.items() if k.dialect == example.dialect } if all(local_success_map.values()): print(f"{example.dialect!r} complete.\t\tAll Success ✅") else: fail_files = [ k.sqlfile for k, v in local_success_map.items() if not v ] print( f"{example.dialect!r} complete.\t\t{len(fail_files)} fails. ⚠️" ) for fname in fail_files: print(f" - {fname!r}") if errors: print("FAILED TO GENERATE ALL CASES") sys.exit(1) def _create_file_path(example: ParseExample, ext: str = ".yml") -> str: dialect, sqlfile = example root, _ = os.path.splitext(sqlfile) path = os.path.join("test", "fixtures", "dialects", dialect, root + ext) return path def _is_matching_new_criteria(example: ParseExample): """Return True if the YAML doesn't exist or is older than the SQL.""" yaml_path = _create_file_path(example) if not os.path.exists(yaml_path): return True sql_path = os.path.join( "test", "fixtures", "dialects", example.dialect, example.sqlfile, ) return os.path.getmtime(yaml_path) < os.path.getmtime(sql_path) def generate_one_parse_fixture( example: ParseExample, ) -> Tuple[ParseExample, Optional[SQLParseError]]: """Parse example SQL file, write parse tree to YAML file.""" dialect, sqlfile = example sql_path = _create_file_path(example, ".sql") try: tree = parse_example_file(dialect, sqlfile) except Exception as err: # Catch parsing errors, and wrap the file path into the error message. return example, SQLParseError(f"Fatal parsing error: {sql_path}: {err}") # Check we don't have any base types or unparsable sections types = tree.type_set() if "base" in types: return example, SQLParseError(f"Unnamed base section when parsing: {sql_path}") if "unparsable" in types: return example, SQLParseError(f"Could not parse: {sql_path}") _hash = compute_parse_tree_hash(tree) # Swap the .sql file extension for .yml path = _create_file_path(example) with open(path, "w", newline="\n") as f: r: Optional[Dict[str, Optional[str]]] = None if not tree: f.write("") return example, None records = tree.as_record(code_only=True, show_raw=True) assert records, "TypeGuard" r = dict([("_hash", _hash), *list(records.items())]) print( "# YML test files are auto-generated from SQL files and should not be " "edited by", '# hand. To help enforce this, the "hash" field in the file must match ' "a hash", "# computed by SQLFluff when running the tests. Please run", "# `python test/generate_parse_fixture_yml.py` to generate them after " "adding or", "# altering SQL files.", file=f, sep="\n", ) yaml.dump(r, f, default_flow_style=False, sort_keys=False) return example, None def gather_file_list( dialect: Optional[str] = None, glob_match_pattern: Optional[str] = None, new_only: bool = False, ) -> List[ParseExample]: """Gather the list of files to generate fixtures for.
Apply filters as required.""" parse_success_examples, _ = get_parse_fixtures() if new_only: parse_success_examples = [ example for example in parse_success_examples if _is_matching_new_criteria(example) ] if dialect: dialect = dialect.lower() parse_success_examples = [ example for example in parse_success_examples if example[0] == dialect ] if len(parse_success_examples) == 0: raise ValueError(f'Unknown Dialect "{dialect}"') if not glob_match_pattern: return parse_success_examples regex = re.compile(fnmatch.translate(glob_match_pattern)) return [ example for example in parse_success_examples if regex.match(example[1]) is not None ] @click.command() @click.option( "--filter", "-f", default=None, help="A glob filter to apply to file names." ) @click.option("--dialect", "-d", default=None, help="Filter to a given dialect.") @click.option( "--new-only", "new_only", is_flag=True, default=False, help="Only create missing fixtures.", ) def generate_parse_fixtures( filter: Optional[str], dialect: Optional[str], new_only: bool ): """Generate fixture or a subset based on dialect or filename glob match.""" filter_str = filter or "*" dialect_str = dialect or "all" print("Match Pattern Received:") print(f"\tfilter={filter_str} dialect={dialect_str} new-only={new_only}") parse_success_examples = gather_file_list(dialect, filter, new_only) print(f"Found {len(parse_success_examples)} file(s) to generate") t0 = time.monotonic() try: distribute_work(parse_success_examples, generate_one_parse_fixture) except SQLParseError as err: # If one fails, exit early and cleanly. print(f"PARSING FAILED: {err}") sys.exit(1) dt = time.monotonic() - t0 print(f"Built {len(parse_success_examples)} fixtures in {dt:.2f}s.") def main(): """Find all example SQL files, parse and create YAML files.""" generate_parse_fixtures() if __name__ == "__main__": main() sqlfluff-2.3.5/test/patch_lcov.py000066400000000000000000000023501451700765000170230ustar00rootroot00000000000000"""Replaces .tox/ paths in the lcov file with paths relative to repo root. Context: When the CI build runs tests, it uses tox, which installs SQLFluff in a virtual environment. Thus, the coverage.lcov file generated by the tests contains paths to the virtual environment. This script replaces those paths with paths relative to the repo root. This allows the lcov file to be used by Coveralls. Without this, Coveralls has valid coverage info, but it generates URLs that point to source files that don't exist in the SQLFluff GitHub repo. 
For example, we want to change this: SF:.tox/py/lib/python3.10/site-packages/sqlfluff/__init__.py to this: SF:src/sqlfluff/__init__.py """ import re from pathlib import Path path = Path("coverage.lcov") if path.exists(): lines = path.read_text().splitlines() modified_lines = [] for line in lines: if line.startswith("SF:.tox"): m = re.search(r"^(SF:).*(sqlfluff/.*)", line) if m: modified_lines.append(f"{m.group(1)}src/{m.group(2)}") else: print(f"Could not patch line: {line}") modified_lines.append(line) else: modified_lines.append(line) path.write_text("\n".join(modified_lines)) sqlfluff-2.3.5/test/rules/000077500000000000000000000000001451700765000154615ustar00rootroot00000000000000sqlfluff-2.3.5/test/rules/std_AL04_test.py000066400000000000000000000045701451700765000204120ustar00rootroot00000000000000"""Tests the python routines within AL04.""" import sqlfluff def test__rules__std_AL04_one_aliases_one_duplicate(): """Verify the correct error message when one duplicate table alias occurs once.""" sql = """ SELECT a.pk FROM table_1 AS a JOIN table_2 AS a ON a.pk = a.pk """ result = sqlfluff.lint(sql) assert "AL04" in [r["code"] for r in result] assert [r["code"] for r in result].count("AL04") == 1 def test__rules__std_AL04_one_aliases_two_duplicate(): """Verify the correct error message when one duplicate table alias occurs twice.""" sql = """ SELECT a.pk FROM table_1 AS a JOIN table_2 AS a ON a.pk = a.pk JOIN table_3 AS a ON a.pk = a.pk """ result = sqlfluff.lint(sql) result_filter = [r for r in result if r["code"] == "AL04"] # The error message should only show two times, not three assert len(result_filter) == 2 assert ( len( [ r for r in result_filter if "Duplicate table alias 'a'" in r["description"] ] ) == 2 ) # Test specific line number assert result_filter[0]["line_no"] == 5 assert result_filter[1]["line_no"] == 6 def test__rules__std_AL04_complex(): """Verify that AL04 returns the correct error messages for a complex example.""" sql = """ SELECT a.pk, b.pk FROM table_1 AS a JOIN table_2 AS a ON a.pk = a.pk JOIN table_3 AS b ON a.pk = b.pk JOIN table_4 AS b ON b.pk = b.pk JOIN table_5 AS a ON b.pk = a.pk """ result = sqlfluff.lint(sql) result_filter = [r for r in result if r["code"] == "AL04"] # Error messages show three times in total: twice for 'a', once for 'b' assert len(result_filter) == 3 assert ( len( [ r for r in result_filter if "Duplicate table alias 'a'" in r["description"] ] ) == 2 ) assert ( len( [ r for r in result_filter if "Duplicate table alias 'b'" in r["description"] ] ) == 1 ) # Test specific line number assert result_filter[0]["line_no"] == 6 assert result_filter[1]["line_no"] == 8 assert result_filter[2]["line_no"] == 9 sqlfluff-2.3.5/test/rules/std_AM06_test.py000066400000000000000000000031641451700765000204130ustar00rootroot00000000000000"""Tests the python routines within AM06.""" import sqlfluff def test__rules__std_AM06_raised() -> None: """Test case for multiple AM06 errors raised with 'consistent' setting.""" sql = """ SELECT foo, bar, sum(baz) AS sum_value FROM ( SELECT foo, bar, sum(baz) AS baz FROM fake_table GROUP BY foo, bar ) GROUP BY 1, 2 ORDER BY 1, 2; """ result = sqlfluff.lint(sql) results_AM06 = [r for r in result if r["code"] == "AM06"] assert len(results_AM06) == 2 assert ( results_AM06[0]["description"] == "Inconsistent column references in 'GROUP BY/ORDER BY' clauses."
) def test__rules__std_AM06_unparsable() -> None: """Test that an unparsable group by doesn't raise a spurious AM06 error.""" sql = """ SELECT foo.set.barr FROM foo GROUP BY foo.set.barr """ result = sqlfluff.lint(sql) results_AM06 = [r for r in result if r["code"] == "AM06"] results_prs = [r for r in result if r["code"] == "PRS"] assert len(results_AM06) == 0 assert len(results_prs) > 0 def test__rules__std_AM06_noqa() -> None: """Test that an unparsable group by with noqa doesn't raise a spurious AM06 error.""" sql = """ SELECT foo.set.barr --noqa: PRS FROM foo GROUP BY f@oo.set.bar.r --noqa: PRS """ result = sqlfluff.lint(sql) results_AM06 = [r for r in result if r["code"] == "AM06"] results_prs = [r for r in result if r["code"] == "PRS"] assert len(results_AM06) == 0 assert len(results_prs) == 0 sqlfluff-2.3.5/test/rules/std_CV02_test.py000066400000000000000000000007051451700765000204200ustar00rootroot00000000000000"""Tests the python routines within CV02.""" import sqlfluff def test__rules__std_CV02_raised() -> None: """CV02 is raised for use of ``IFNULL`` or ``NVL``.""" sql = "SELECT\n\tIFNULL(NULL, 100),\n\tNVL(NULL,100);" result = sqlfluff.lint(sql, rules=["CV02"]) assert len(result) == 2 assert result[0]["description"] == "Use 'COALESCE' instead of 'IFNULL'." assert result[1]["description"] == "Use 'COALESCE' instead of 'NVL'." sqlfluff-2.3.5/test/rules/std_CV09_test.py000066400000000000000000000014521451700765000204270ustar00rootroot00000000000000"""Tests the python routines within CV09.""" from sqlfluff.core import FluffConfig, Linter def test__rules__std_CV09_raised() -> None: """CV09 is raised for use of blocked words with correct error message.""" sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n" cfg = FluffConfig(overrides={"dialect": "ansi"}) cfg.set_value( config_path=["rules", "convention.blocked_words", "blocked_words"], val="myoldfunction,deprecated_table", ) linter = Linter(config=cfg) result_records = linter.lint_string_wrapped(sql).as_records() result = result_records[0]["violations"] assert len(result) == 2 assert result[0]["description"] == "Use of blocked word 'MYOLDFUNCTION'." assert result[1]["description"] == "Use of blocked word 'deprecated_table'." sqlfluff-2.3.5/test/rules/std_LT01_LT02_LT09_combo_test.py000066400000000000000000000012671451700765000231200ustar00rootroot00000000000000"""Tests that issue #1373 doesn't reoccur. The combination of LT02 (incorrect indentation), LT09 (select targets), and LT01 (unnecessary white space) can result in incorrect indentation. """ import sqlfluff def test__rules__std_LT02_LT09_LT01(): """Verify that double indents don't flag LT01.""" sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" fixed_sql = """ WITH example AS ( SELECT my_id, other_thing, one_more FROM my_table ) SELECT my_id FROM example\n""" result = sqlfluff.fix(sql, exclude_rules=["LT13"]) assert result == fixed_sql sqlfluff-2.3.5/test/rules/std_LT01_LT04_test.py000066400000000000000000000011541451700765000211700ustar00rootroot00000000000000"""Tests the python routines within LT01.""" import sqlfluff def test__rules__std_LT01_single_raise() -> None: """Test that LT01 is raised only once when there is no post-comma whitespace.""" # This query used to triple count LT01. Added memory to log previously fixed commas # (issue #2001).
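    # In other words, the leading ",col_b" below should now yield exactly one
    # LT01 (spacing) violation and one LT04 (comma placement) violation,
    # matching the assertions at the end of this test.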
sql = """ SELECT col_a AS a ,col_b AS b FROM foo; """ result = sqlfluff.lint(sql, rules=["LT01", "LT04"]) results_LT01 = [r for r in result if r["code"] == "LT01"] results_LT04 = [r for r in result if r["code"] == "LT04"] assert len(results_LT01) == 1 assert len(results_LT04) == 1 sqlfluff-2.3.5/test/rules/std_LT02_LT11_combo_test.py000066400000000000000000000023701451700765000223470ustar00rootroot00000000000000"""Tests the combination of LT02 and LT11. LT02: Indentation not consistent with previous lines LT11: Set operators should be surrounded by newlines Auto fix of LT11 does not insert correct indentation but just Newlines. It relies on LT02 to sort out the indentation later. This is what is getting tested here. """ import sqlfluff def test__rules__std_LT02_LT11_union_all_in_subquery_lint(): """Verify a that LT11 reports lint errors in subqueries.""" sql = ( "SELECT * FROM (\n" " SELECT 'g' UNION ALL\n" " SELECT 'h'\n" " UNION ALL SELECT 'j'\n" ")\n" ) result = sqlfluff.lint(sql) assert "LT11" in [r["code"] for r in result] def test__rules__std_LT02_LT11_union_all_in_subquery_fix(): """Verify combination of rules LT02 and LT11 produces a correct indentation.""" sql = ( "SELECT c FROM (\n" " SELECT 'g' UNION ALL\n" " SELECT 'h'\n" " UNION ALL SELECT 'j'\n" ")\n" ) fixed_sql = ( "SELECT c FROM (\n" " SELECT 'g'\n" " UNION ALL\n" " SELECT 'h'\n" " UNION ALL\n" " SELECT 'j'\n" ")\n" ) result = sqlfluff.fix(sql) assert result == fixed_sql sqlfluff-2.3.5/test/rules/std_LT03_test.py000066400000000000000000000044041451700765000204300ustar00rootroot00000000000000"""Tests the python routines within LT03.""" import sqlfluff from sqlfluff.core import Linter from sqlfluff.core.config import FluffConfig EXPECTED_LEADING_MESSAGE = ( "Found trailing binary operator. Expected only leading near line breaks." ) EXPECTED_TRAILING_MESSAGE = ( "Found leading binary operator. Expected only trailing near line breaks." 
) def test__rules__std_LT03_default(): """Verify that LT03 returns the correct error message for default (trailing).""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ result = sqlfluff.lint(sql) assert "LT03" in [r["code"] for r in result] assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result] def test__rules__std_LT03_leading(): """Verify correct error message when leading is used.""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ config = FluffConfig( configs={"layout": {"type": {"binary_operator": {"line_position": "leading"}}}}, overrides={"dialect": "ansi"}, ) # The sqlfluff.lint API doesn't allow us to pass config so need to do what it does linter = Linter(config=config) result_records = linter.lint_string_wrapped(sql).as_records() result = result_records[0]["violations"] assert "LT03" in [r["code"] for r in result] assert EXPECTED_LEADING_MESSAGE in [r["description"] for r in result] def test__rules__std_LT03_trailing(): """Verify correct error message when trailing is used.""" sql = """ SELECT a, b FROM foo WHERE a = 1 AND b = 2 """ config = FluffConfig( configs={ "layout": {"type": {"binary_operator": {"line_position": "trailing"}}} }, overrides={"dialect": "ansi"}, ) # The sqlfluff.lint API doesn't allow us to pass config so need to do what it does linter = Linter(config=config) result_records = linter.lint_string_wrapped(sql).as_records() result = result_records[0]["violations"] assert "LT03" in [r["code"] for r in result] assert EXPECTED_TRAILING_MESSAGE in [r["description"] for r in result] sqlfluff-2.3.5/test/rules/std_LT04_test.py000066400000000000000000000025621451700765000204340ustar00rootroot00000000000000"""Tests the python routines within LT04.""" import sqlfluff def test__rules__std_LT04_unparseable(): """Verify that LT04 doesn't try to fix queries with parse errors. This has been observed to frequently cause syntax errors, especially in combination with Jinja templating, e.g. undefined template variables. """ # This example comes almost directly from a real-world example. The user # accidentally ran "sqlfluff fix" without defining # "readability_features_numeric" and "readability_features_count_list", and # doing so corrupted their query. sql = """ SELECT user_id, campaign_id, business_type, SPLIT(intents, ",") AS intent_list, {% for feature in readability_features_numeric %} CAST(JSON_EXTRACT(readability_scores, '$.data.{{feature}}') AS float64) AS {{feature}} {% if not loop.last %} , {% endif %} {% endfor %}, {% for feature in readability_features_count_list %} CAST(JSON_EXTRACT(asset_structure, '$.{{feature}}') AS float64) AS {{feature}}_count {% if not loop.last %} , {% endif %} {% endfor %}, track_clicks_text, track_clicks_html FROM t """ result = sqlfluff.lint(sql) assert "LT04" not in [r["code"] for r in result] sqlfluff-2.3.5/test/rules/std_LT05_LT09_combo_test.py000066400000000000000000000027521451700765000223650ustar00rootroot00000000000000"""Tests the combination of LT05 and LT09. 
LT05: no long lines LT09: single selects should be on SELECT line """ import sqlfluff def test__rules__std_LT05_LT09_long_line_lint(): """Verify a long line that causes a clash between LT05 and LT09 is not changed.""" sql = ( "SELECT\n1000000000000000000000000000000000000000000000000000000000000000000000" "000000000000000000000000000000\n" ) result = sqlfluff.lint(sql) assert "LT05" in [r["code"] for r in result] assert "LT09" in [r["code"] for r in result] def test__rules__std_LT05_LT09_long_line_fix(): """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).""" sql = ( "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000" "00000000000000000000000000000\n" ) result = sqlfluff.fix(sql) assert result == ( "SELECT\n 100000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000\n" ) def test__rules__std_LT05_LT09_long_line_fix2(): """Verify clash between LT05 & LT09 does not add multiple newlines (see #1424).""" sql = ( "SELECT\n 100000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000\n" ) result = sqlfluff.fix(sql) assert result == ( "SELECT 10000000000000000000000000000000000000000000000000000000000000000000000" "00000000000000000000000000000\n" ) sqlfluff-2.3.5/test/rules/std_LT12_CV06_test.py000066400000000000000000000016201451700765000211630ustar00rootroot00000000000000"""Tests the python routines within LT12 and CV06.""" from sqlfluff.core import FluffConfig, Linter def test__rules__std_LT12_and_CV06_interaction() -> None: """Test interaction between LT12 and CV06 doesn't stop CV06 from being applied.""" # Test sql with no final newline and no final semicolon. sql = "SELECT foo FROM bar" # Ensure final semicolon requirement is active. cfg = FluffConfig(overrides={"dialect": "ansi"}) cfg.set_value( config_path=["rules", "convention.terminator", "require_final_semicolon"], val=True, ) linter = Linter(config=cfg) # Return linted/fixed file. linted_file = linter.lint_string(sql, fix=True) # Check expected lint errors are raised. assert set([v.rule.code for v in linted_file.violations]) == {"LT12", "CV06"} # Check file is fixed. assert linted_file.fix_string()[0] == "SELECT foo FROM bar;\n" sqlfluff-2.3.5/test/rules/std_RF01_LT09_test.py000066400000000000000000000013441451700765000211660ustar00rootroot00000000000000"""Tests observed conflict between RF01 & LT09. Root cause was BaseSegment.copy(). """ from sqlfluff.core import FluffConfig, Linter def test__rules__std_RF01_LT09_copy() -> None: """Tests observed conflict between RF01 & LT09. 
https://github.com/sqlfluff/sqlfluff/issues/5203 """ sql = """ SELECT DISTINCT `FIELD` FROM `TABLE`; """ cfg = FluffConfig.from_kwargs( dialect="mysql", rules=["RF01", "LT09"], ) result = Linter(config=cfg).lint_string(sql) for violation in result.violations: assert "Unexpected exception" not in violation.description assert len(result.violations) == 1 only_violation = result.violations[0] assert only_violation.rule_code() == "LT09" sqlfluff-2.3.5/test/rules/std_RF02_test.py000066400000000000000000000006351451700765000204210ustar00rootroot00000000000000"""Tests the python routines within RF02.""" import sqlfluff def test__rules__std_RF02_wildcard_single_count(): """Verify that RF02 is only raised once for wildcard (see issue #1973).""" sql = """ SELECT * FROM foo INNER JOIN bar; """ result = sqlfluff.lint(sql) assert "RF02" in [r["code"] for r in result] assert [r["code"] for r in result].count("RF02") == 1 sqlfluff-2.3.5/test/rules/std_ST03_test.py000066400000000000000000000022071451700765000204360ustar00rootroot00000000000000"""Tests the python routines within ST03.""" import sqlfluff def test__rules__std_ST03_multiple_unused_ctes(): """Verify that ST03 returns multiple lint issues, one per unused CTE.""" sql = """ WITH cte_1 AS ( SELECT 1 ), cte_2 AS ( SELECT 2 ), cte_3 AS ( SELECT 3 ), cte_4 AS ( SELECT 4 ) SELECT var_bar FROM cte_3 """ result = sqlfluff.lint(sql, rules=["ST03"]) assert result == [ { "code": "ST03", "description": 'Query defines CTE "cte_1" but does not use it.', "line_no": 3, "line_pos": 5, "name": "structure.unused_cte", }, { "code": "ST03", "description": 'Query defines CTE "cte_2" but does not use it.', "line_no": 6, "line_pos": 5, "name": "structure.unused_cte", }, { "code": "ST03", "description": 'Query defines CTE "cte_4" but does not use it.', "line_no": 12, "line_pos": 5, "name": "structure.unused_cte", }, ] sqlfluff-2.3.5/test/rules/std_fix_auto_test.py000066400000000000000000000130051451700765000215610ustar00rootroot00000000000000"""Automated tests for fixing violations. Any files in the test/fixtures/linter/autofix directory will be picked up and automatically tested against the appropriate dialect. """ import json import logging import os import shutil import tempfile from typing import Optional import pytest import yaml from sqlfluff.core import FluffConfig, Linter # Construct the tests from the filepath test_cases = [] base_auto_fix_path = ("test", "fixtures", "linter", "autofix") # Generate the filenames for each dialect from the parser test directory for dialect in os.listdir(os.path.join(*base_auto_fix_path)): # Ignore documentation if dialect.endswith(".md"): continue # assume that "dialect" is now the name of a dialect dirlist = os.listdir(os.path.join(*base_auto_fix_path, dialect)) for test_case in dirlist: test_cases.append( ( # The dialect dialect, # The directory name test_case, ) ) def make_dialect_path(dialect, fname): """Work out how to find paths given a dialect and a file name.""" return os.path.join("test", "fixtures", "dialects", dialect, fname) def auto_fix_test(dialect, folder, caplog): """A test for roundtrip testing: take a file buffer, lint, fix and lint again. This is explicitly different from the linter version of this, in that it uses the command line rather than the direct API. """ # Log just the rules logger for this test. # NOTE: In debugging it may be instructive to enable some of # the other loggers listed here to debug particular issues. # Enabling all of them results in very long logs so use # wisely.
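    # (As a rough guide: the templater logger below helps when a fixture
    # renders unexpectedly, and the lexer logger helps when the rendered
    # SQL fails to tokenise.)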
# caplog.set_level(logging.DEBUG, logger="sqlfluff.templater") # caplog.set_level(logging.DEBUG, logger="sqlfluff.lexer") caplog.set_level(logging.DEBUG, logger="sqlfluff.linter") caplog.set_level(logging.DEBUG, logger="sqlfluff.rules") filename = "testing.sql" # Let's get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) cfgpath = os.path.join(tempdir_path, ".sqlfluff") src_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "before.sql") cmp_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "after.sql") vio_filepath = os.path.join(*base_auto_fix_path, dialect, folder, "violations.json") cfg_filepath = os.path.join(*base_auto_fix_path, dialect, folder, ".sqlfluff") test_conf_filepath = os.path.join( *base_auto_fix_path, dialect, folder, "test-config.yml" ) # Load the config file for the test: with open(test_conf_filepath) as cfg_file: cfg = yaml.safe_load(cfg_file) print("## Config: ", cfg) rules: Optional[str] = ",".join(cfg["test-config"].get("rules")).upper() if "ALL" in rules: rules = None raise_on_non_linting_violations = cfg["test-config"].get( "raise_on_non_linting_violations", True ) # Open the example file and write the content to it print_buff = "" with open(filepath, mode="w") as dest_file: with open(src_filepath) as source_file: for line in source_file: dest_file.write(line) print_buff += line # Copy the config file too try: with open(cfgpath, mode="w") as dest_file: with open(cfg_filepath) as source_file: print("## Config File Found.") for line in source_file: dest_file.write(line) except FileNotFoundError: # No config file? No biggie print("## No Config File Found.") pass print(f"## Input file:\n{print_buff}") # Do we need to do a violations check? try: with open(vio_filepath) as vio_file: violations = json.load(vio_file) except FileNotFoundError: # No violations file. Let's not worry violations = None # Run the fix command overrides = {"dialect": dialect} if rules: overrides["rules"] = rules cfg = FluffConfig.from_root(overrides=overrides) lnt = Linter(config=cfg) res = lnt.lint_path(filepath, fix=True) if not res.files: raise ValueError("LintedDir empty: Parsing likely failed.") print(f"## Templated file:\n{res.tree.raw}") # We call check_tuples here to make sure any non-linting # violations are raised, and that the test fails. vs = set( res.check_tuples( raise_on_non_linting_violations=raise_on_non_linting_violations ) ) # If we have a violations structure, let's enforce it.
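    # As a sketch (the layout is inferred from the parsing code below), a
    # violations.json file looks roughly like:
    #   {"violations": {"linting": {"LT01": [[1, 5], [2, 9]]}}}
    # i.e. a mapping from rule code to a list of [line_no, line_pos] pairs,
    # flattened into the same tuple form as check_tuples above.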
if violations: # Format the violations file expected_vs = set() for rule_key in violations["violations"]["linting"]: for elem in violations["violations"]["linting"][rule_key]: expected_vs.add((rule_key, *elem)) assert expected_vs == vs # Actually do the fixes res = res.persist_changes() # Read the fixed file with open(filepath) as fixed_file: fixed_buff = fixed_file.read() # Clean up once read shutil.rmtree(tempdir_path) # Read the comparison file with open(cmp_filepath) as comp_file: comp_buff = comp_file.read() # Make sure we were successful assert res # Assert that we fixed as expected assert fixed_buff == comp_buff @pytest.mark.parametrize("dialect,folder", test_cases) def test__std_fix_auto(dialect, folder, caplog): """Automated Fixing Tests.""" auto_fix_test(dialect=dialect, folder=folder, caplog=caplog) sqlfluff-2.3.5/test/rules/std_roundtrip_test.py000066400000000000000000000110371451700765000217740ustar00rootroot00000000000000"""Round trip tests for rules with a fix method.""" import os import re import shutil import tempfile from io import StringIO import pytest from click.testing import CliRunner from sqlfluff.cli.commands import fix, lint def generic_roundtrip_test(source_file, rulestring): """Run a roundtrip test given a sql file and a rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing. """ if isinstance(source_file, str): # If it's a string, treat it as a path so let's load it. with open(source_file) as f: source_file = StringIO(f.read()) filename = "testing.sql" # Let's get the path of a file to use tempdir_path = tempfile.mkdtemp() filepath = os.path.join(tempdir_path, filename) # Open the example file and write the content to it with open(filepath, mode="w") as dest_file: for line in source_file: dest_file.write(line) runner = CliRunner() # Check that we first detect the issue result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath]) assert result.exit_code == 1 # Fix the file (in force mode) result = runner.invoke( fix, ["--rules", rulestring, "--dialect=ansi", "-f", filepath] ) assert result.exit_code == 0 # Now lint the file and check for exceptions result = runner.invoke(lint, ["--rules", rulestring, "--dialect=ansi", filepath]) assert result.exit_code == 0 shutil.rmtree(tempdir_path) def jinja_roundtrip_test( source_path, rulestring, sqlfile="test.sql", cfgfile=".sqlfluff" ): """Run a roundtrip test given a source path and rule. We take a file buffer, lint, fix and lint, finally checking that the file fails initially but not after fixing. Additionally we also check that we haven't messed up the templating tags in the process.
""" tempdir_path = tempfile.mkdtemp() sql_filepath = os.path.join(tempdir_path, sqlfile) cfg_filepath = os.path.join(tempdir_path, cfgfile) # Copy the SQL file with open(sql_filepath, mode="w") as dest_file: with open(os.path.join(source_path, sqlfile)) as source_file: for line in source_file: dest_file.write(line) # Copy the Config file with open(cfg_filepath, mode="w") as dest_file: with open(os.path.join(source_path, cfgfile)) as source_file: for line in source_file: dest_file.write(line) with open(sql_filepath) as f: # Get a record of the pre-existing jinja tags tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0) runner = CliRunner() # Check that we first detect the issue result = runner.invoke( lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath] ) assert result.exit_code == 1 # Fix the file (in force mode) result = runner.invoke( fix, ["--rules", rulestring, "-f", "--dialect=ansi", sql_filepath] ) assert result.exit_code == 0 # Now lint the file and check for exceptions result = runner.invoke( lint, ["--rules", rulestring, "--dialect=ansi", sql_filepath] ) if result.exit_code != 0: # Output the file content for debugging print("File content:") with open(sql_filepath) as f: print(repr(f.read())) print("Command output:") print(result.output) assert result.exit_code == 0 with open(sql_filepath) as f: # Check that the tags are all still there! new_tags = re.findall(r"{{[^}]*}}|{%[^}%]*%}", f.read(), flags=0) # Clear up the temp dir shutil.rmtree(tempdir_path) # Assert that the tags are the same assert tags == new_tags @pytest.mark.parametrize( "rule,path", [ ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("LT01", "test/fixtures/linter/whitespace_errors.sql"), ("LT01", "test/fixtures/linter/indentation_errors.sql"), ("CP01", "test/fixtures/linter/whitespace_errors.sql"), ("AL01", "test/fixtures/dialects/ansi/select_simple_i.sql"), ("AL02", "test/fixtures/dialects/ansi/select_simple_i.sql"), ], ) def test__cli__command__fix(rule, path): """Test the round trip of detecting, fixing and then not detecting given rule.""" generic_roundtrip_test(path, rule) @pytest.mark.parametrize("rule", ["CP01", "LT01"]) def test__cli__command__fix_templated(rule): """Roundtrip test, making sure that we don't drop tags while templating.""" jinja_roundtrip_test("test/fixtures/templater/jinja_d_roundtrip", rule) sqlfluff-2.3.5/test/rules/std_test.py000066400000000000000000000071251451700765000176710ustar00rootroot00000000000000"""Tests for the standard set of rules.""" import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.core.rules import get_ruleset from sqlfluff.utils.testing.rules import assert_rule_raises_violations_in_file @pytest.mark.parametrize( "rule,path,violations", [ ("LT01", "indentation_errors.sql", [(4, 24)]), ( "LT02", "indentation_errors.sql", [(2, 1), (3, 1), (4, 1), (5, 1)], ), # Check we get comma whitespace errors ("LT01", "whitespace_errors.sql", [(2, 9), (3, 12)]), # Check we get operator whitespace errors and it works with brackets ( "LT01", "operator_errors.sql", [(3, 8), (4, 10), (7, 6), (7, 7), (7, 9), (7, 10), (7, 12), (7, 13)], ), ("LT03", "operator_errors.sql", [(5, 9)]), ( "LT01", "operator_errors_negative.sql", [(2, 6), (2, 9), (5, 6), (5, 7)], ), # Hard indentation errors ( "LT02", "indentation_error_hard.sql", [ (2, 1), (6, 1), (9, 1), (11, 15), (12, 1), (12, 33), (13, 15), (14, 1), (14, 36), (18, 1), (19, 1), (20, 1), ], ), # Check bracket handling with closing brackets and contained indents works. 
("LT02", "indentation_error_contained.sql", []), # Check we handle block comments as expect. Github #236 ( "LT05", "block_comment_errors.sql", # Errors should flag on the first element of the line. [(1, 1), (2, 5), (4, 5)], ), ("LT05", "block_comment_errors_2.sql", [(1, 1), (2, 1)]), # Column references ("RF02", "column_references.sql", [(1, 8)]), ("RF02", "column_references_bare_function.sql", []), ("RF01", "column_references.sql", [(1, 11)]), ("AL05", "column_references.sql", [(2, 11)]), # Distinct and Group by ("AM01", "select_distinct_group_by.sql", [(1, 8)]), # Make sure that ignoring works as expected ("LT01", "operator_errors_ignore.sql", [(10, 8), (10, 9)]), ( "JJ01", "heavy_templating.sql", [(12, 13), (12, 25)], ), ], ) def test__rules__std_file(rule, path, violations): """Test the linter finds the given errors in (and only in) the right places.""" assert_rule_raises_violations_in_file( rule=rule, fpath="test/fixtures/linter/" + path, violations=violations, fluff_config=FluffConfig(overrides=dict(rules=rule, dialect="ansi")), ) @pytest.mark.parametrize( "rule_config_dict", [ {"allow_scalar": "blah"}, {"single_table_references": "blah"}, {"unquoted_identifiers_policy": "blah"}, {"capitalisation.keywords": {"capitalisation_policy": "blah"}}, {"aliasing.table": {"aliasing": "blah"}}, {"aliasing.column": {"aliasing": "blah"}}, {"capitalisation.identifiers": {"extended_capitalisation_policy": "blah"}}, {"capitalisation.functions": {"capitalisation_policy": "blah"}}, ], ) def test_improper_configs_are_rejected(rule_config_dict): """Ensure that unsupported configs raise a ValueError.""" config = FluffConfig( configs={"rules": rule_config_dict}, overrides={"dialect": "ansi"} ) with pytest.raises(ValueError): get_ruleset().get_rulepack(config) sqlfluff-2.3.5/test/rules/yaml_test_cases_test.py000066400000000000000000000032741451700765000222570ustar00rootroot00000000000000"""Runs the rule test cases.""" import logging import os import pytest from sqlfluff.core.config import FluffConfig from sqlfluff.utils.testing.rules import ( get_rule_from_set, load_test_cases, rules__test_helper, ) def pytest_generate_tests(metafunc): """Generate tests, optionally by rule_id.""" rule_id = metafunc.config.getoption("rule_id") ids, test_cases = load_test_cases( test_cases_path=os.path.join( "test/fixtures/rules/std_rule_cases", f"{rule_id}.yml" ) ) if "test_case" in metafunc.fixturenames: metafunc.parametrize("test_case", test_cases, ids=ids) @pytest.mark.integration @pytest.mark.rules_suite def test__rule_test_case(test_case, caplog): """Run the tests.""" with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules"): with caplog.at_level(logging.DEBUG, logger="sqlfluff.linter"): res = rules__test_helper(test_case) if res is not None and res != test_case.fail_str: cfg = FluffConfig(configs=test_case.configs) rule = get_rule_from_set(test_case.rule, config=cfg) assert rule.is_fix_compatible, f"Rule {test_case.rule} returned " 'fixes but does not specify "is_fix_compatible = True".' 
def test__rule_test_global_config(): """Test global config in rule test cases.""" ids, test_cases = load_test_cases( os.path.join("test/fixtures/rules/R001_global_config_test.yml") ) assert len(test_cases) == 2 # tc1: overwrites global config assert test_cases[0].configs["core"]["dialect"] == "ansi" # tc2: global config is used assert test_cases[1].configs["core"]["dialect"] == "exasol" sqlfluff-2.3.5/test/test_testing.py000066400000000000000000000050431451700765000174170ustar00rootroot00000000000000"""Test the sqlfluff.utils.testing module.""" import pytest from _pytest.outcomes import Failed, Skipped from sqlfluff.utils.testing.rules import ( RuleTestCase, assert_rule_fail_in_sql, assert_rule_pass_in_sql, rules__test_helper, ) def test_assert_rule_fail_in_sql_handle_parse_error(): """Util assert_rule_fail_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="L000", sql="select from") failed_test.match("Found the following parse errors in test case:") def test_assert_rule_fail_in_sql_should_fail_queries_that_unexpectedly_pass(): """Util assert_rule_fail_in_sql should fail if no failure.""" with pytest.raises(Failed) as failed_test: assert_rule_fail_in_sql(code="LT01", sql="select 1") failed_test.match("No LT01 failures found in query which should fail") def test_assert_rule_pass_in_sql_should_handle_parse_error(): """Util assert_rule_pass_in_sql should handle parse errors.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select from") failed_test.match("Found unparsable section:") def test_assert_rule_pass_in_sql_should_fail_when_there_are_violations(): """Util assert_rule_pass_in_sql should fail when there are violations.""" with pytest.raises(Failed) as failed_test: assert_rule_pass_in_sql(code="LT01", sql="select a , b from t") failed_test.match("Found LT01 failures in query which should pass") def test_rules__test_helper_skipped_when_test_case_skipped(): """Util rules__test_helper should skip the test when test case is "skipped".""" rule_test_case = RuleTestCase(skip="Skip this one for now") with pytest.raises(Skipped) as skipped_test: rules__test_helper(rule_test_case) skipped_test.match("Skip this one for now") def test_rules__test_helper_has_variable_introspection(test_verbosity_level): """Make sure the helper gives variable introspection information on failure.""" rule_test_case = RuleTestCase( rule="LT02", fail_str=""" select a, b from table """, # extra comma on purpose fix_str=""" select a, b, from table """, ) with pytest.raises(AssertionError) as skipped_test: rules__test_helper(rule_test_case) if test_verbosity_level >= 2: # Enough to check that a query diff is displayed skipped_test.match("select") sqlfluff-2.3.5/test/utils/000077500000000000000000000000001451700765000154675ustar00rootroot00000000000000sqlfluff-2.3.5/test/utils/analysis/000077500000000000000000000000001451700765000173125ustar00rootroot00000000000000sqlfluff-2.3.5/test/utils/analysis/test_query.py000066400000000000000000000233561451700765000221010ustar00rootroot00000000000000"""Test the select_crawler module.""" import pytest from sqlfluff.core.linter.linter import Linter from sqlfluff.utils.analysis.query import Query def _parse_and_crawl_outer(sql): """Helper function for select crawlers. Given a SQL statement this crawls the SQL and instantiates a Query on the outer relevant segment. """ linter = Linter(dialect="ansi") parsed = linter.parse_string(sql) # Make sure it's fully parsable. 
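    # (If parsing had failed, an "unparsable" segment would appear somewhere
    # in the tree and the Query analysis below would be meaningless, so we
    # fail fast here.)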
assert "unparsable" not in parsed.tree.descendant_type_set # Create a crawler from the root segment. query = Query.from_root(parsed.tree, linter.dialect) # Analyse the segment. return query, linter @pytest.mark.parametrize( "sql, expected_json", [ ( # Test trivial query. "select 1", {"selectables": ["select 1"]}, ), ( # Test set expression. "select 1 union select 2", {"selectables": ["select 1", "select 2"]}, ), ( # Test multiple CTEs. "with cte1 as (select 1 as x), cte2 as (select 2 as y) " "select * from cte1 join cte2 using (x)", { "ctes": { "CTE1": {"selectables": ["select 1 as x"]}, "CTE2": {"selectables": ["select 2 as y"]}, }, "query_type": "WithCompound", "selectables": ["select * from cte1 join cte2 using (x)"], }, ), ( # Nested CTEs (from AM04 test suite) """ with a as ( with b as (select 1 from c) select * from b ) select * from a """, { "ctes": { "A": { "ctes": {"B": {"selectables": ["select 1 from c"]}}, "query_type": "WithCompound", "selectables": ["select * from b"], } }, "query_type": "WithCompound", "selectables": ["select * from a"], }, ), ( # Nested CTEs (from AM04 test suite) """ with b as (select 1 from c) select * from ( with a as (select * from b) select * from a ) """, { "ctes": {"B": {"selectables": ["select 1 from c"]}}, "query_type": "WithCompound", "selectables": [ "select * from (\n" " with a as (select * from b)\n" " select * from a\n" " )" ], "subqueries": [ # NOTE: Subquery from the FROM clause. { "ctes": {"A": {"selectables": ["select * from b"]}}, "query_type": "WithCompound", "selectables": ["select * from a"], }, ], }, ), ( # Test that subquery in "from" not included. "select a.x from (select z from b)", { "selectables": ["select a.x from (select z from b)"], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # Test that subquery in "from" / "join" not included. "select a.x from a join (select z from b) as b on (a.x = b.x)", { "selectables": [ "select a.x from a join (select z from b) as b on (a.x = b.x)" ], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # In CTE main query, test that subquery in "from" not included. "with prep as (select 1) select a.x from (select z from b)", { "ctes": {"PREP": {"selectables": ["select 1"]}}, "query_type": "WithCompound", "selectables": ["select a.x from (select z from b)"], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( # In CTE main query, test that subquery in "from" / "join" not included. 
"with prep as (select 1) " "select a.x from a join (select z from b) as b on (a.x = b.x)", { "ctes": {"PREP": {"selectables": ["select 1"]}}, "query_type": "WithCompound", "selectables": [ "select a.x from a join (select z from b) as b on (a.x = " "b.x)" ], "subqueries": [{"selectables": ["select z from b"]}], }, ), ( """with prep_1 as ( with d as ( select x, z from b ) select * from d ) select a.x, a.y, b.z from a join prep_1 using (x) """, { "ctes": { "PREP_1": { "ctes": { "D": {"selectables": ["select x, z from b"]}, }, "query_type": "WithCompound", "selectables": ["select * from d"], } }, "query_type": "WithCompound", "selectables": [ "select\n a.x, a.y, b.z\nfrom a\njoin prep_1 using (x)" ], }, ), # Test with a UNION as the main selectable of a WITH ( "with a as (select 1), b as (select 2) " "select * from a union select * from b\n", { "ctes": { "A": {"selectables": ["select 1"]}, "B": {"selectables": ["select 2"]}, }, "query_type": "WithCompound", "selectables": [ "select * from a", "select * from b", ], }, ), # Test with a VALUES clause in a WITH ( "WITH txt AS ( VALUES (1, 'foo') ) SELECT * FROM txt\n", { "ctes": { "TXT": {"selectables": ["VALUES (1, 'foo')"]}, }, "query_type": "WithCompound", "selectables": [ "SELECT * FROM txt", ], }, ), # Test with Subqueries ( "SELECT (\n" " SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2\n" ") FROM\n" "(SELECT * FROM some_table) AS my_alias\n", { "selectables": [ "SELECT (\n" " SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2\n" ") FROM\n" "(SELECT * FROM some_table) AS my_alias", ], "subqueries": [ { "selectables": [ "SELECT other_table.other_table_field_1 FROM other_table\n" " WHERE other_table.id = field_2", ] }, {"selectables": ["SELECT * FROM some_table"]}, ], }, ), # Test a MERGE ( """MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c);""", { "selectables": [ """MERGE INTO t USING (SELECT * FROM u) AS u ON (a = b) WHEN MATCHED THEN UPDATE SET a = b WHEN NOT MATCHED THEN INSERT (b) VALUES (c)""" # NOTE: No trailing semicolon ], "subqueries": [{"selectables": ["SELECT * FROM u"]}], }, ), # Test a DELETE ( """DELETE FROM agent1 WHERE EXISTS( SELECT customer.cust_id FROM customer WHERE agent1.agent_code <> customer.agent_code);""", { "selectables": [ """SELECT customer.cust_id FROM customer WHERE agent1.agent_code <> customer.agent_code""" ] }, ), # Test an UPDATE ( """UPDATE my_table SET row_sum = ( SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_tableeee.id )""", { "selectables": [ """SELECT COUNT(*) AS row_sum FROM another_table WHERE another_table.id = my_tableeee.id""" ] }, ), ], ) def test_select_crawler_constructor(sql, expected_json): """Test Query when created using constructor.""" query, _ = _parse_and_crawl_outer(sql) assert all(cte.cte_definition_segment is not None for cte in query.ctes.values()) query_dict = query.as_dict() assert expected_json == query_dict def test_select_crawler_nested(): """Test invoking with an outer from_expression_segment.""" sql = """ select a.x, a.y, b.z from a join ( with d as ( select x, z from b ) select * from d ) using (x) """ query, linter = _parse_and_crawl_outer(sql) inner_from = ( query.selectables[0].select_info.table_aliases[1].from_expression_element ) inner_select = next(inner_from.recursive_crawl("with_compound_statement")) inner_query = Query.from_segment(inner_select, linter.dialect) assert 
inner_query.as_dict() == { "selectables": [ "select * from d", ], "ctes": {"D": {"selectables": ["select x, z from b"]}}, "query_type": "WithCompound", } sqlfluff-2.3.5/test/utils/reflow/000077500000000000000000000000001451700765000167655ustar00rootroot00000000000000sqlfluff-2.3.5/test/utils/reflow/conftest.py000066400000000000000000000003731451700765000211670ustar00rootroot00000000000000"""Common test fixtures for reflow modules.""" import pytest from sqlfluff.core import FluffConfig @pytest.fixture() def default_config(): """Return the default config for reflow tests.""" return FluffConfig(overrides={"dialect": "ansi"}) sqlfluff-2.3.5/test/utils/reflow/depthmap_test.py000066400000000000000000000076571451700765000222170ustar00rootroot00000000000000"""Tests for the depthmap object.""" from sqlfluff.core import Linter from sqlfluff.utils.reflow.depthmap import DepthMap, StackPosition def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree def test_reflow_depthmap_from_parent(default_config): """Test map construction from a root segment.""" sql = "SELECT 1" root = parse_ansi_string(sql, default_config) dm = DepthMap.from_parent(root) # We use UUIDS in the depth map so we can't assert their value. # What we can do is use them. # Check that we get the right depths. assert [dm.depth_info[seg.uuid].stack_depth for seg in root.raw_segments] == [ 4, 4, 4, 5, 4, 1, ] # Check they all share the same first three hash and # class type elements (except the end of file marker at the end). # These should be the file, statement and select statement. expected = ({"file", "base"}, {"statement", "base"}, {"select_statement", "base"}) assert all( dm.depth_info[seg.uuid].stack_class_types[:3] == expected for seg in root.raw_segments[:-1] ) first_hashes = dm.depth_info[root.raw_segments[0].uuid].stack_hashes[:3] assert all( dm.depth_info[seg.uuid].stack_hashes[:3] == first_hashes for seg in root.raw_segments[:-1] ) # While we're here, test the DepthInfo.common_with method select_keyword_di = dm.depth_info[root.raw_segments[0].uuid] numeric_one_di = dm.depth_info[root.raw_segments[3].uuid] assert len(select_keyword_di.common_with(numeric_one_di)) == 4 def test_reflow_depthmap_from_raws_and_root(default_config): """Test that the indirect route is equivalent to the direct route.""" sql = "SELECT 1" root = parse_ansi_string(sql, default_config) # Direct route dm_direct = DepthMap.from_parent(root) # Indirect route. dm_indirect = DepthMap.from_raws_and_root(root.raw_segments, root) # The depth info dict depends on the sequence so we only need # to check those are equal. assert dm_direct.depth_info == dm_indirect.depth_info def test_reflow_depthmap_order_by(default_config): """Test depth mapping of an order by clause.""" sql = "SELECT * FROM foo ORDER BY bar DESC\n" root = parse_ansi_string(sql, default_config) # Get the `ORDER` and `DESC` segments. order_seg = None desc_seg = None for raw in root.raw_segments: if raw.raw_upper == "ORDER": order_seg = raw elif raw.raw_upper == "DESC": desc_seg = raw # Make sure we find them assert order_seg assert desc_seg # Generate a depth map depth_map = DepthMap.from_parent(root) # Check their depth info order_seg_di = depth_map.get_depth_info(order_seg) desc_seg_di = depth_map.get_depth_info(desc_seg) # Make sure they both contain an order by clause. 
assert frozenset({"base", "orderby_clause"}) in order_seg_di.stack_class_types assert frozenset({"base", "orderby_clause"}) in desc_seg_di.stack_class_types # Get the ID of one and make sure it's in the other order_by_hash = order_seg_di.stack_hashes[ order_seg_di.stack_class_types.index(frozenset({"base", "orderby_clause"})) ] assert order_by_hash in order_seg_di.stack_hashes assert order_by_hash in desc_seg_di.stack_hashes # Get the position information order_stack_pos = order_seg_di.stack_positions[order_by_hash] desc_stack_pos = desc_seg_di.stack_positions[order_by_hash] # Make sure the position information is correct print(order_stack_pos) print(desc_stack_pos) assert order_stack_pos == StackPosition(idx=0, len=9, type="start") # NOTE: Even though idx 7 is not the end, the _type_ of this location # is still an "end" because the following elements are non-code. assert desc_stack_pos == StackPosition(idx=7, len=9, type="end") sqlfluff-2.3.5/test/utils/reflow/rebreak_test.py000066400000000000000000000060651451700765000220200ustar00rootroot00000000000000"""Tests for rebreak methods. Specifically: - ReflowSequence.rebreak() """ import logging import pytest from sqlfluff.core import Linter from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree @pytest.mark.parametrize( "raw_sql_in,raw_sql_out", [ # Trivial Case ("select 1", "select 1"), # These rely on the default config being for leading operators ("select 1\n+2", "select 1\n+2"), ("select 1+\n2", "select 1\n+ 2"), # NOTE: Implicit respace. ("select\n 1 +\n 2", "select\n 1\n + 2"), ("select\n 1 +\n -- comment\n 2", "select\n 1\n -- comment\n + 2"), # These rely on the default config being for trailing commas ("select a,b", "select a,b"), ("select a\n,b", "select a,\nb"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n , b", "select\n a,\n b"), ("select\n a\n -- comment\n , b", "select\n a,\n -- comment\n b"), ], ) def test_reflow__sequence_rebreak_root(raw_sql_in, raw_sql_out, default_config, caplog): """Test the ReflowSequence.rebreak() method directly. Focused around a whole segment. """ root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) for idx, elem in enumerate(seq.elements): print(idx, elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.rebreak() print(new_seq.get_fixes()) assert new_seq.get_raw() == raw_sql_out @pytest.mark.parametrize( "raw_sql_in,target_idx,seq_sql_in,seq_sql_out", [ ("select 1+\n(2+3)", 4, "1+\n(", "1\n+ ("), ("select a,\n(b+c)", 4, "a,\n(", "a,\n("), ("select a\n , (b+c)", 6, "a\n , (", "a,\n ("), # Here we don't have enough context to rebreak it so # it should be left unaltered. ("select a,\n(b+c)", 6, ",\n(b", ",\n(b"), # This intentionally targets an incomplete span. ("select a<=b", 4, "a<=", "a<="), ], ) def test_reflow__sequence_rebreak_target( raw_sql_in, target_idx, seq_sql_in, seq_sql_out, default_config, caplog ): """Test the ReflowSequence.rebreak() method directly. Focused around a target segment. This intentionally stretches some of the span logic. 
""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) target = root.raw_segments[target_idx] print("Target: ", target) seq = ReflowSequence.from_around_target(target, root, config=default_config) for idx, elem in enumerate(seq.elements): print(idx, elem) assert seq.get_raw() == seq_sql_in with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.rebreak() print(new_seq.get_fixes()) assert new_seq.get_raw() == seq_sql_out sqlfluff-2.3.5/test/utils/reflow/reindent_test.py000066400000000000000000000611631451700765000222150ustar00rootroot00000000000000"""Tests for reindenting methods. Specifically: - ReflowPoint.indent_to() - ReflowPoint.get_indent() - deduce_line_indent() """ import logging import pytest from sqlfluff.core import Linter from sqlfluff.core.rules.fix import apply_fixes, compute_anchor_edit_info from sqlfluff.utils.reflow.helpers import deduce_line_indent, fixes_from_results from sqlfluff.utils.reflow.reindent import ( _crawl_indent_points, _IndentLine, _IndentPoint, lint_indent_points, ) from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree @pytest.mark.parametrize( "raw_sql_in,elem_idx,indent_to,point_sql_out", [ # Trivial Case ("select\n 1", 1, " ", "\n "), # Change existing indents ("select\n 1", 1, " ", "\n "), ("select\n 1", 1, " ", "\n "), ("select\n1", 1, " ", "\n "), ("select\n 1", 1, "", "\n"), # Create new indents ("select 1", 1, " ", "\n "), ("select 1", 1, " ", "\n "), ("select 1", 1, "", "\n"), ("select 1", 1, " ", "\n "), ], ) def test_reflow__point_indent_to( raw_sql_in, elem_idx, indent_to, point_sql_out, default_config, caplog ): """Test the ReflowPoint.indent_to() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) elem = seq.elements[elem_idx] print("Element: ", elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_fixes, new_point = elem.indent_to( indent_to, before=seq.elements[elem_idx - 1].segments[-1], after=seq.elements[elem_idx + 1].segments[0], ) print(new_fixes) assert new_point.raw == point_sql_out @pytest.mark.parametrize( "raw_sql_in,elem_idx,indent_out", [ # Null case ("select 1", 1, None), # Trivial Case ("select\n 1", 1, " "), # Harder Case (i.e. take the last indent) ("select\n \n \n 1", 1, " "), ], ) def test_reflow__point_get_indent( raw_sql_in, elem_idx, indent_out, default_config, caplog ): """Test the ReflowPoint.get_indent() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) elem = seq.elements[elem_idx] print("Element: ", elem) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = elem.get_indent() assert result == indent_out @pytest.mark.parametrize( "raw_sql_in,target_raw,indent_out", [ # Trivial case ("select 1", "select", ""), ("select 1", "1", ""), # Easy Case ("select\n 1", "1", " "), # Harder Cases (i.e. 
take the last indent) ("select\n \n \n 1", "1", " "), ("select\n \n \n 1+2+3+4", "4", " "), ("select\n 1 + 2", "2", " "), ], ) def test_reflow__deduce_line_indent( raw_sql_in, target_raw, indent_out, default_config, caplog ): """Test the deduce_line_indent() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) for target_seg in root.raw_segments: if target_seg.raw == target_raw: break else: raise ValueError("Target Raw Not Found") print("Target: ", target_seg) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = deduce_line_indent(target_seg, root) assert result == indent_out @pytest.mark.parametrize( "raw_sql_in,points_out", [ # Trivial ( "select 1", [ # No point at the start. # Point after select (not newline) _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=False, untaken_indents=(), ), # Point after 1 (not newline either) _IndentPoint( idx=3, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=None, is_line_break=False, untaken_indents=(1,), ), ], ), ( "\nselect 1\n", [ # Start point _IndentPoint( idx=0, indent_impulse=0, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), # Point after select (not newline) _IndentPoint( idx=2, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=0, is_line_break=False, untaken_indents=(), ), # Point after 1 (is newline) _IndentPoint( idx=4, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=0, is_line_break=True, untaken_indents=(1,), ), ], ), ( "select\n1", [ # No point at the start. # Point after select (not newline) _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), # Point after 1 (is not newline) _IndentPoint( idx=3, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=1, is_line_break=False, untaken_indents=(), ), ], ), # More stretching cases. ( "SELECT\n r.a,\n s.b\nFROM r\nJOIN s\n " "ON\n r.a = s.a\n AND true", [ # No point at the start. # After SELECT _IndentPoint( idx=1, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=None, is_line_break=True, untaken_indents=(), ), _IndentPoint( idx=9, indent_impulse=0, indent_trough=0, initial_indent_balance=1, last_line_break_idx=1, is_line_break=True, untaken_indents=(), ), # Before FROM _IndentPoint( idx=15, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=9, is_line_break=True, untaken_indents=(), ), # Untaken indent before "r" _IndentPoint( idx=17, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=15, is_line_break=False, untaken_indents=(), ), # Before JOIN (-1 balance to take us back to # baseline (in line with FROM)) # NOTE: It keeps the untaken indent from the # previous point, but shouldn't use it. _IndentPoint( idx=19, indent_impulse=-1, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=15, is_line_break=True, untaken_indents=(1,), ), # Untaken indent before "s" _IndentPoint( idx=21, indent_impulse=1, indent_trough=0, initial_indent_balance=0, last_line_break_idx=19, is_line_break=False, untaken_indents=(), ), # NOTE: this is an interesting one. It's a Dedent-Indent pair. # There's a zero balance, and a trough of -1. We carry in the previous # untaken indent. 
But should pass it forward after this. _IndentPoint( idx=23, indent_impulse=0, indent_trough=-1, initial_indent_balance=1, last_line_break_idx=19, is_line_break=True, untaken_indents=(1,), ), # After ON. Default is indented_on_contents = True, so there is # an indent here. We *SHOULDNT* have an untaken indent here, # because while there was one at the last point, the trough # of the last point should have cleared it. _IndentPoint( idx=25, indent_impulse=1, indent_trough=0, initial_indent_balance=1, last_line_break_idx=23, is_line_break=True, untaken_indents=(), ), # Before AND _IndentPoint( idx=39, indent_impulse=0, indent_trough=0, initial_indent_balance=2, last_line_break_idx=25, is_line_break=True, untaken_indents=(), ), # after "true" _IndentPoint( idx=43, indent_impulse=-2, indent_trough=-2, initial_indent_balance=2, last_line_break_idx=39, is_line_break=False, untaken_indents=(), ), ], ), ( "SELECT *\nFROM t1\nJOIN t2 ON true\nAND true", [ # No point at the start. # NOTE: Abbreviated notation given much is the same as above. # After SELECT _IndentPoint(1, 1, 0, 0, None, False, ()), _IndentPoint(3, -1, -1, 1, None, True, (1,)), _IndentPoint(5, 1, 0, 0, 3, False, ()), _IndentPoint(7, -1, -1, 1, 3, True, (1,)), # JOIN _IndentPoint(9, 1, 0, 0, 7, False, ()), # TRICKY POINT (we're between "t2" and "ON"). # The indent between Join and t2 wasn't taken, but we're # also climbing down from that here. It should be in the # untaken indents _here_ but not passed forward. There is # however another indent opportunity here which ALSO isn't # taken, so that one *should* be passed forward. _IndentPoint(11, 0, -1, 1, 7, False, (1,)), # TRICKY POINT (we're between "ON" and "true"). # Default is indented_on_contents = True. # This means that there is an additional indent here. # It's not taken though. The incoming balance of 1 # isn't taken yet either (hence a 1 in the untaken indent). _IndentPoint(13, 1, 0, 1, 7, False, (1,)), # Between "true" and "AND". # Balance is 2, but both untaken. _IndentPoint(15, 0, 0, 2, 7, True, (1, 2)), # End point _IndentPoint(19, -2, -2, 2, 15, False, (1, 2)), ], ), # Templated case ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n", [ # No initial indent (this is the first newline). _IndentPoint(1, 1, 0, 0, None, True, ()), # point after a _IndentPoint(3, 0, 0, 1, 1, True, ()), # point after for _IndentPoint(5, 1, 0, 1, 3, True, ()), # point after d_val _IndentPoint(9, -1, -1, 2, 5, True, ()), # point after loop _IndentPoint(11, 1, 0, 1, 9, True, ()), # point after e_val _IndentPoint(15, -2, -2, 2, 11, True, ()), # point after endfor _IndentPoint(17, 0, 0, 0, 15, True, ()), ], ), # Templated case (with consuming whitespace) ( "{% for item in [1, 2] -%}\n" "SELECT *\n" "FROM some_table\n" "{{ 'UNION ALL\n' if not loop.last }}\n" "{%- endfor %}", [ # No initial indent (this is the first newline). # Importantly this first point - IS a newline # even though that newline segment is consumed # it should still be True here. _IndentPoint(1, 1, 0, 0, None, True, ()), # point between SELECT & * _IndentPoint(3, 1, 0, 1, 1, False, ()), # point after * _IndentPoint(5, -1, -1, 2, 1, True, (2,)), # point after FROM _IndentPoint(7, 1, 0, 1, 5, False, ()), # point after some_table _IndentPoint(9, -1, -1, 2, 5, True, (2,)), # point after ALL (we dedent down to the loop marker). _IndentPoint(13, -1, -1, 1, 9, True, ()), # There should be a loop marker here. # point after loop marker and before SELECT # (we indent back up after the loop).
_IndentPoint(15, 1, 0, 0, 13, True, ()), # point between SELECT & * _IndentPoint(17, 1, 0, 1, 15, False, ()), # point after * _IndentPoint(19, -1, -1, 2, 15, True, (2,)), # point after FROM _IndentPoint(21, 1, 0, 1, 19, False, ()), # point after some_table (and before unused placeholder) _IndentPoint(23, -1, -1, 2, 19, True, (2,)), # Point after placeholder and dedenting down to endfor _IndentPoint(25, -1, -1, 1, 23, True, ()), # Point between endfor and end-of-file _IndentPoint(27, 0, 0, 0, 25, False, ()), ], ), # Templated case (with templated newline and indent) ( "SELECT\n {{'1 \n, 2'}}\nFROM foo", [ # After SELECT _IndentPoint(1, 1, 0, 0, None, True, ()), # NOTE: The newline inside the tag isn't reported. # After the templated section (hence why 7) _IndentPoint(7, -1, -1, 1, 1, True, ()), # After FROM _IndentPoint(9, 1, 0, 0, 7, False, ()), # After foo _IndentPoint(11, -1, -1, 1, 7, False, (1,)), ], ), ], ) def test_reflow__crawl_indent_points(raw_sql_in, points_out, default_config, caplog): """Test _crawl_indent_points directly.""" root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): points = list(_crawl_indent_points(seq.elements)) assert points == points_out @pytest.mark.parametrize( "raw_sql_in,raw_sql_out", [ # Trivial ( "select 1", "select 1", ), # Initial Indent ( " select 1", "select 1", ), # Trailing Newline ( " select 1\n", "select 1\n", ), # Basic Multiline ( "select\n1", "select\n 1", ), # Advanced Multiline ( "select\n1+(\n2+3\n),\n4\nfrom foo", "select\n 1+(\n 2+3\n ),\n 4\nfrom foo", ), ( "select\n 1+(\n 2+3\n ),\n 4\n from foo", "select\n 1+(\n 2+3\n ),\n 4\nfrom foo", ), # Multiple untaken indents. We should only indent as many # times as required. ( " select ((((\n1\n))))", "select ((((\n 1\n))))", ), ( "select (((\n((\n3\n))\n)))", "select (((\n ((\n 3\n ))\n)))", ), # ### Templated Multiline Cases ### # NOTE: the templated tags won't show here, but they # should still be indented. # Trailing tag. NOTE: Last tag indented ( "select\n1\n{% if true %}\n+ 2\n{% endif %}", "select\n 1\n \n + 2\n ", ), # Cutting across the parse tree ( "select\n1\n{% if true %}\n,2\nFROM a\n{% endif %}", # This set of template tags cuts across the parse # tree. We should indent them appropriately. In this case # that should mean "case 3", picking the lowest of the # existing indents which should mean no indent for either. # We also shouldn't indent the contents between them either # when taking this option. "select\n 1\n\n ,2\nFROM a\n", ), # Template tags at file ends ( "{% if true %}\nSELECT 1\n{% endif %}", "\n SELECT 1\n", ), # Template loops: ( "select\n 0,\n {% for i in [1, 2, 3] %}\n {{i}},\n {% endfor %}\n 4", "select\n 0,\n \n 1,\n \n 2,\n \n 3,\n \n 4", ), # Correction and handling of hanging indents ( "select 1, 2", "select 1, 2", ), ( "select 1,\n2", "select\n 1,\n 2", ), ( "select 1,\n 2", "select\n 1,\n 2", ), # A hanging example where we're modifying a currently empty point. ( "select greatest(1,\n2)", "select greatest(\n 1,\n 2\n)", ), # Test handling of many blank lines. # NOTE: # 1. Initial whitespace should remain, because it's not an indent. # 2. Blank lines should also remain, because they're also not an indent. ( "\n\n \n\nselect\n\n\n\n \n\n 1\n\n \n\n", "\n\n \n\nselect\n\n\n\n \n\n 1\n\n \n\n", ), # Templated cases. 
# NOTE: We're just rendering the fixed file in the templated space # so that for these tests we don't touch the fix routines. That's # why the template tags aren't visible - BUT THEIR INDENTS SHOULD BE. # This one is useful for ensuring the tags have the same indent. # ... first with a FROM ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n" "FROM foo", "SELECT\n" " a\n" " \n" " ,d_val\n" " \n" " ,e_val\n" " \n" "FROM foo", ), # ... then without a FROM ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " ,{{ c }}_val\n" " {% endfor %}\n", "SELECT\n a\n \n ,d_val\n \n ,e_val\n \n", ), # This one is useful for if statements get handled right. # NOTE: There's a template loop in the middle. ( "SELECT\n" " {{ 'a' }}\n" " {% for c in ['d', 'e'] %}\n" " {% if c == 'd' %}\n" " ,{{ c }}_val_a\n" " {% else %}\n" " ,{{ c }}_val_b\n" "{% endif %}\n" " {% endfor %}\n", "SELECT\n" " a\n" " \n" " \n" " ,d_val_a\n" " \n" " \n" " \n" " ,e_val_b\n" " \n" " \n", ), # Test leading templated newlines. # https://github.com/sqlfluff/sqlfluff/issues/4485 ( "{{ '\\n \\n ' }}\nSELECT 1", # NOTE: This looks a little strange, but what's important # here is that it doesn't raise an exception. "\n \n \nSELECT 1", ), ], ) def test_reflow__lint_indent_points(raw_sql_in, raw_sql_out, default_config, caplog): """Test the lint_indent_points() method directly. Rather than testing directly, for brevity we check the raw output it produces. This results in a more compact test. """ root = parse_ansi_string(raw_sql_in, default_config) print(root.stringify()) seq = ReflowSequence.from_root(root, config=default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): elements, results = lint_indent_points(seq.elements, single_indent=" ") result_raw = "".join(elem.raw for elem in elements) assert result_raw == raw_sql_out, "Raw Element Check Failed!" # Now we've checked the elements - check that applying the fixes gets us to # the same place. print("Results:", results) anchor_info = compute_anchor_edit_info(fixes_from_results(results)) fixed_tree, _, _, valid = apply_fixes( root, default_config.get("dialect_obj"), "TEST", anchor_info ) assert valid, f"Reparse check failed: {fixed_tree.raw!r}" assert fixed_tree.raw == raw_sql_out, "Element check passed - but fix check failed!" @pytest.mark.parametrize( "indent_line, forced_indents, expected_units", [ # Trivial case of a first line. ( _IndentLine(0, [_IndentPoint(0, 0, 0, 0, None, False, ())]), [], 0, ), # Simple cases of a normal lines. ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, ())]), [], 3, ), ( # NOTE: Initial indent for *line* is different to *point*. # The *line* takes precedence. _IndentLine(1, [_IndentPoint(6, 0, 0, 3, 1, True, ())]), [], 1, ), # Indents and dedents on the line break. # NOTE: The line indent still takes precedence here. ( _IndentLine(3, [_IndentPoint(6, 1, 0, 3, 1, True, ())]), [], 3, ), ( _IndentLine(3, [_IndentPoint(6, -1, -1, 3, 1, True, ())]), [], 3, ), # Handle untaken indents. ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1,))]), [], 2, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (1, 2))]), [], 1, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (2,))]), # Forced indent takes us back up. [2], 3, ), ( _IndentLine(3, [_IndentPoint(6, 0, 0, 3, 1, True, (3,))]), [], 2, ), ( _IndentLine(3, [_IndentPoint(6, 0, -1, 3, 1, True, (3,))]), # Untaken indent is pruned by trough. 
[], 3, ), ], ) def test_reflow__desired_indent_units(indent_line, forced_indents, expected_units): """Test _IndentLine.desired_indent_units() directly.""" assert indent_line.desired_indent_units(forced_indents) == expected_units sqlfluff-2.3.5/test/utils/reflow/respace_test.py000066400000000000000000000070721451700765000220260ustar00rootroot00000000000000"""Tests for respacing methods. These are mostly on the ReflowPoint class. """ import logging import pytest from sqlfluff.core import Linter from sqlfluff.utils.reflow.elements import ReflowPoint from sqlfluff.utils.reflow.helpers import fixes_from_results from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree @pytest.mark.parametrize( "raw_sql_in,kwargs,raw_sql_out", [ # Basic cases ("select 1+2", {}, "select 1 + 2"), ("select 1 + 2 ", {}, "select 1 + 2"), # Check newline handling ("select\n 1 + 2", {}, "select\n 1 + 2"), ("select\n 1 + 2", {}, "select\n 1 + 2"), ("select\n 1 + 2", {"strip_newlines": True}, "select 1 + 2"), # Check filtering ("select \n 1 + 2 \n ", {}, "select\n 1 + 2\n"), ("select \n 1 + 2 \n ", {"filter": "all"}, "select\n 1 + 2\n"), ("select \n 1 + 2 \n ", {"filter": "inline"}, "select \n 1 + 2 \n "), ("select \n 1 + 2 \n ", {"filter": "newline"}, "select\n 1 + 2\n"), ], ) def test_reflow__sequence_respace( raw_sql_in, kwargs, raw_sql_out, default_config, caplog ): """Test the ReflowSequence.respace() method directly.""" root = parse_ansi_string(raw_sql_in, default_config) seq = ReflowSequence.from_root(root, config=default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): new_seq = seq.respace(**kwargs) assert new_seq.get_raw() == raw_sql_out @pytest.mark.parametrize( "raw_sql_in,point_idx,kwargs,raw_point_sql_out,fixes_out", [ # Basic cases ("select 1", 1, {}, " ", {("replace", " ")}), ("select 1+2", 3, {}, " ", {("create_after", "1")}), ("select (1+2)", 3, {}, "", set()), ("select ( 1+2)", 3, {}, "", {("delete", " ")}), # Newline handling ("select\n1", 1, {}, "\n", set()), ("select\n 1", 1, {}, "\n ", set()), ("select \n 1", 1, {}, "\n ", {("delete", " ")}), ( "select \n 1", 1, {"strip_newlines": True}, " ", {("delete", "\n"), ("delete", " "), ("replace", " ")}, ), ( "select ( \n 1)", 3, {"strip_newlines": True}, "", {("delete", "\n"), ("delete", " "), ("delete", " ")}, ), ], ) def test_reflow__point_respace_point( raw_sql_in, point_idx, kwargs, raw_point_sql_out, fixes_out, default_config, caplog ): """Test the ReflowPoint.respace_point() method directly. NOTE: This doesn't check any pre-existing fixes. That should be a separate more specific test. """ root = parse_ansi_string(raw_sql_in, default_config) seq = ReflowSequence.from_root(root, config=default_config) pnt = seq.elements[point_idx] assert isinstance(pnt, ReflowPoint) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): results, new_pnt = pnt.respace_point( prev_block=seq.elements[point_idx - 1], next_block=seq.elements[point_idx + 1], root_segment=root, lint_results=[], **kwargs ) assert new_pnt.raw == raw_point_sql_out # NOTE: We use set comparison, because ordering isn't important for fixes. 
assert { (fix.edit_type, fix.anchor.raw) for fix in fixes_from_results(results) } == fixes_out sqlfluff-2.3.5/test/utils/reflow/sequence_test.py000066400000000000000000000163251451700765000222150ustar00rootroot00000000000000"""Tests for the reflow module.""" import logging import pytest from sqlfluff.core import Linter from sqlfluff.core.rules.base import LintFix from sqlfluff.utils.reflow.elements import ReflowBlock, ReflowPoint from sqlfluff.utils.reflow.sequence import ReflowSequence def parse_ansi_string(sql, config): """Parse an ansi sql string for testing.""" linter = Linter(config=config) return linter.parse_string(sql).tree def assert_reflow_structure(sequence, StartClass, raw_elems): """Assert a ReflowSequence has the defined structure.""" assert [ [seg.raw for seg in elem.segments] for elem in sequence.elements ] == raw_elems # We can assert all the classes just by knowing which we should start with assert all(type(elem) is StartClass for elem in sequence.elements[::2]) OtherClass = ReflowBlock if StartClass is ReflowPoint else ReflowPoint assert all(type(elem) is OtherClass for elem in sequence.elements[1::2]) @pytest.mark.parametrize( "raw_sql,StartClass,raw_elems", [ ( "select 1 +2", ReflowBlock, [ ["select"], # NOTE: The empty strings are indents and dedents ["", " "], ["1"], [" "], ["+"], [], ["2"], # indent (as point) [""], # end_of_file (as block) [""], ], ) ], ) def test_reflow_sequence_from_segments( raw_sql, StartClass, raw_elems, default_config, caplog ): """Test direct sequence construction from segments.""" root = parse_ansi_string(raw_sql, default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_raw_segments( root.raw_segments, root, config=default_config ) assert_reflow_structure(result, StartClass, raw_elems) @pytest.mark.parametrize( "raw_sql,sides,target_idx,target_raw,StartClass,raw_elems", [ ( "select 1 +2", "both", 5, "+", ReflowBlock, [ # We should have expanded as far as the blocks either side ["1"], [" "], ["+"], [], ["2"], ], ), ( "select 1 +2", "before", 5, "+", ReflowBlock, [ ["1"], [" "], ["+"], ], ), ( "select 1 +2", "after", 5, "+", ReflowBlock, [ ["+"], [], ["2"], ], ), ( "select 1 +2", "before", 6, "2", ReflowBlock, [ ["+"], [], ["2"], ], ), ( "select 1 +2", "both", 4, " ", ReflowBlock, [ # Even targeting whitespace, we should get points either side. ["1"], [" "], ["+"], ], ), ( "select (1+2)", "both", 5, "1", ReflowBlock, [ # NOTE: We don't just stop at the indent, we go as far as code. ["("], # The indent sits in the point. [""], ["1"], [], ["+"], ], ), ( " SELECT 1 ", "both", 1, "SELECT", ReflowPoint, [ # We'll hit the edge of the file so start with a point. [" "], ["SELECT"], ["", " "], ["1"], ], ), ], ) def test_reflow_sequence_from_around_target( raw_sql, sides, target_idx, target_raw, StartClass, raw_elems, default_config, caplog, ): """Test direct sequence construction from a target.""" root = parse_ansi_string(raw_sql, default_config) print("Raw Segments:", root.raw_segments) target = root.raw_segments[target_idx] # Check we're aiming at the right place assert target.raw == target_raw with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_around_target( target, root, config=default_config, sides=sides ) assert_reflow_structure(result, StartClass, raw_elems) def test_reflow_sequence_from_around_target_non_raw(default_config, caplog): """Test direct sequence construction from a target. This time we use a target which isn't a RawSegment. 
""" sql = " SELECT 1 " root = parse_ansi_string(sql, default_config) # We should have a statement as a first level child. statement = root.segments[1] assert statement.is_type("statement") assert statement.raw == "SELECT 1" with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): result = ReflowSequence.from_around_target( statement, root, config=default_config ) # We should start with a point, because we hit the start of the file. # It should also hit the end of the file and effectively cover all # the raw segments of the file. assert_reflow_structure( result, ReflowPoint, [ [" "], ["SELECT"], ["", " "], ["1"], # dedent - ws ["", " "], # end of file [""], ], ) @pytest.mark.parametrize( "raw_sql,filter,delete_indices,edit_indices", [ # NOTE: These tests rely on the position of code *and non code* elements # in the parsed sequence, so may need to be altered if the parse structure # changes. ("SELECT \n 4", "all", [2], []), ("SELECT \n 4, \n 6", "all", [2, 7], []), ("SELECT \n 4, \n 6 ", "all", [2, 7, 12], []), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "newline", [2, 17, 21], []), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "inline", [12], [10, 14]), ("SELECT \n 4, 5, 6 , 7 \n 6 ", "all", [2, 12, 17, 21], [10, 14]), ], ) def test_reflow_sequence_respace_filter( raw_sql, filter, delete_indices, edit_indices, default_config, caplog ): """Test iteration of trailing whitespace fixes.""" root = parse_ansi_string(raw_sql, default_config) with caplog.at_level(logging.DEBUG, logger="sqlfluff.rules.reflow"): sequence = ReflowSequence.from_root(root, config=default_config) fixes = sequence.respace(filter=filter).get_fixes() # assert deletes assert [fix for fix in fixes if fix.edit_type == "delete"] == [ LintFix("delete", root.raw_segments[idx]) for idx in delete_indices ] # assert edits (with slightly less detail) assert [ root.raw_segments.index(fix.anchor) for fix in fixes if fix.edit_type == "replace" ] == edit_indices sqlfluff-2.3.5/tox.ini000066400000000000000000000152331451700765000146670ustar00rootroot00000000000000[tox] envlist = generate-fixture-yml, linting, doclinting, ruleslinting, docbuild, cov-init, doctests, py{37,38,39,310,311,312}, dbt{110,120,130,140,150,160}, cov-report, mypy, winpy, dbt{130,150}-winpy, yamllint [testenv] passenv = CI, CIRCLECI, CIRCLE_*, HOME, SQLFLUFF_BENCHMARK_API_KEY # Have option to explicitly set TMPDIR for python as on GitHub Action Windows # machines it doesn't read this from env and resets to system default, which # is often on different drive (C) from working dir (D), which causes problems. setenv = SQLFLUFF_TESTENV = 1 COVERAGE_FILE = .coverage.{envname} winpy: TMPDIR = temp_pytest allowlist_externals = make pip_pre = false deps = -r requirements_dev.txt # Apply the contraints files _as requirements_ files here so that # we force the right installation version up front in each environment. # NOTE: This is a bit of a hack around tox, but it does achieve reasonably # consistent results. dbt{110,120,130,140,150,160,}: -r {toxinidir}/constraints/{envname}.txt # Include any other steps necessary for testing below. # {posargs} is there to allow us to specify specific tests, which # can then be invoked from tox by calling e.g. # tox -e py35 -- project/tests/test_file.py::TestClassName::test_method commands = # Install the plugins as required. # NOTE: We do them here, so that when version numbers update, we don't # get install errors for version conflicts. 
The dbt templater has a version # number pinned to the same version number of the main sqlfluff library # so it _must_ be installed second in the context of a version which isn't # yet released (and so not available on pypi). dbt{110,120,130,140,150,160,}: python -m pip install {toxinidir}/plugins/sqlfluff-templater-dbt # Add the example plugin. # NOTE: The trailing comma is important because in the github test suite # the python version is not specified and instead the "py" environment # is invoked. Leaving the trailing comma ensures that this environment # still installs the relevant plugins. {py,winpy}{37,38,39,310,311,312,}: python -m pip install {toxinidir}/plugins/sqlfluff-plugin-example # For the dbt test cases install dependencies. dbt{110,120,130,140,150,160,}: dbt deps --project-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt/dbt_project --profiles-dir {toxinidir}/plugins/sqlfluff-templater-dbt/test/fixtures/dbt # Clean up from previous tests python {toxinidir}/util.py clean-tests # Run tests pytest -vv -rsfE --cov-report=lcov {posargs: {toxinidir}/test} python test/patch_lcov.py [testenv:cov-init] setenv = COVERAGE_FILE = .coverage commands = coverage erase [testenv:cov-report] setenv = COVERAGE_FILE = .coverage commands = coverage combine # Exclude dbt templater by default as those tests do not run without dbt coverage report --fail-under=100 --show-missing --omit "*templaters/dbt.py,*/dbt_templater/*" # Have copy of above for full coverage - including dbt - for those that want it [testenv:cov-report-dbt] setenv = COVERAGE_FILE = .coverage commands = coverage combine coverage report --fail-under=100 --show-missing [testenv:generate-fixture-yml] commands = python {toxinidir}/test/generate_parse_fixture_yml.py {posargs} [testenv:linting] # NOTE: We do install sqlfluff to run linting. This is # because lint-imports requires the module to be installed. commands = # ruff is configured to check linting and imports. # see: .ruff.toml and https://docs.astral.sh/ruff/rules/ ruff check . # Ruff *should* catch everything that flake8 does. Until # that has been confirmed, flake8 remains, but ruff runs first. flake8 # Check import references. lint-imports [testenv:doctests] commands = pytest -vv -rsfE --doctest-modules {posargs: {toxinidir}/src} [testenv:yamllint] skip_install = true deps = yamllint commands = yamllint -c .yamllint . [testenv:doclinting] deps = -rdocs/requirements.txt commands = # Before linting, generate the rule docs. # If we don't we get import errors. python {toxinidir}/docs/generate-rule-docs.py doc8 {toxinidir}/docs/source --file-encoding utf8 [testenv:docbuild] deps = -rdocs/requirements.txt commands = make -C {toxinidir}/docs html [testenv:mypy] # NOTE: We do install sqlfluff to run mypy, this # is so we can refer to the package as a package. # It also appears to make --strict checking more # stable. 
commands = # Standard MyPy on the main package mypy -p sqlfluff # Strict MyPy on the parser mypy -p sqlfluff.core.parser --strict [testenv:build-dist] skip_install = true deps = build commands = python -m build --sdist --wheel {posargs: {toxinidir}} [testenv:check-dist] skip_install = true deps = twine commands = twine check {toxinidir}/dist/* [testenv:publish-dist] skip_install = true deps = {[testenv:build-dist]deps} twine commands = {[testenv:build-dist]commands} twine upload --skip-existing {toxinidir}/dist/* [testenv:pre-commit] skip_install = true deps = pre-commit commands = pre-commit {posargs:run --all-files} [flake8] # Ignore: # W503: Line break before binary operator # D105: Missing docstring in magic method # D107: Missing docstring in __init__ # D418: Function/ Method decorated with @overload shouldn’t contain a docstring # C812: Missing trailing comma ignore = W503, D107, D105, D418, C812 exclude = .git,__pycache__,env,.tox,build,.venv,venv,.coverage.py,plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py max-line-length = 88 inline-quotes = double extend-ignore = # See https://github.com/PyCQA/pycodestyle/issues/373 E203, # sqlfluff uses flake8-docstrings https://pypi.org/project/flake8-docstrings/ # this is to assist with the sphinx based autodoc docstring-convention = google [pytest] python_files = *_test.py testpaths = test [coverage:run] source = src/sqlfluff omit = src/sqlfluff/__main__.py plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/osmosis/*.py [coverage:report] exclude_lines = sys.version_info pragma: no cover # See: https://github.com/nedbat/coveragepy/issues/970 # NOTE: Prefer not including the ... instead @overload # See: https://github.com/pytest-dev/pytest-cov/issues/428 @abstractmethod # NOTE: Prefer not including the ... instead [coverage:paths] source = # Local path src/ # These are the Github likely source paths D:\a\sqlfluff\sqlfluff\src\ D:\a\sqlfluff\sqlfluff\.tox\winpy\Lib\site-packages\ /home/runner/work/sqlfluff/sqlfluff/src/ /home/runner/work/sqlfluff/sqlfluff/.tox/*/lib/*/site-packages/ [doc8] # Ignore auto-generated docs ignore-path=docs/source/partials/ sqlfluff-2.3.5/util.py000066400000000000000000000172751451700765000147110ustar00rootroot00000000000000#!/usr/bin/env python """Utility scripts for use during deployment. NB: This is not part of the core sqlfluff code. """ # This contains various utility scripts import os import re import shutil import time import click from ghapi.all import GhApi @click.group() def cli(): """Launch the utility cli.""" pass @cli.command() @click.option("--path", default=".test-reports") def clean_tests(path): """Clear up the tests directory. NB: Using scripts allows platform independence. Makes a new one afterward """ try: shutil.rmtree(path) click.echo(f"Removed {path!r}...") # OSError is for python 27 # in py36 it's FileNotFoundError (but that inherits from IOError, which exists in # py27) except OSError: click.echo(f"Directory {path!r} does not exist. Skipping...") os.mkdir(path) click.echo(f"Created {path!r}") @cli.command() @click.argument("new_version_num") def release(new_version_num): """Change version number in the cfg files. NOTE: For fine-grained personal access tokens, this requires _write_ access to the "contents" scope. For some reason, if you only grant the _read_ access, you can't see any *draft* PRs which are necessary for this script to run.
""" api = GhApi( owner=os.environ["GITHUB_REPOSITORY_OWNER"], repo="sqlfluff", token=os.environ["GITHUB_TOKEN"], ) releases = api.repos.list_releases(per_page=100) latest_draft_release = None for rel in releases: if rel["draft"]: latest_draft_release = rel break if not latest_draft_release: raise ValueError("No draft release found!") # Linkify the PRs and authors draft_body_parts = latest_draft_release["body"].split("\n") potential_new_contributors = [] for i, p in enumerate(draft_body_parts): draft_body_parts[i] = re.sub( r"\(#([0-9]*)\) @([^ ]*)$", r"[#\1](https://github.com/sqlfluff/sqlfluff/pull/\1) [@\2](https://github.com/\2)", # noqa E501 p, ) new_contrib_string = re.sub( r".*\(#([0-9]*)\) @([^ ]*)$", r"* [@\2](https://github.com/\2) made their first contribution in [#\1](https://github.com/sqlfluff/sqlfluff/pull/\1)", # noqa E501 p, ) if new_contrib_string.startswith("* "): new_contrib_name = re.sub(r"\* \[(.*?)\].*", r"\1", new_contrib_string) potential_new_contributors.append( {"name": new_contrib_name, "line": new_contrib_string} ) whats_changed_text = "\n".join(draft_body_parts) # Find the first commit for each contributor in this release potential_new_contributors.reverse() seen_contributors = set() deduped_potential_new_contributors = [] for c in potential_new_contributors: if c["name"] not in seen_contributors: seen_contributors.add(c["name"]) deduped_potential_new_contributors.append(c) input_changelog = open("CHANGELOG.md", encoding="utf8").readlines() write_changelog = open("CHANGELOG.md", "w", encoding="utf8") for i, line in enumerate(input_changelog): write_changelog.write(line) if "DO NOT DELETE THIS LINE" in line: existing_entry_start = i + 2 # If the release is already in the changelog, update it if f"## [{new_version_num}]" in input_changelog[existing_entry_start]: input_changelog[ existing_entry_start ] = f"## [{new_version_num}] - {time.strftime('%Y-%m-%d')}\n" # Delete the existing What’s Changed and New Contributors sections remaining_changelog = input_changelog[existing_entry_start:] existing_whats_changed_start = ( next( j for j, line in enumerate(remaining_changelog) if line.startswith("## What’s Changed") ) + existing_entry_start ) existing_new_contributors_start = ( next( j for j, line in enumerate(remaining_changelog) if line.startswith("## New Contributors") ) + existing_entry_start ) existing_new_contributors_length = ( next( j for j, line in enumerate( input_changelog[existing_new_contributors_start:] ) if line.startswith("## [") ) - 1 ) del input_changelog[ existing_whats_changed_start : existing_new_contributors_start + existing_new_contributors_length ] # Now that we've cleared the previous sections, we will accurately # find if contributors have been previously mentioned in the changelog new_contributor_lines = [] input_changelog_str = "".join( input_changelog[existing_whats_changed_start:] ) for c in deduped_potential_new_contributors: if c["name"] not in input_changelog_str: new_contributor_lines.append(c["line"]) input_changelog[existing_whats_changed_start] = ( whats_changed_text + "\n\n## New Contributors\n" + "\n".join(new_contributor_lines) + "\n\n" ) else: write_changelog.write( f"\n## [{new_version_num}] - {time.strftime('%Y-%m-%d')}\n\n## Highlights\n\n" # noqa E501 ) write_changelog.write(whats_changed_text) write_changelog.write("\n## New Contributors\n\n") # Ensure contributor names don't appear in input_changelog list new_contributor_lines = [] input_changelog_str = "".join(input_changelog) for c in deduped_potential_new_contributors: 
if c["name"] not in input_changelog_str: new_contributor_lines.append(c["line"]) write_changelog.write("\n".join(new_contributor_lines)) write_changelog.write("\n") write_changelog.close() for filename in ["setup.cfg", "plugins/sqlfluff-templater-dbt/setup.cfg"]: input_file = open(filename, "r").readlines() # Regardless of platform, write newlines as \n write_file = open(filename, "w", newline="\n") for line in input_file: for key in ["stable_version", "version"]: if line.startswith(key): line = f"{key} = {new_version_num}\n" break if line.startswith(" sqlfluff=="): line = f" sqlfluff=={new_version_num}\n" write_file.write(line) write_file.close() for filename in ["docs/source/gettingstarted.rst"]: input_file = open(filename, "r").readlines() # Regardless of platform, write newlines as \n write_file = open(filename, "w", newline="\n") change_next_line = False for line in input_file: if change_next_line: line = f" {new_version_num}\n" change_next_line = False elif line.startswith(" $ sqlfluff version"): change_next_line = True write_file.write(line) write_file.close() if __name__ == "__main__": cli()
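The reflow test modules earlier in this section all lean on one small pattern: parse a SQL string via a Linter, wrap the resulting tree in a ReflowSequence, then compare the raw output of respace() or rebreak() (or collect the LintFix objects from get_fixes()). What follows is a minimal standalone sketch of that pattern; it is not part of the archive above, it assumes a sqlfluff 2.x install, and the input/output pairs are taken directly from the parametrized fixtures in respace_test.py and rebreak_test.py.

# Minimal standalone sketch (not repository code) of the parse -> reflow ->
# compare pattern used throughout the reflow tests above. Assumes sqlfluff 2.x.
from sqlfluff.core import FluffConfig, Linter
from sqlfluff.utils.reflow.sequence import ReflowSequence

config = FluffConfig(overrides={"dialect": "ansi"})

# Respace: normalise spacing around operators (fixture from respace_test.py).
tree = Linter(config=config).parse_string("select 1+2").tree
assert ReflowSequence.from_root(tree, config=config).respace().get_raw() == "select 1 + 2"

# Rebreak: with the default config, operators move to leading position
# (fixture from rebreak_test.py; note the implicit respace of "+ 2").
tree = Linter(config=config).parse_string("select 1+\n2").tree
assert ReflowSequence.from_root(tree, config=config).rebreak().get_raw() == "select 1\n+ 2"

Note that respace() and rebreak() return a new ReflowSequence rather than mutating in place, which is why the tests above can call get_fixes() on the result to retrieve the fixes needed to patch the original tree.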